*/
struct cmdQ_ce {
	struct sk_buff *skb;
-	DECLARE_PCI_UNMAP_ADDR(dma_addr);
-	DECLARE_PCI_UNMAP_LEN(dma_len);
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
-	DECLARE_PCI_UNMAP_ADDR(dma_addr);
-	DECLARE_PCI_UNMAP_LEN(dma_len);
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_LEN(dma_len);
};
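Aside: the DMA-state helpers the patch switches to live in <linux/dma-mapping.h>. Simplified, they reduce to the sketch below (this abridges the real header, it is not a verbatim copy). When CONFIG_NEED_DMA_MAP_STATE is not set, the fields and every accessor compile away, so the two structs above carry no extra bytes on fully coherent platforms:

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
/* No unmap state needed: the fields vanish, the accessors are no-ops. */
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

Unlike the DECLARE_PCI_UNMAP_* variants they replace, these are bus-agnostic, which is the point of the conversion; the pci_unmap_single()/pci_dma_sync_* calls themselves are unchanged by this patch.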
/*
	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

-		pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
-				 pci_unmap_len(ce, dma_len),
+		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+				 dma_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
-		if (likely(pci_unmap_len(ce, dma_len))) {
-			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
-					 pci_unmap_len(ce, dma_len),
+		if (likely(dma_unmap_len(ce, dma_len))) {
+			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+					 dma_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
			if (q->sop)
				q->sop = 0;
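The dma_len test above is load-bearing: when one buffer spans several descriptors (see the SGE_TX_DESC_MAX_PLEN hunks further down), only the entry that owns the mapping records a non-zero unmap length, and the continuation entries are zeroed so teardown unmaps each buffer exactly once. A condensed, self-contained sketch of that idiom (names are illustrative, not the driver's):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

struct tx_entry {			/* illustrative stand-in for cmdQ_ce */
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

static void free_tx_entries(struct pci_dev *pdev, struct tx_entry *ce,
			    unsigned int n)
{
	while (n--) {
		/* Only the entry that owns the mapping has dma_len != 0. */
		if (dma_unmap_len(ce, dma_len))
			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
					 dma_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
		if (ce->skb)
			dev_kfree_skb(ce->skb);
		ce++;
	}
}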
		skb_reserve(skb, sge->rx_pkt_pad);

		ce->skb = skb;
-		pci_unmap_addr_set(ce, dma_addr, mapping);
-		pci_unmap_len_set(ce, dma_len, dma_len);
+		dma_unmap_addr_set(ce, dma_addr, mapping);
+		dma_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
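V_CMD_LEN() and V_CMD_GEN1() pack the buffer length and the queue's current generation bit into the len_gen descriptor word. The generation bit is how fresh ring entries are told apart from stale ones after the ring wraps, without an extra producer register. A minimal sketch of the idiom; the bit layout and names here are illustrative, not the SGE's real ones:

#include <linux/types.h>

struct rx_desc {
	u32 len_gen;			/* illustrative descriptor word */
};

#define V_LEN(x)	((x) & 0x3fffffff)	/* illustrative field */
#define V_GEN(x)	((u32)(x) << 31)	/* illustrative field */

static void post_rx_buffer(struct rx_desc *ring, unsigned int size,
			   unsigned int *pidx, unsigned int *genbit, u32 len)
{
	ring[*pidx].len_gen = V_LEN(len) | V_GEN(*genbit);
	if (++*pidx == size) {
		*pidx = 0;
		*genbit ^= 1;	/* flip on wrap so old entries look stale */
	}
}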
		skb_reserve(skb, 2);	/* align IP header */
		skb_put(skb, len);
		pci_dma_sync_single_for_cpu(pdev,
-					    pci_unmap_addr(ce, dma_addr),
-					    pci_unmap_len(ce, dma_len),
+					    dma_unmap_addr(ce, dma_addr),
+					    dma_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(ce->skb, skb->data, len);
		pci_dma_sync_single_for_device(pdev,
-					       pci_unmap_addr(ce, dma_addr),
-					       pci_unmap_len(ce, dma_len),
+					       dma_unmap_addr(ce, dma_addr),
+					       dma_unmap_len(ce, dma_len),
					       PCI_DMA_FROMDEVICE);
		recycle_fl_buf(fl, fl->cidx);
		return skb;

		return NULL;
	}
-	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
-			 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	prefetch(skb->data);
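The two get_packet() paths above are the classic copybreak pattern: below a size threshold it is cheaper to sync the still-mapped buffer, memcpy the frame into a fresh skb, sync it back, and recycle the DMA buffer; above the threshold the buffer is unmapped and handed up whole. The sync_for_cpu is mandatory before the CPU reads the data on non-coherent platforms, and sync_for_device before ownership returns to the hardware. Condensed into one helper (a sketch of the copy branch only; allocation failure just returns NULL so the caller can fall back):

static struct sk_buff *rx_copybreak(struct pci_dev *pdev,
				    const struct freelQ_ce *ce,
				    unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (!skb)
		return NULL;	/* caller falls back to the big-packet path */

	skb_reserve(skb, 2);	/* align IP header */
	skb_put(skb, len);
	pci_dma_sync_single_for_cpu(pdev, dma_unmap_addr(ce, dma_addr),
				    dma_unmap_len(ce, dma_len),
				    PCI_DMA_FROMDEVICE);
	skb_copy_from_linear_data(ce->skb, skb->data, len);
	pci_dma_sync_single_for_device(pdev, dma_unmap_addr(ce, dma_addr),
				       dma_unmap_len(ce, dma_len),
				       PCI_DMA_FROMDEVICE);
	return skb;		/* caller recycles the free-list buffer */
}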
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

-	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
-				    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);

	pr_err("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
		write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
			      *gen, nfrags == 0 && *desc_len == 0);
		ce1->skb = NULL;
-		pci_unmap_len_set(ce1, dma_len, 0);
+		dma_unmap_len_set(ce1, dma_len, 0);
		*desc_mapping += SGE_TX_DESC_MAX_PLEN;
		if (*desc_len) {
			ce1++;

	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
-	pci_unmap_len_set(ce, dma_len, 0);
+	dma_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
	}
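These two hunks are the transmit-side source of the zero-length entries: when PAGE_SIZE exceeds SGE_TX_DESC_MAX_PLEN, one mapping is posted as several maximum-size descriptors, and every centry except the one that records the mapping gets dma_unmap_len_set(ce, dma_len, 0). The chunking reduces to a loop like the sketch below; it assumes the driver's types and write_tx_desc() helper, and ring-index wrapping plus generation handling are elided:

/* Sketch: split one long mapping into SGE_TX_DESC_MAX_PLEN-sized pieces. */
static void write_chunked_descs(struct cmdQ_e *e1, struct cmdQ_ce *ce1,
				dma_addr_t desc_mapping, unsigned int desc_len,
				unsigned int gen, unsigned int nfrags)
{
	while (desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_len -= SGE_TX_DESC_MAX_PLEN;
		/* EOP only if this chunk ends the whole packet. */
		write_tx_desc(e1, desc_mapping, SGE_TX_DESC_MAX_PLEN, gen,
			      nfrags == 0 && desc_len == 0);
		ce1->skb = NULL;
		dma_unmap_len_set(ce1, dma_len, 0);	/* continuation entry */
		desc_mapping += SGE_TX_DESC_MAX_PLEN;
		e1++;
		ce1++;
	}
	/* The final, shorter piece is written by the caller, which records
	 * the real dma_addr/dma_len in the owning centry. */
}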
	ce->skb = NULL;
-	pci_unmap_addr_set(ce, dma_addr, mapping);
-	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+	dma_unmap_addr_set(ce, dma_addr, mapping);
+	dma_unmap_len_set(ce, dma_len, skb->len - skb->data_len);

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		write_tx_desc(e1, desc_mapping, desc_len, gen,
			      nfrags == 0);
		ce->skb = NULL;
-		pci_unmap_addr_set(ce, dma_addr, mapping);
-		pci_unmap_len_set(ce, dma_len, frag->size);
+		dma_unmap_addr_set(ce, dma_addr, mapping);
+		dma_unmap_len_set(ce, dma_len, frag->size);
	}
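Note the two kinds of unmap length recorded above: skb->len - skb->data_len (the linear head, i.e. skb_headlen()) for the first entry, and frag->size for each page fragment, matching how the buffers were mapped. A minimal sketch of that mapping side, using the pre-3.x skb_frag_t field names this driver's era uses (mapping-error checks elided; addrs is a caller-supplied array, illustrative):

static int map_skb(struct pci_dev *pdev, struct sk_buff *skb,
		   dma_addr_t *addrs)
{
	int i;

	/* Linear head: everything not held in page fragments. */
	addrs[0] = pci_map_single(pdev, skb->data,
				  skb_headlen(skb), PCI_DMA_TODEVICE);

	/* One pci_map_page() per fragment. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i + 1] = pci_map_page(pdev, frag->page,
					    frag->page_offset, frag->size,
					    PCI_DMA_TODEVICE);
	}
	return i + 1;		/* number of mappings recorded */
}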
	ce->skb = skb;
	wmb();
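The closing wmb() is the other half of the descriptor protocol: every store to the ring (including the generation bits set above) must be globally visible before the hardware is told to look, and ce->skb must be recorded before the packet can complete. In this driver the sequence plausibly ends with the doorbell write, roughly (a sketch; the register names match cxgb's sge.c, but the surrounding context is elided):

	ce->skb = skb;
	wmb();		/* descriptor stores visible before the doorbell */
	writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);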