return -ENOMEM;
mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(bp->pdev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
+
rx_pg->page = page;
pci_unmap_addr_set(rx_pg, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
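In bnx2_alloc_rx_page(), the new pci_dma_mapping_error() check means a failed mapping no longer leaks the page or hands the hardware a bogus bus address. The whole pattern as a self-contained sketch (alloc_and_map_rx_page() is a made-up name for illustration, not a driver symbol):

static int alloc_and_map_rx_page(struct pci_dev *pdev, struct page **pagep,
				 dma_addr_t *mappingp)
{
	struct page *page = alloc_page(GFP_ATOMIC);
	dma_addr_t mapping;

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, mapping)) {
		__free_page(page);	/* undo the allocation on map failure */
		return -EIO;
	}
	*pagep = page;
	*mappingp = mapping;
	return 0;
}

The same check lands in bnx2_alloc_rx_skb() for the linear buffer: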
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(bp->pdev, mapping)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
rx_buf->skb = skb;
pci_unmap_addr_set(rx_buf, mapping, mapping);
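The pci_unmap_addr_set()/pci_unmap_addr() macros seen here stash the DMA address in the software ring entry at fill time so the completion path can find it again; on architectures that keep no unmap state they compile away entirely. The pairing, sketched with this function's names:

/* fill path: remember the mapping in the sw ring entry */
pci_unmap_addr_set(rx_buf, mapping, mapping);

/* completion path: retrieve it to unmap the same buffer */
pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
		 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

Next, the TX completion path in bnx2_tx_int():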
sw_cons = txr->tx_cons;
while (sw_cons != hw_cons) {
- struct sw_bd *tx_buf;
+ struct sw_tx_bd *tx_buf;
struct sk_buff *skb;
int i, last;
}
}
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
tx_buf->skb = NULL;
last = skb_shinfo(skb)->nr_frags;
for (i = 0; i < last; i++) {
sw_cons = NEXT_TX_BD(sw_cons);
-
- pci_unmap_page(bp->pdev,
- pci_unmap_addr(
- &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
- mapping),
- skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
}
sw_cons = NEXT_TX_BD(sw_cons);
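Here the head unmap plus the per-fragment pci_unmap_page() loop collapse into a single skb_dma_unmap() call; the surviving loop only advances sw_cons past the fragment BDs. skb_dma_unmap() (net/core/skb_dma_map.c in this kernel generation) walks the addresses its map counterpart cached, roughly:

void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
		   enum dma_data_direction dir)
{
	struct skb_shared_info *sp = skb_shinfo(skb);
	int i;

	/* unmap the linear head, then every page fragment */
	dma_unmap_single(dev, sp->dma_maps[0], skb_headlen(skb), dir);
	for (i = 0; i < sp->nr_frags; i++)
		dma_unmap_page(dev, sp->dma_maps[i + 1],
			       sp->frags[i].size, dir);
}

The following hunk is bnx2_reuse_rx_skb_pages():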
{
struct sw_pg *cons_rx_pg, *prod_rx_pg;
struct rx_bd *cons_bd, *prod_bd;
- dma_addr_t mapping;
int i;
- u16 hw_prod = rxr->rx_pg_prod, prod;
+ u16 hw_prod, prod;
u16 cons = rxr->rx_pg_cons;
+ cons_rx_pg = &rxr->rx_pg_ring[cons];
+
+ /* The caller was unable to allocate a new page to replace the
+ * last one in the frags array, so we need to recycle that page
+ * and then free the skb.
+ */
+ if (skb) {
+ struct page *page;
+ struct skb_shared_info *shinfo;
+
+ shinfo = skb_shinfo(skb);
+ shinfo->nr_frags--;
+ page = shinfo->frags[shinfo->nr_frags].page;
+ shinfo->frags[shinfo->nr_frags].page = NULL;
+
+ cons_rx_pg->page = page;
+ dev_kfree_skb(skb);
+ }
+
+ hw_prod = rxr->rx_pg_prod;
+
for (i = 0; i < count; i++) {
prod = RX_PG_RING_IDX(hw_prod);
cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
- if (i == 0 && skb) {
- struct page *page;
- struct skb_shared_info *shinfo;
-
- shinfo = skb_shinfo(skb);
- shinfo->nr_frags--;
- page = shinfo->frags[shinfo->nr_frags].page;
- shinfo->frags[shinfo->nr_frags].page = NULL;
- mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- cons_rx_pg->page = page;
- pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
- dev_kfree_skb(skb);
- }
if (prod != cons) {
prod_rx_pg->page = cons_rx_pg->page;
cons_rx_pg->page = NULL;
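The function now strips the last page off the skb once, before the refill loop, and keeps that page's existing DMA mapping instead of re-mapping it (the old in-loop pci_map_page() was itself an unchecked mapping that could fail). The rest of the recycle step, condensed from the surrounding driver code, moves the cached bus address and the BD pointers along with the page:

	pci_unmap_addr_set(prod_rx_pg, mapping,
			   pci_unmap_addr(cons_rx_pg, mapping));
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

The counterpart change in bnx2_rx_skb():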
skb_put(skb, hdr_len);
for (i = 0; i < pages; i++) {
+ dma_addr_t mapping_old;
+
frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
if (unlikely(frag_len <= 4)) {
unsigned int tail = 4 - frag_len;
}
rx_pg = &rxr->rx_pg_ring[pg_cons];
- pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
-
+ /* Don't unmap yet. If we're unable to allocate a new
+ * page, we need to recycle the page and the DMA addr.
+ */
+ mapping_old = pci_unmap_addr(rx_pg, mapping);
if (i == pages - 1)
frag_len -= 4;
return err;
}
+ pci_unmap_page(bp->pdev, mapping_old,
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
frag_size -= frag_len;
skb->data_len += frag_len;
skb->truesize += frag_len;
continue;
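The key reordering: the page is unmapped only after its replacement has been allocated, so the error path can hand a still-mapped page back to the ring. Condensed (bnx2_alloc_rx_page() and bnx2_reuse_rx_skb_pages() are the functions patched above; the real error path also rewinds the ring indices first):

mapping_old = pci_unmap_addr(rx_pg, mapping);	/* remember, don't unmap */

err = bnx2_alloc_rx_page(bp, rxr, RX_PG_RING_IDX(pg_prod));
if (unlikely(err)) {
	/* refill failed: recycle the page together with its live mapping */
	bnx2_reuse_rx_skb_pages(bp, rxr, skb, pages - i);
	return err;
}

/* the ring slot has a fresh page; the old mapping can finally go */
pci_unmap_page(bp->pdev, mapping_old, PAGE_SIZE, PCI_DMA_FROMDEVICE);

bnx2_free_tx_skbs() is next: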
for (j = 0; j < TX_DESC_CNT; ) {
- struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb = tx_buf->skb;
- int k, last;
if (skb == NULL) {
j++;
continue;
}
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
tx_buf->skb = NULL;
- last = skb_shinfo(skb)->nr_frags;
- for (k = 0; k < last; k++) {
- tx_buf = &txr->tx_buf_ring[j + k + 1];
- pci_unmap_page(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
- skb_shinfo(skb)->frags[j].size,
- PCI_DMA_TODEVICE);
- }
+ j += skb_shinfo(skb)->nr_frags + 1;
dev_kfree_skb(skb);
- j += k + 1;
}
}
}
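bnx2_free_tx_skbs() sheds its open-coded fragment loop the same way. Worth noting: the removed loop sized each unmap with skb_shinfo(skb)->frags[j].size, using the ring counter j where the fragment counter k was intended, so the conversion also retires a latent bug. The corrected open-coded form would have read (hypothetical, since skb_dma_unmap() now does all of this internally):

for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
	tx_buf = &txr->tx_buf_ring[j + k + 1];
	pci_unmap_page(bp->pdev,
		       pci_unmap_addr(tx_buf, mapping),
		       skb_shinfo(skb)->frags[k].size,	/* k, not j */
		       PCI_DMA_TODEVICE);
}

The loopback self-test in bnx2_run_loopback():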
for (i = 14; i < pkt_size; i++)
packet[i] = (unsigned char) (i & 0xff);
- map = pci_map_single(bp->pdev, skb->data, pkt_size,
- PCI_DMA_TODEVICE);
+ if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+ map = skb_shinfo(skb)->dma_maps[0];
REG_WR(bp, BNX2_HC_COMMAND,
bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
udelay(5);
- pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
+ skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
dev_kfree_skb(skb);
if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
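Both the self-test and bnx2_start_xmit() (next hunk) move to skb_dma_map(), which maps the head and every fragment in one call, caches the bus addresses in skb_shinfo(skb)->dma_maps[], and unwinds whatever it had already mapped on failure, so callers see a simple zero/nonzero result. Roughly, from net/core/skb_dma_map.c in this kernel generation:

int skb_dma_map(struct device *dev, struct sk_buff *skb,
		enum dma_data_direction dir)
{
	struct skb_shared_info *sp = skb_shinfo(skb);
	dma_addr_t map;
	int i;

	map = dma_map_single(dev, skb->data, skb_headlen(skb), dir);
	if (dma_mapping_error(dev, map))
		goto out_err;
	sp->dma_maps[0] = map;

	for (i = 0; i < sp->nr_frags; i++) {
		skb_frag_t *fp = &sp->frags[i];

		map = dma_map_page(dev, fp->page, fp->page_offset,
				   fp->size, dir);
		if (dma_mapping_error(dev, map))
			goto unwind;
		sp->dma_maps[i + 1] = map;
	}
	return 0;

unwind:
	while (--i >= 0)
		dma_unmap_page(dev, sp->dma_maps[i + 1],
			       sp->frags[i].size, dir);
	dma_unmap_single(dev, sp->dma_maps[0], skb_headlen(skb), dir);
out_err:
	return -ENOMEM;
}

Finally, the hot path in bnx2_start_xmit():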
struct bnx2 *bp = netdev_priv(dev);
dma_addr_t mapping;
struct tx_bd *txbd;
- struct sw_bd *tx_buf;
+ struct sw_tx_bd *tx_buf;
u32 len, vlan_tag_flags, last_frag, mss;
u16 prod, ring_prod;
int i;
struct bnx2_napi *bnapi;
struct bnx2_tx_ring_info *txr;
struct netdev_queue *txq;
+ struct skb_shared_info *sp;
/* Determine which tx ring we will be placed on */
i = skb_get_queue_mapping(skb);
} else
mss = 0;
- mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sp = skb_shinfo(skb);
+ mapping = sp->dma_maps[0];
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = skb;
- pci_unmap_addr_set(tx_buf, mapping, mapping);
txbd = &txr->tx_desc_ring[ring_prod];
txbd = &txr->tx_desc_ring[ring_prod];
len = frag->size;
- mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
- len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
- mapping, mapping);
+ mapping = sp->dma_maps[i + 1];
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
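With skb_dma_map() doing the work up front, bnx2_start_xmit() drops its pci_map_single()/pci_map_page() calls and the per-entry pci_unmap_addr_set() bookkeeping; the BD writes simply index the cached array, and only tx_buf->skb still needs recording so bnx2_tx_int() can locate the skb at completion time. On mapping failure the packet is dropped and NETDEV_TX_OK returned: the skb has already been freed, so NETDEV_TX_BUSY, which asks the stack to requeue the same skb, is not an option. The array layout these hunks index:

/* established by skb_dma_map(), consumed by the BD writes above:
 *   sp->dma_maps[0]      bus address of skb->data (skb_headlen() bytes)
 *   sp->dma_maps[i + 1]  bus address of skb_shinfo(skb)->frags[i]
 */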