return;
}
- pci_unmap_single(tp->pdev,
- pci_unmap_addr(ri, mapping),
- skb_headlen(skb),
- PCI_DMA_TODEVICE);
+ skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
ri->skb = NULL;
ri = &tp->tx_buffers[sw_idx];
if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
tx_bug = 1;
-
- pci_unmap_page(tp->pdev,
- pci_unmap_addr(ri, mapping),
- skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
-
sw_idx = NEXT_TX(sw_idx);
}
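For context, skb_dma_unmap() tears down the head mapping and every fragment mapping recorded in the skb in one call, which is why the explicit pci_unmap_page() loop over the fragments above goes away. A rough sketch of the helper's behaviour (an approximation for illustration, not the exact net/core implementation):

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	/* Sketch: undo the head mapping in dma_maps[0] and the fragment
	 * mappings in dma_maps[1..nr_frags]. */
	static void sketch_skb_dma_unmap(struct device *dev, struct sk_buff *skb,
					 enum dma_data_direction dir)
	{
		struct skb_shared_info *sp = skb_shinfo(skb);
		int i;

		dma_unmap_single(dev, sp->dma_maps[0], skb_headlen(skb), dir);
		for (i = 0; i < sp->nr_frags; i++)
			dma_unmap_page(dev, sp->dma_maps[i + 1],
				       sp->frags[i].size, dir);
	}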
} else {
/* New SKB is guaranteed to be linear. */
entry = *start;
- new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
- PCI_DMA_TODEVICE);
+ ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
+ new_addr = skb_shinfo(new_skb)->dma_maps[0];
+
/* Make sure new skb does not cross any 4G boundaries.
* Drop the packet if it does.
*/
- if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
+ if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
+ /* Undo a successful mapping before dropping the packet. */
+ if (!ret)
+ skb_dma_unmap(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
ret = -1;
dev_kfree_skb(new_skb);
new_skb = NULL;
/* Now clean up the sw ring entries. */
i = 0;
while (entry != last_plus_one) {
- int len;
-
- if (i == 0)
- len = skb_headlen(skb);
- else
- len = skb_shinfo(skb)->frags[i-1].size;
- pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tp->tx_buffers[entry], mapping),
- len, PCI_DMA_TODEVICE);
if (i == 0) {
tp->tx_buffers[entry].skb = new_skb;
- pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
} else {
tp->tx_buffers[entry].skb = NULL;
}
i++;
}
+ skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
dev_kfree_skb(skb);
return ret;
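The mapping side is the mirror image: skb_dma_map() records the head mapping in skb_shinfo(skb)->dma_maps[0] and fragment i in dma_maps[i + 1], and unwinds everything it mapped if any step fails, so a nonzero return means nothing is left mapped. A hedged sketch along those lines (again an approximation, not the exact net/core code):

	/* Sketch: map head + fragments, record the bus addresses in
	 * skb_shared_info, unwind on any mapping error. */
	static int sketch_skb_dma_map(struct device *dev, struct sk_buff *skb,
				      enum dma_data_direction dir)
	{
		struct skb_shared_info *sp = skb_shinfo(skb);
		dma_addr_t map;
		int i;

		map = dma_map_single(dev, skb->data, skb_headlen(skb), dir);
		if (dma_mapping_error(dev, map))
			return -ENOMEM;
		sp->dma_maps[0] = map;

		for (i = 0; i < sp->nr_frags; i++) {
			skb_frag_t *fp = &sp->frags[i];

			map = dma_map_page(dev, fp->page, fp->page_offset,
					   fp->size, dir);
			if (dma_mapping_error(dev, map))
				goto unwind;
			sp->dma_maps[i + 1] = map;
		}
		return 0;

	unwind:
		while (--i >= 0)
			dma_unmap_page(dev, sp->dma_maps[i + 1],
				       sp->frags[i].size, dir);
		dma_unmap_single(dev, sp->dma_maps[0], skb_headlen(skb), dir);
		return -ENOMEM;
	}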
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
- dma_addr_t mapping;
u32 len, entry, base_flags, mss;
+ struct skb_shared_info *sp;
+ dma_addr_t mapping;
len = skb_headlen(skb);
(vlan_tx_tag_get(skb) << 16));
#endif
- /* Queue skb data, a.k.a. the main skb fragment. */
- mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_kfree_skb(skb);
+ goto out_unlock;
+ }
+
+ sp = skb_shinfo(skb);
+
+ mapping = sp->dma_maps[0];
tp->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
tg3_set_txd(tp, entry, mapping, len, base_flags,
(skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = frag->size;
- mapping = pci_map_page(tp->pdev,
- frag->page,
- frag->page_offset,
- len, PCI_DMA_TODEVICE);
-
+ mapping = sp->dma_maps[i + 1];
tp->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
tg3_set_txd(tp, entry, mapping, len,
base_flags, (i == last) | (mss << 1));
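Taken together, the transmit fast path now just reads the addresses that skb_dma_map() already computed instead of mapping the head and each fragment itself. A consolidated sketch of the post-patch descriptor fill, with the VLAN/TSO flag setup, hwbug checks and ring-full handling elided:

	/* Consolidated view (sketch) of the post-patch fill loop. */
	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}
	sp = skb_shinfo(skb);

	mapping = sp->dma_maps[0];		/* linear head */
	tp->tx_buffers[entry].skb = skb;
	tg3_set_txd(tp, entry, mapping, skb_headlen(skb), base_flags,
		    (sp->nr_frags == 0) | (mss << 1));
	entry = NEXT_TX(entry);

	for (i = 0; i < sp->nr_frags; i++) {
		mapping = sp->dma_maps[i + 1];	/* fragment i */
		tp->tx_buffers[entry].skb = NULL;
		tg3_set_txd(tp, entry, mapping, sp->frags[i].size, base_flags,
			    (i == sp->nr_frags - 1) | (mss << 1));
		entry = NEXT_TX(entry);
	}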
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
- dma_addr_t mapping;
u32 len, entry, base_flags, mss;
+ struct skb_shared_info *sp;
int would_hit_hwbug;
+ dma_addr_t mapping;
len = skb_headlen(skb);
(vlan_tx_tag_get(skb) << 16));
#endif
- /* Queue skb data, a.k.a. the main skb fragment. */
- mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_kfree_skb(skb);
+ goto out_unlock;
+ }
+
+ sp = skb_shinfo(skb);
+
+ mapping = sp->dma_maps[0];
tp->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
would_hit_hwbug = 0;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = frag->size;
- mapping = pci_map_page(tp->pdev,
- frag->page,
- frag->page_offset,
- len, PCI_DMA_TODEVICE);
+ mapping = sp->dma_maps[i + 1];
tp->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
for (i = 0; i < TG3_TX_RING_SIZE; ) {
struct tx_ring_info *txp;
struct sk_buff *skb;
- int j;
txp = &tp->tx_buffers[i];
skb = txp->skb;
continue;
}
- pci_unmap_single(tp->pdev,
- pci_unmap_addr(txp, mapping),
- skb_headlen(skb),
- PCI_DMA_TODEVICE);
+ skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
txp->skb = NULL;
- i++;
- for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
- txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
- pci_unmap_page(tp->pdev,
- pci_unmap_addr(txp, mapping),
- skb_shinfo(skb)->frags[j].size,
- PCI_DMA_TODEVICE);
- i++;
- }
+ i += skb_shinfo(skb)->nr_frags + 1;
dev_kfree_skb_any(skb);
}
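With the per-descriptor unmap bookkeeping gone, tg3_free_rings() only needs to know that an skb occupies nr_frags + 1 consecutive tx slots; the single skb_dma_unmap() call covers all of their mappings. Consolidated, the resulting walk is roughly:

	/* Consolidated view (sketch) of the post-patch ring walk. */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp = &tp->tx_buffers[i];
		struct sk_buff *skb = txp->skb;

		if (!skb) {
			i++;
			continue;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
		txp->skb = NULL;

		/* head + fragments sit in consecutive slots */
		i += skb_shinfo(skb)->nr_frags + 1;

		dev_kfree_skb_any(skb);
	}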