skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
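+ /* Abort ring init if this Rx buffer cannot be mapped */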
+ if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
+ netdev_err(ndev, "DMA mapping error\n");
+ goto out;
+ }
+
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
dma_addr_t tail_p;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
+ u32 orig_tail_ptr = lp->tx_bd_tail;
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
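+ /* If the header mapping failed there is nothing to unmap; just drop the packet */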
+ if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
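+ /* Program the control word only once the mapping is known to be good */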
+ cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
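+ /* Unmap the descriptors used so far and restore the ring tail */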
+ axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+ NULL);
+ lp->tx_bd_tail = orig_tail_ptr;
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+ }
cur_p->cntrl = skb_frag_size(frag);
}
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "RX DMA mapping error\n");
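+ /* Free the replacement skb that could not be mapped */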
+ dev_kfree_skb(new_skb);
+ return;
+ }
+
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
cur_p->skb = new_skb;