net: axienet: Check for DMA mapping errors
author    Andre Przywara <andre.przywara@arm.com>    Tue, 24 Mar 2020 13:23:40 +0000 (13:23 +0000)
committer David S. Miller <davem@davemloft.net>      Tue, 24 Mar 2020 23:33:04 +0000 (16:33 -0700)
Especially with the default 32-bit DMA mask, DMA buffers are a limited
resource, so their allocation can fail.
As the DMA API documentation requires, add error-checking code after each
dma_map_single() call to catch the case where we run out of "low" memory,
as sketched below.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
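
For reference, the checks this patch adds follow the pattern the DMA API
documentation prescribes, roughly (a minimal sketch; dev, buf and len are
placeholders, not driver code):

    dma_addr_t handle;

    handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle)) {
            /*
             * Mapping failed, e.g. no space left under a 32-bit mask.
             * The handle is invalid and must be neither handed to the
             * device nor passed to dma_unmap_single().
             */
            return -ENOMEM;
    }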
drivers/net/ethernet/xilinx/xilinx_axienet_main.c

index 5c1b539..736ac1b 100644
@@ -248,6 +248,11 @@ static int axienet_dma_bd_init(struct net_device *ndev)
                                                     skb->data,
                                                     lp->max_frm_size,
                                                     DMA_FROM_DEVICE);
+               if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
+                       netdev_err(ndev, "DMA mapping error\n");
+                       goto out;
+               }
+
                lp->rx_bd_v[i].cntrl = lp->max_frm_size;
        }
 
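The new check bails out through the function's pre-existing "out" label,
which unwinds the whole ring; a sketch of what that error path looks like
in axienet_dma_bd_init() (shown for context, not part of this hunk):

    out:
            axienet_dma_bd_release(ndev);   /* unmap and free the ring */
            return -ENOMEM;
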
@@ -679,6 +684,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        dma_addr_t tail_p;
        struct axienet_local *lp = netdev_priv(ndev);
        struct axidma_bd *cur_p;
+       u32 orig_tail_ptr = lp->tx_bd_tail;
 
        num_frag = skb_shinfo(skb)->nr_frags;
        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -714,9 +720,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
        }
 
-       cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
        cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
                                     skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+               if (net_ratelimit())
+                       netdev_err(ndev, "TX DMA mapping error\n");
+               ndev->stats.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
+       cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 
        for (ii = 0; ii < num_frag; ii++) {
                if (++lp->tx_bd_tail >= lp->tx_bd_num)
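
Dropping the packet while returning NETDEV_TX_OK is deliberate: it tells
the core the skb has been consumed, whereas NETDEV_TX_BUSY would make the
stack requeue and retry a packet whose mapping would most likely fail
again. The generic shape of such a drop path, for context (illustrative
only; dev, addr and skb are placeholders):

    if (unlikely(dma_mapping_error(dev, addr))) {
            ndev->stats.tx_dropped++;
            dev_kfree_skb_any(skb); /* a dropped skb still counts as consumed */
            return NETDEV_TX_OK;    /* do not let the core requeue it */
    }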
@@ -727,6 +739,16 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                             skb_frag_address(frag),
                                             skb_frag_size(frag),
                                             DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+                       if (net_ratelimit())
+                               netdev_err(ndev, "TX DMA mapping error\n");
+                       ndev->stats.tx_dropped++;
+                       axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+                                             NULL);
+                       lp->tx_bd_tail = orig_tail_ptr;
+
+                       return NETDEV_TX_OK;
+               }
                cur_p->cntrl = skb_frag_size(frag);
        }
 
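When fragment ii fails to map, the head plus fragments 0..ii-1 have
already been mapped, i.e. ii + 1 descriptors in total. The rollback
therefore unmaps exactly that many, starting from the tail position saved
on entry, and never touches the failed mapping itself; annotated, the
unwind reads (axienet_free_tx_chain() is the driver's existing TX cleanup
helper):

    /* Undo the head and the ii fragments mapped so far ... */
    axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1, NULL);
    /* ... and rewind the ring tail so the slots get reused. */
    lp->tx_bd_tail = orig_tail_ptr;
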
@@ -807,6 +829,13 @@ static void axienet_recv(struct net_device *ndev)
                cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
                                             lp->max_frm_size,
                                             DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+                       if (net_ratelimit())
+                               netdev_err(ndev, "RX DMA mapping error\n");
+                       dev_kfree_skb(new_skb);
+                       return;
+               }
+
                cur_p->cntrl = lp->max_frm_size;
                cur_p->status = 0;
                cur_p->skb = new_skb;
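
The receive path applies the same rule: never arm a descriptor whose
address failed to map. The refill error pattern in generic form (an
illustrative sketch; the allocation call and names are placeholders, not
a quote of this driver):

    new_skb = netdev_alloc_skb_ip_align(ndev, len);
    if (!new_skb)
            return;

    addr = dma_map_single(dev, new_skb->data, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, addr)) {
            dev_kfree_skb(new_skb); /* free the unused replacement buffer */
            return;                 /* give up on this refill */
    }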