net: axienet: Clean up device used for DMA calls
author Robert Hancock <robert.hancock@calian.com>
Sat, 5 Mar 2022 02:24:38 +0000 (20:24 -0600)
committer David S. Miller <davem@davemloft.net>
Sat, 5 Mar 2022 11:12:08 +0000 (11:12 +0000)
Instead of using ndev->dev.parent to find the correct device to use for
DMA API calls, just use the dev pointer stored in the driver's private
axienet_local structure (lp->dev).
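This is safe because both expressions resolve to the same struct device.
Below is a minimal sketch of the relevant probe-time setup, abridged
from the driver's axienet_probe(); the function name
axienet_probe_sketch and the trimmed error handling are illustrative
only, and the snippet assumes the declarations from
<linux/etherdevice.h>, <linux/platform_device.h> and the driver's
xilinx_axienet.h:

	/* SET_NETDEV_DEV() stores &pdev->dev as ndev->dev.parent, and the
	 * driver keeps the same pointer in lp->dev, so lp->dev is a direct
	 * handle to the device previously reached via ndev->dev.parent.
	 */
	static int axienet_probe_sketch(struct platform_device *pdev)
	{
		struct net_device *ndev;
		struct axienet_local *lp;

		ndev = alloc_etherdev(sizeof(*lp));
		if (!ndev)
			return -ENOMEM;

		SET_NETDEV_DEV(ndev, &pdev->dev); /* ndev->dev.parent = &pdev->dev */

		lp = netdev_priv(ndev);
		lp->ndev = ndev;
		lp->dev = &pdev->dev;	/* device handle used for DMA API calls */

		return 0;
	}

Using lp->dev avoids the extra pointer chase through the net_device and
makes the device used for DMA explicit in the driver's private data.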

Signed-off-by: Robert Hancock <robert.hancock@calian.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 5b18f5f..be17f6b 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -190,7 +190,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
        struct axienet_local *lp = netdev_priv(ndev);
 
        /* If we end up here, tx_bd_v must have been DMA allocated. */
-       dma_free_coherent(ndev->dev.parent,
+       dma_free_coherent(lp->dev,
                          sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
                          lp->tx_bd_v,
                          lp->tx_bd_p);
@@ -215,12 +215,12 @@ static void axienet_dma_bd_release(struct net_device *ndev)
                 */
                if (lp->rx_bd_v[i].cntrl) {
                        phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
-                       dma_unmap_single(ndev->dev.parent, phys,
+                       dma_unmap_single(lp->dev, phys,
                                         lp->max_frm_size, DMA_FROM_DEVICE);
                }
        }
 
-       dma_free_coherent(ndev->dev.parent,
+       dma_free_coherent(lp->dev,
                          sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
                          lp->rx_bd_v,
                          lp->rx_bd_p);
@@ -249,13 +249,13 @@ static int axienet_dma_bd_init(struct net_device *ndev)
        lp->rx_bd_ci = 0;
 
        /* Allocate the Tx and Rx buffer descriptors. */
-       lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+       lp->tx_bd_v = dma_alloc_coherent(lp->dev,
                                         sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
                                         &lp->tx_bd_p, GFP_KERNEL);
        if (!lp->tx_bd_v)
                return -ENOMEM;
 
-       lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+       lp->rx_bd_v = dma_alloc_coherent(lp->dev,
                                         sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
                                         &lp->rx_bd_p, GFP_KERNEL);
        if (!lp->rx_bd_v)
@@ -285,9 +285,9 @@ static int axienet_dma_bd_init(struct net_device *ndev)
                        goto out;
 
                lp->rx_bd_v[i].skb = skb;
-               addr = dma_map_single(ndev->dev.parent, skb->data,
+               addr = dma_map_single(lp->dev, skb->data,
                                      lp->max_frm_size, DMA_FROM_DEVICE);
-               if (dma_mapping_error(ndev->dev.parent, addr)) {
+               if (dma_mapping_error(lp->dev, addr)) {
                        netdev_err(ndev, "DMA mapping error\n");
                        goto out;
                }
@@ -636,7 +636,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
                /* Ensure we see complete descriptor update */
                dma_rmb();
                phys = desc_get_phys_addr(lp, cur_p);
-               dma_unmap_single(ndev->dev.parent, phys,
+               dma_unmap_single(lp->dev, phys,
                                 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
                                 DMA_TO_DEVICE);
 
@@ -774,9 +774,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
        }
 
-       phys = dma_map_single(ndev->dev.parent, skb->data,
+       phys = dma_map_single(lp->dev, skb->data,
                              skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+       if (unlikely(dma_mapping_error(lp->dev, phys))) {
                if (net_ratelimit())
                        netdev_err(ndev, "TX DMA mapping error\n");
                ndev->stats.tx_dropped++;
@@ -790,11 +790,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        lp->tx_bd_tail = 0;
                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                frag = &skb_shinfo(skb)->frags[ii];
-               phys = dma_map_single(ndev->dev.parent,
+               phys = dma_map_single(lp->dev,
                                      skb_frag_address(frag),
                                      skb_frag_size(frag),
                                      DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+               if (unlikely(dma_mapping_error(lp->dev, phys))) {
                        if (net_ratelimit())
                                netdev_err(ndev, "TX DMA mapping error\n");
                        ndev->stats.tx_dropped++;
@@ -872,7 +872,7 @@ static void axienet_recv(struct net_device *ndev)
                        length = cur_p->app4 & 0x0000FFFF;
 
                        phys = desc_get_phys_addr(lp, cur_p);
-                       dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
+                       dma_unmap_single(lp->dev, phys, lp->max_frm_size,
                                         DMA_FROM_DEVICE);
 
                        skb_put(skb, length);
@@ -905,10 +905,10 @@ static void axienet_recv(struct net_device *ndev)
                if (!new_skb)
                        break;
 
-               phys = dma_map_single(ndev->dev.parent, new_skb->data,
+               phys = dma_map_single(lp->dev, new_skb->data,
                                      lp->max_frm_size,
                                      DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+               if (unlikely(dma_mapping_error(lp->dev, phys))) {
                        if (net_ratelimit())
                                netdev_err(ndev, "RX DMA mapping error\n");
                        dev_kfree_skb(new_skb);
@@ -1712,7 +1712,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
                if (cur_p->cntrl) {
                        dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
 
-                       dma_unmap_single(ndev->dev.parent, addr,
+                       dma_unmap_single(lp->dev, addr,
                                         (cur_p->cntrl &
                                          XAXIDMA_BD_CTRL_LENGTH_MASK),
                                         DMA_TO_DEVICE);