bnxt_en: Refactor bnxt_init_one_rx_ring().
Author:     Michael Chan <michael.chan@broadcom.com>
Date:       Sun, 4 Oct 2020 19:22:57 +0000 (15:22 -0400)
Committer:  David S. Miller <davem@davemloft.net>
CommitDate: Sun, 4 Oct 2020 21:41:05 +0000 (14:41 -0700)
bnxt_init_one_rx_ring() includes logic to initialize the BDs for one RX
ring and to allocate the buffers.  Separate the allocation logic into a
new bnxt_alloc_one_rx_ring() function.  The allocation function will be
used later to allocate new buffers for one specified RX ring when we
reset that RX ring.
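
The shape of the change, stated up front: the one-time BD setup stays in
bnxt_init_one_rx_ring(), while the buffer-filling loops move into
bnxt_alloc_one_rx_ring(), which the init path then calls as its last step.
Below is a small stand-alone C sketch of that split; the struct and
function names are illustrative stand-ins, not the driver's own code (the
real bodies are in the diff further down).

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for bnxt_rx_ring_info; not the driver's struct. */
struct rx_ring {
        void **bufs;
        int size;
};

/* Plays the role of bnxt_alloc_one_rx_ring(): only (re)fill the ring with
 * buffers, so it can be reused on its own when one ring is reset later.
 */
static int alloc_one_rx_ring(struct rx_ring *rxr, int ring_nr)
{
        int i;

        for (i = 0; i < rxr->size; i++) {
                rxr->bufs[i] = malloc(2048);
                if (!rxr->bufs[i]) {
                        fprintf(stderr, "init'ed rx ring %d with %d/%d bufs only\n",
                                ring_nr, i, rxr->size);
                        break;  /* partial fill is tolerated, as in the driver */
                }
        }
        return 0;
}

/* Plays the role of bnxt_init_one_rx_ring(): one-time ring setup, then
 * delegate buffer allocation to the helper as the final step.
 */
static int init_one_rx_ring(struct rx_ring *rxr, int ring_nr)
{
        /* ... BD type programming would happen here in the real driver ... */
        return alloc_one_rx_ring(rxr, ring_nr);
}

int main(void)
{
        struct rx_ring ring = { .size = 4 };
        int rc, i;

        ring.bufs = calloc(ring.size, sizeof(*ring.bufs));
        if (!ring.bufs)
                return 1;
        rc = init_one_rx_ring(&ring, 0);
        for (i = 0; i < ring.size; i++)
                free(ring.bufs[i]);
        free(ring.bufs);
        return rc;
}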

Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/broadcom/bnxt/bnxt.c

index 6d7e197c875c6fe7702785cb760f11b6799464e7..ef0267060a46052b2b0f8c64552a641580a4c92f 100644
@@ -3163,31 +3163,16 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
        }
 }
 
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
 {
+       struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
        struct net_device *dev = bp->dev;
-       struct bnxt_rx_ring_info *rxr;
-       struct bnxt_ring_struct *ring;
-       u32 prod, type;
+       u32 prod;
        int i;
 
-       type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
-               RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
-
-       if (NET_IP_ALIGN == 2)
-               type |= RX_BD_FLAGS_SOP;
-
-       rxr = &bp->rx_ring[ring_nr];
-       ring = &rxr->rx_ring_struct;
-       bnxt_init_rxbd_pages(ring, type);
-
-       if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
-               bpf_prog_add(bp->xdp_prog, 1);
-               rxr->xdp_prog = bp->xdp_prog;
-       }
        prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
-               if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+               if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
                        netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
                                    ring_nr, i, bp->rx_ring_size);
                        break;
@@ -3195,22 +3180,13 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
                prod = NEXT_RX(prod);
        }
        rxr->rx_prod = prod;
-       ring->fw_ring_id = INVALID_HW_RING_ID;
-
-       ring = &rxr->rx_agg_ring_struct;
-       ring->fw_ring_id = INVALID_HW_RING_ID;
 
        if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
                return 0;
 
-       type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
-               RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
-
-       bnxt_init_rxbd_pages(ring, type);
-
        prod = rxr->rx_agg_prod;
        for (i = 0; i < bp->rx_agg_ring_size; i++) {
-               if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+               if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
                        netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
                                    ring_nr, i, bp->rx_ring_size);
                        break;
@@ -3219,30 +3195,58 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
        }
        rxr->rx_agg_prod = prod;
 
-       if (bp->flags & BNXT_FLAG_TPA) {
-               if (rxr->rx_tpa) {
-                       u8 *data;
-                       dma_addr_t mapping;
+       if (rxr->rx_tpa) {
+               dma_addr_t mapping;
+               u8 *data;
 
-                       for (i = 0; i < bp->max_tpa; i++) {
-                               data = __bnxt_alloc_rx_data(bp, &mapping,
-                                                           GFP_KERNEL);
-                               if (!data)
-                                       return -ENOMEM;
+               for (i = 0; i < bp->max_tpa; i++) {
+                       data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
+                       if (!data)
+                               return -ENOMEM;
 
-                               rxr->rx_tpa[i].data = data;
-                               rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
-                               rxr->rx_tpa[i].mapping = mapping;
-                       }
-               } else {
-                       netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
-                       return -ENOMEM;
+                       rxr->rx_tpa[i].data = data;
+                       rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+                       rxr->rx_tpa[i].mapping = mapping;
                }
        }
-
        return 0;
 }
 
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+       struct bnxt_rx_ring_info *rxr;
+       struct bnxt_ring_struct *ring;
+       u32 type;
+
+       type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+               RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+       if (NET_IP_ALIGN == 2)
+               type |= RX_BD_FLAGS_SOP;
+
+       rxr = &bp->rx_ring[ring_nr];
+       ring = &rxr->rx_ring_struct;
+       bnxt_init_rxbd_pages(ring, type);
+
+       if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+               bpf_prog_add(bp->xdp_prog, 1);
+               rxr->xdp_prog = bp->xdp_prog;
+       }
+       ring->fw_ring_id = INVALID_HW_RING_ID;
+
+       ring = &rxr->rx_agg_ring_struct;
+       ring->fw_ring_id = INVALID_HW_RING_ID;
+
+       if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
+               type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+                       RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+               bnxt_init_rxbd_pages(ring, type);
+       }
+
+       return bnxt_alloc_one_rx_ring(bp, ring_nr);
+}
+
 static void bnxt_init_cp_rings(struct bnxt *bp)
 {
        int i, j;