bnxt_en: Use the unified RX page pool buffers for XDP and non-XDP
author	Somnath Kotur <somnath.kotur@broadcom.com>
Thu, 17 Aug 2023 23:19:06 +0000 (16:19 -0700)
committer	Jakub Kicinski <kuba@kernel.org>
Sat, 19 Aug 2023 02:13:58 +0000 (19:13 -0700)
Convert to use the page pool buffers for the aggregation ring when
running in non-XDP mode.  This simplifies the driver and we benefit
from the recycling of pages.  Adjust the page pool size to account
for the aggregation ring size.

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20230817231911.165035-2-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h

index 7be917a..6b815a2 100644 (file)
@@ -877,48 +877,15 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
-       struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;
 
-       if (BNXT_RX_PAGE_MODE(bp)) {
-               page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
-               if (!page)
-                       return -ENOMEM;
-
-       } else {
-               if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
-                       page = rxr->rx_page;
-                       if (!page) {
-                               page = alloc_page(gfp);
-                               if (!page)
-                                       return -ENOMEM;
-                               rxr->rx_page = page;
-                               rxr->rx_page_offset = 0;
-                       }
-                       offset = rxr->rx_page_offset;
-                       rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
-                       if (rxr->rx_page_offset == PAGE_SIZE)
-                               rxr->rx_page = NULL;
-                       else
-                               get_page(page);
-               } else {
-                       page = alloc_page(gfp);
-                       if (!page)
-                               return -ENOMEM;
-               }
+       page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
-               mapping = dma_map_page_attrs(&pdev->dev, page, offset,
-                                            BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-                                            DMA_ATTR_WEAK_ORDERING);
-               if (dma_mapping_error(&pdev->dev, mapping)) {
-                       __free_page(page);
-                       return -EIO;
-               }
-       }
+       if (!page)
+               return -ENOMEM;
 
        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
@@ -1204,6 +1171,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
        total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
                                             agg_bufs, tpa, NULL);
        if (!total_frag_len) {
+               skb_mark_for_recycle(skb);
                dev_kfree_skb(skb);
                return NULL;
        }
@@ -1794,6 +1762,7 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
                return;
        }
        skb_record_rx_queue(skb, bnapi->index);
+       skb_mark_for_recycle(skb);
        napi_gro_receive(&bnapi->napi, skb);
 }
 
@@ -3002,30 +2971,16 @@ skip_rx_buf_free:
                if (!page)
                        continue;
 
-               if (BNXT_RX_PAGE_MODE(bp)) {
-                       dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-                                            BNXT_RX_PAGE_SIZE, bp->rx_dir,
-                                            DMA_ATTR_WEAK_ORDERING);
-                       rx_agg_buf->page = NULL;
-                       __clear_bit(i, rxr->rx_agg_bmap);
-
-                       page_pool_recycle_direct(rxr->page_pool, page);
-               } else {
-                       dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-                                            BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-                                            DMA_ATTR_WEAK_ORDERING);
-                       rx_agg_buf->page = NULL;
-                       __clear_bit(i, rxr->rx_agg_bmap);
+               dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+                                    BNXT_RX_PAGE_SIZE, bp->rx_dir,
+                                    DMA_ATTR_WEAK_ORDERING);
+               rx_agg_buf->page = NULL;
+               __clear_bit(i, rxr->rx_agg_bmap);
 
-                       __free_page(page);
-               }
+               page_pool_recycle_direct(rxr->page_pool, page);
        }
 
 skip_rx_agg_free:
-       if (rxr->rx_page) {
-               __free_page(rxr->rx_page);
-               rxr->rx_page = NULL;
-       }
        map = rxr->rx_tpa_idx_map;
        if (map)
                memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
@@ -3244,7 +3199,9 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 {
        struct page_pool_params pp = { 0 };
 
-       pp.pool_size = bp->rx_ring_size;
+       pp.pool_size = bp->rx_agg_ring_size;
+       if (BNXT_RX_PAGE_MODE(bp))
+               pp.pool_size += bp->rx_ring_size;
        pp.nid = dev_to_node(&bp->pdev->dev);
        pp.napi = &rxr->bnapi->napi;
        pp.dev = &bp->pdev->dev;
index 3629188..d6a1eaa 100644 (file)
@@ -919,9 +919,6 @@ struct bnxt_rx_ring_info {
        unsigned long           *rx_agg_bmap;
        u16                     rx_agg_bmap_size;
 
-       struct page             *rx_page;
-       unsigned int            rx_page_offset;
-
        dma_addr_t              rx_desc_mapping[MAX_RX_PAGES];
        dma_addr_t              rx_agg_desc_mapping[MAX_RX_AGG_PAGES];