net: mana: Add page pool for RX buffers
author Haiyang Zhang <haiyangz@microsoft.com>
Fri, 4 Aug 2023 20:33:53 +0000 (13:33 -0700)
committer David S. Miller <davem@davemloft.net>
Sun, 6 Aug 2023 07:36:06 +0000 (08:36 +0100)
Add a page pool for RX buffers, for faster buffer recycling and reduced CPU
usage.

The standard page pool API is used.

In an iperf test with 128 threads, this patch improved throughput by
12-15% and decreased the usage of the IRQ-associated CPU from 99-100% to
10-50%.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/microsoft/mana/mana_en.c
include/net/mana/mana.h

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 21665f1..a08023c 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1415,8 +1415,8 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
        return skb;
 }
 
-static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
-                       struct mana_rxq *rxq)
+static void mana_rx_skb(void *buf_va, bool from_pool,
+                       struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
 {
        struct mana_stats_rx *rx_stats = &rxq->stats;
        struct net_device *ndev = rxq->ndev;
@@ -1449,6 +1449,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
        if (!skb)
                goto drop;
 
+       if (from_pool)
+               skb_mark_for_recycle(skb);
+
        skb->dev = napi->dev;
 
        skb->protocol = eth_type_trans(skb, ndev);
@@ -1499,9 +1502,14 @@ drop_xdp:
        u64_stats_update_end(&rx_stats->syncp);
 
 drop:
-       WARN_ON_ONCE(rxq->xdp_save_va);
-       /* Save for reuse */
-       rxq->xdp_save_va = buf_va;
+       if (from_pool) {
+               page_pool_recycle_direct(rxq->page_pool,
+                                        virt_to_head_page(buf_va));
+       } else {
+               WARN_ON_ONCE(rxq->xdp_save_va);
+               /* Save for reuse */
+               rxq->xdp_save_va = buf_va;
+       }
 
        ++ndev->stats.rx_dropped;
 
@@ -1509,11 +1517,13 @@ drop:
 }
 
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
-                            dma_addr_t *da, bool is_napi)
+                            dma_addr_t *da, bool *from_pool, bool is_napi)
 {
        struct page *page;
        void *va;
 
+       *from_pool = false;
+
        /* Reuse XDP dropped page if available */
        if (rxq->xdp_save_va) {
                va = rxq->xdp_save_va;
@@ -1534,17 +1544,22 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
                        return NULL;
                }
        } else {
-               page = dev_alloc_page();
+               page = page_pool_dev_alloc_pages(rxq->page_pool);
                if (!page)
                        return NULL;
 
+               *from_pool = true;
                va = page_to_virt(page);
        }
 
        *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
                             DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *da)) {
-               put_page(virt_to_head_page(va));
+               if (*from_pool)
+                       page_pool_put_full_page(rxq->page_pool, page, false);
+               else
+                       put_page(virt_to_head_page(va));
+
                return NULL;
        }
 
@@ -1553,21 +1568,25 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 
 /* Allocate frag for rx buffer, and save the old buf */
 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
-                              struct mana_recv_buf_oob *rxoob, void **old_buf)
+                              struct mana_recv_buf_oob *rxoob, void **old_buf,
+                              bool *old_fp)
 {
+       bool from_pool;
        dma_addr_t da;
        void *va;
 
-       va = mana_get_rxfrag(rxq, dev, &da, true);
+       va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
        if (!va)
                return;
 
        dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
                         DMA_FROM_DEVICE);
        *old_buf = rxoob->buf_va;
+       *old_fp = rxoob->from_pool;
 
        rxoob->buf_va = va;
        rxoob->sgl[0].address = da;
+       rxoob->from_pool = from_pool;
 }
 
 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
@@ -1581,6 +1600,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
        struct device *dev = gc->dev;
        void *old_buf = NULL;
        u32 curr, pktlen;
+       bool old_fp;
 
        apc = netdev_priv(ndev);
 
@@ -1623,12 +1643,12 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
        rxbuf_oob = &rxq->rx_oobs[curr];
        WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
 
-       mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf);
+       mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
 
        /* Unsuccessful refill will have old_buf == NULL.
         * In this case, mana_rx_skb() will drop the packet.
         */
-       mana_rx_skb(old_buf, oob, rxq);
+       mana_rx_skb(old_buf, old_fp, oob, rxq);
 
 drop:
        mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
@@ -1888,6 +1908,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
        struct mana_recv_buf_oob *rx_oob;
        struct device *dev = gc->dev;
        struct napi_struct *napi;
+       struct page *page;
        int i;
 
        if (!rxq)
@@ -1920,10 +1941,18 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
                dma_unmap_single(dev, rx_oob->sgl[0].address,
                                 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
 
-               put_page(virt_to_head_page(rx_oob->buf_va));
+               page = virt_to_head_page(rx_oob->buf_va);
+
+               if (rx_oob->from_pool)
+                       page_pool_put_full_page(rxq->page_pool, page, false);
+               else
+                       put_page(page);
+
                rx_oob->buf_va = NULL;
        }
 
+       page_pool_destroy(rxq->page_pool);
+
        if (rxq->gdma_rq)
                mana_gd_destroy_queue(gc, rxq->gdma_rq);
 
@@ -1934,18 +1963,20 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
                            struct mana_rxq *rxq, struct device *dev)
 {
        struct mana_port_context *mpc = netdev_priv(rxq->ndev);
+       bool from_pool = false;
        dma_addr_t da;
        void *va;
 
        if (mpc->rxbufs_pre)
                va = mana_get_rxbuf_pre(rxq, &da);
        else
-               va = mana_get_rxfrag(rxq, dev, &da, false);
+               va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
 
        if (!va)
                return -ENOMEM;
 
        rx_oob->buf_va = va;
+       rx_oob->from_pool = from_pool;
 
        rx_oob->sgl[0].address = da;
        rx_oob->sgl[0].size = rxq->datasize;
@@ -2015,6 +2046,26 @@ static int mana_push_wqe(struct mana_rxq *rxq)
        return 0;
 }
 
+static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
+{
+       struct page_pool_params pprm = {};
+       int ret;
+
+       pprm.pool_size = RX_BUFFERS_PER_QUEUE;
+       pprm.nid = gc->numa_node;
+       pprm.napi = &rxq->rx_cq.napi;
+
+       rxq->page_pool = page_pool_create(&pprm);
+
+       if (IS_ERR(rxq->page_pool)) {
+               ret = PTR_ERR(rxq->page_pool);
+               rxq->page_pool = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
                                        u32 rxq_idx, struct mana_eq *eq,
                                        struct net_device *ndev)
@@ -2044,6 +2095,13 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
        mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
                           &rxq->headroom);
 
+       /* Create page pool for RX queue */
+       err = mana_create_page_pool(rxq, gc);
+       if (err) {
+               netdev_err(ndev, "Create page pool err:%d\n", err);
+               goto out;
+       }
+
        err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
        if (err)
                goto out;
@@ -2115,8 +2173,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 
        WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
                                 cq->napi.napi_id));
-       WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
-                                          MEM_TYPE_PAGE_SHARED, NULL));
+       WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+                                          rxq->page_pool));
 
        napi_enable(&cq->napi);
 
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 1ccdca0..8799901 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -282,6 +282,7 @@ struct mana_recv_buf_oob {
        struct gdma_wqe_request wqe_req;
 
        void *buf_va;
+       bool from_pool; /* allocated from a page pool */
 
        /* SGL of the buffer going to be sent has part of the work request. */
        u32 num_sge;
@@ -332,6 +333,8 @@ struct mana_rxq {
        bool xdp_flush;
        int xdp_rc; /* XDP redirect return code */
 
+       struct page_pool *page_pool;
+
        /* MUST BE THE LAST MEMBER:
         * Each receive buffer has an associated mana_recv_buf_oob.
         */