net: mana: Reuse XDP dropped page
author: Haiyang Zhang <haiyangz@microsoft.com>
Sat, 29 Jan 2022 02:03:38 +0000 (18:03 -0800)
committer: David S. Miller <davem@davemloft.net>
Mon, 31 Jan 2022 15:39:58 +0000 (15:39 +0000)
Reuse the dropped page in RX path to save page allocation
overhead.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/microsoft/mana/mana.h
drivers/net/ethernet/microsoft/mana/mana_en.c

index 8ead960..d36405a 100644 (file)
@@ -310,6 +310,7 @@ struct mana_rxq {
 
        struct bpf_prog __rcu *bpf_prog;
        struct xdp_rxq_info xdp_rxq;
+       struct page *xdp_save_page;
 
        /* MUST BE THE LAST MEMBER:
         * Each receive buffer has an associated mana_recv_buf_oob.
index 12067bf..69e791e 100644 (file)
@@ -1059,7 +1059,9 @@ drop_xdp:
        u64_stats_update_end(&rx_stats->syncp);
 
 drop:
-       free_page((unsigned long)buf_va);
+       WARN_ON_ONCE(rxq->xdp_save_page);
+       rxq->xdp_save_page = virt_to_page(buf_va);
+
        ++ndev->stats.rx_dropped;
 
        return;
@@ -1116,7 +1118,13 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
        rxbuf_oob = &rxq->rx_oobs[curr];
        WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
 
-       new_page = alloc_page(GFP_ATOMIC);
+       /* Reuse XDP dropped page if available */
+       if (rxq->xdp_save_page) {
+               new_page = rxq->xdp_save_page;
+               rxq->xdp_save_page = NULL;
+       } else {
+               new_page = alloc_page(GFP_ATOMIC);
+       }
 
        if (new_page) {
                da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
@@ -1403,6 +1411,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 
        mana_deinit_cq(apc, &rxq->rx_cq);
 
+       if (rxq->xdp_save_page)
+               __free_page(rxq->xdp_save_page);
+
        for (i = 0; i < rxq->num_rx_buf; i++) {
                rx_oob = &rxq->rx_oobs[i];