bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff
authorAndy Gospodarek <gospo@broadcom.com>
Fri, 8 Apr 2022 07:58:56 +0000 (03:58 -0400)
committerDavid S. Miller <davem@davemloft.net>
Fri, 8 Apr 2022 10:52:47 +0000 (11:52 +0100)
Move initialization of xdp_buff outside of bnxt_rx_xdp to prepare
for allowing bnxt_rx_xdp to operate on multibuffer xdp_buffs.

v2: Fix uninitialized variables warning in bnxt_xdp.c.
v3: Add new define BNXT_PAGE_MODE_BUF_SIZE

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h

index 874fad0a5cf8fbc78876a14ce0d3b5e952416d69..826d94c49d26399588f6c8aeef4300245806bcc8 100644 (file)
@@ -1731,6 +1731,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        u8 *data_ptr, agg_bufs, cmp_type;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
+       struct xdp_buff xdp;
        u32 flags, misc;
        void *data;
        int rc = 0;
@@ -1839,11 +1840,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        len = flags >> RX_CMP_LEN_SHIFT;
        dma_addr = rx_buf->mapping;
 
-       if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
-               rc = 1;
-               goto next_rx;
+       if (bnxt_xdp_attached(bp, rxr)) {
+               bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+               if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+                       rc = 1;
+                       goto next_rx;
+               }
        }
-
        if (len <= bp->rx_copy_thresh) {
                skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
                bnxt_reuse_rx_data(rxr, cons, data);
index 98453a78cbd0408ce994677d31da782e0d49307c..0f35459d520692d475aaabab939b78131a51f3d6 100644 (file)
@@ -591,10 +591,12 @@ struct nqe_cn {
 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
 
 #define BNXT_MAX_MTU           9500
-#define BNXT_MAX_PAGE_MODE_MTU \
+#define BNXT_PAGE_MODE_BUF_SIZE \
        ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -       \
-        XDP_PACKET_HEADROOM - \
-        SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
+        XDP_PACKET_HEADROOM)
+#define BNXT_MAX_PAGE_MODE_MTU \
+       (BNXT_PAGE_MODE_BUF_SIZE - \
+        SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
 
 #define BNXT_MIN_PKT_SIZE      52
 
index 03b1d6c04504856a43c4c5649f3fb2a8be765c34..a3924e6030fe5e110218664a1e139215db376883 100644 (file)
@@ -106,18 +106,44 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
        }
 }
 
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+       struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+
+       return !!xdp_prog;
+}
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+                       u16 cons, u8 **data_ptr, unsigned int *len,
+                       struct xdp_buff *xdp)
+{
+       struct bnxt_sw_rx_bd *rx_buf;
+       struct pci_dev *pdev;
+       dma_addr_t mapping;
+       u32 offset;
+
+       pdev = bp->pdev;
+       rx_buf = &rxr->rx_buf_ring[cons];
+       offset = bp->rx_offset;
+
+       mapping = rx_buf->mapping - bp->rx_dma_offset;
+       dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+       xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+       xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+}
+
 /* returns the following:
  * true    - packet consumed by XDP and new buffer is allocated.
  * false   - packet should be passed to the stack.
  */
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-                struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+                struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
 {
        struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_rx_bd *rx_buf;
        struct pci_dev *pdev;
-       struct xdp_buff xdp;
        dma_addr_t mapping;
        void *orig_data;
        u32 tx_avail;
@@ -128,16 +154,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                return false;
 
        pdev = bp->pdev;
-       rx_buf = &rxr->rx_buf_ring[cons];
        offset = bp->rx_offset;
 
-       mapping = rx_buf->mapping - bp->rx_dma_offset;
-       dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
        txr = rxr->bnapi->tx_ring;
        /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
-       xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
-       xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
        orig_data = xdp.data;
 
        act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -150,15 +170,17 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                *event &= ~BNXT_RX_EVENT;
 
        *len = xdp.data_end - xdp.data;
-       if (orig_data != xdp.data) {
+       if (orig_data != xdp.data)
                offset = xdp.data - xdp.data_hard_start;
-               *data_ptr = xdp.data_hard_start + offset;
-       }
+
        switch (act) {
        case XDP_PASS:
                return false;
 
        case XDP_TX:
+               rx_buf = &rxr->rx_buf_ring[cons];
+               mapping = rx_buf->mapping - bp->rx_dma_offset;
+
                if (tx_avail < 1) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        bnxt_reuse_rx_data(rxr, cons, page);
@@ -177,6 +199,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                 * redirect is coming from a frame received by the
                 * bnxt_en driver.
                 */
+               rx_buf = &rxr->rx_buf_ring[cons];
+               mapping = rx_buf->mapping - bp->rx_dma_offset;
                dma_unmap_page_attrs(&pdev->dev, mapping,
                                     PAGE_SIZE, bp->rx_dir,
                                     DMA_ATTR_WEAK_ORDERING);
index 067bb5e821f542bbc24e51cd7f303667f1a8ade5..97e7905dbb209dae95b1b404fffcd1b24a8ba8fe 100644 (file)
@@ -17,10 +17,15 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   dma_addr_t mapping, u32 len);
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-                struct page *page, u8 **data_ptr, unsigned int *len,
+                struct xdp_buff xdp, struct page *page, unsigned int *len,
                 u8 *event);
 int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
 int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
                  struct xdp_frame **frames, u32 flags);
 
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+                       u16 cons, u8 **data_ptr, unsigned int *len,
+                       struct xdp_buff *xdp);
 #endif