ixgbe, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full
author Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Wed, 13 Apr 2022 15:30:08 +0000 (17:30 +0200)
committer Daniel Borkmann <daniel@iogearbox.net>
Fri, 15 Apr 2022 19:10:45 +0000 (21:10 +0200)
When the XSK pool uses the need_wakeup feature, correlate the -ENOBUFS
returned from xdp_do_redirect() with the XSK Rx queue being full. In such
a case, terminate the Rx processing that is being done on the current HW
Rx ring and let user space consume descriptors from the XSK Rx queue so
that there is room the driver can use later on.
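
For illustration, a minimal user-space sketch of the consumer side this
relies on, assuming the libxdp xsk helpers; drain_rx_ring, BATCH and the
poll() usage are hypothetical and not part of this patch:

  /* Hypothetical AF_XDP consumer loop: releasing Rx descriptors is what
   * frees the room that lets the driver resume filling the XSK Rx queue.
   */
  #include <poll.h>
  #include <xdp/xsk.h>   /* libxdp; older setups ship this as <bpf/xsk.h> */

  #define BATCH 64

  static void drain_rx_ring(struct xsk_ring_cons *rx, int xsk_fd)
  {
          struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN };
          unsigned int idx_rx, rcvd;

          rcvd = xsk_ring_cons__peek(rx, BATCH, &idx_rx);
          if (!rcvd) {
                  /* With need_wakeup the kernel only does Rx work when
                   * asked, so block in poll() instead of busy-spinning.
                   */
                  poll(&pfd, 1, -1);
                  return;
          }

          /* ... process rcvd descriptors starting at idx_rx ... */

          /* Releasing the entries makes room in the XSK Rx queue that
           * the driver can use on its next NAPI poll.
           */
          xsk_ring_cons__release(rx, rcvd);
  }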

Introduce a new internal return code, IXGBE_XDP_EXIT, that indicates the
case described above.

Note that terminating the Rx side does not affect the Tx processing that
is bound to the same NAPI context, nor the other Rx rings.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220413153015.453864-8-maciej.fijalkowski@intel.com
drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c

index bba3feaf3318cd079c20d25830919bb2daf62b32..f1f69ce67420c95a4d1c5d42b3471e0fab33b9cd 100644 (file)
@@ -8,6 +8,7 @@
 #define IXGBE_XDP_CONSUMED     BIT(0)
 #define IXGBE_XDP_TX           BIT(1)
 #define IXGBE_XDP_REDIR        BIT(2)
+#define IXGBE_XDP_EXIT         BIT(3)
 
 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
                       IXGBE_TXD_CMD_RS)
index 85497bf1062477f902612098f802ca7ae44440dc..bdd70b85a787fded74d0c777228c9aedda879cc4 100644 (file)
@@ -109,9 +109,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               if (err)
-                       goto out_failure;
-               return IXGBE_XDP_REDIR;
+               if (!err)
+                       return IXGBE_XDP_REDIR;
+               if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+                       result = IXGBE_XDP_EXIT;
+               else
+                       result = IXGBE_XDP_CONSUMED;
+               goto out_failure;
        }
 
        switch (act) {
@@ -130,16 +134,17 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
                if (result == IXGBE_XDP_CONSUMED)
                        goto out_failure;
                break;
+       case XDP_DROP:
+               result = IXGBE_XDP_CONSUMED;
+               break;
        default:
                bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
+               result = IXGBE_XDP_CONSUMED;
 out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
-       case XDP_DROP:
-               result = IXGBE_XDP_CONSUMED;
-               break;
        }
        return result;
 }
@@ -303,12 +308,16 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
                xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
                xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
 
-               if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)))
+               if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
                        xdp_xmit |= xdp_res;
-               else if (xdp_res == IXGBE_XDP_CONSUMED)
+               } else if (xdp_res == IXGBE_XDP_EXIT) {
+                       failure = true;
+                       break;
+               } else if (xdp_res == IXGBE_XDP_CONSUMED) {
                        xsk_buff_free(bi->xdp);
-               else
+               } else if (xdp_res == IXGBE_XDP_PASS) {
                        goto construct_skb;
+               }
 
                bi->xdp = NULL;
                total_rx_packets++;
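
For context, a sketch of the tail of ixgbe_clean_rx_irq_zc() that the new
failure path feeds into (not part of this diff, approximated from the
pattern the Intel AF_XDP drivers share):

  /* Approximation of existing code: with need_wakeup in use, a failure
   * (including the new early exit) flags the Rx ring so user space knows
   * it must consume descriptors and wake the driver; without need_wakeup,
   * a failure reports the full budget so NAPI polls this ring again.
   */
  if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
          if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
                  xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
          else
                  xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

          return (int)total_rx_packets;
  }
  return failure ? budget : (int)total_rx_packets;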