xsk: recycle buffer in case Rx queue was full
author	Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Wed, 24 Jan 2024 19:15:52 +0000 (20:15 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 1 Feb 2024 00:19:04 +0000 (16:19 -0800)
[ Upstream commit 269009893146c495f41e9572dd9319e787c2eba9 ]

Add the missing xsk_buff_free() call for the case when __xsk_rcv_zc() fails
to produce a descriptor to the XSK Rx queue.

Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Link: https://lore.kernel.org/r/20240124191602.566724-2-maciej.fijalkowski@intel.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
net/xdp/xsk.c

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 774a6d1916e404492925f1a1aee2fecb51984bab..d849dc04a334397f45498dc605c7214a9f7444fb 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -166,8 +166,10 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
                contd = XDP_PKT_CONTD;
 
        err = __xsk_rcv_zc(xs, xskb, len, contd);
-       if (err || likely(!frags))
-               goto out;
+       if (err)
+               goto err;
+       if (likely(!frags))
+               return 0;
 
        xskb_list = &xskb->pool->xskb_list;
        list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
@@ -176,11 +178,13 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
                len = pos->xdp.data_end - pos->xdp.data;
                err = __xsk_rcv_zc(xs, pos, len, contd);
                if (err)
-                       return err;
+                       goto err;
                list_del(&pos->xskb_list_node);
        }
 
-out:
+       return 0;
+err:
+       xsk_buff_free(xdp);
        return err;
 }
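
For reference, a sketch of how xsk_rcv_zc() reads with this change applied, reconstructed from the hunks above. The local variable declarations and the list_is_singular() handling are taken from the surrounding kernel code as I understand it, not from this patch, so treat them as assumptions:

static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u32 frags = xdp_buff_has_frags(xdp);
	struct xdp_buff_xsk *pos, *tmp;
	struct list_head *xskb_list;
	u32 contd = 0;
	int err;

	if (frags)
		contd = XDP_PKT_CONTD;

	err = __xsk_rcv_zc(xs, xskb, len, contd);
	if (err)
		goto err;		/* head buffer was not posted to the Rx ring */
	if (likely(!frags))
		return 0;

	xskb_list = &xskb->pool->xskb_list;
	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		if (list_is_singular(xskb_list))
			contd = 0;	/* last fragment: clear the continuation flag */
		len = pos->xdp.data_end - pos->xdp.data;
		err = __xsk_rcv_zc(xs, pos, len, contd);
		if (err)
			goto err;	/* a later fragment did not fit in the Rx ring */
		list_del(&pos->xskb_list_node);
	}

	return 0;
err:
	/* Rx queue was full (or another failure): recycle the buffer, and with
	 * it any frags still linked on xskb_list, instead of leaking it.
	 */
	xsk_buff_free(xdp);
	return err;
}

The point of the fix is the err label: before this change, both failure paths fell through to "out:" and returned the error without releasing the xdp_buff, so a full Rx queue leaked the buffer back neither to the pool nor to the ring.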