virtio_net: introduce virtnet_xdp_handler() to separate the logic of running XDP
authorXuan Zhuo <xuanzhuo@linux.alibaba.com>
Mon, 8 May 2023 06:14:06 +0000 (14:14 +0800)
committerJakub Kicinski <kuba@kernel.org>
Wed, 10 May 2023 02:44:27 +0000 (19:44 -0700)
At present, we have two similar pieces of logic that run the XDP program.

Therefore, this patch moves the code that executes XDP into a common
helper, virtnet_xdp_handler(), which makes later maintenance easier.
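
A condensed sketch of the resulting caller pattern (illustrative only,
taken from the receive_small() hunk below; skb construction and the
error labels are elided). The helper keeps the TX/REDIRECT bookkeeping
and tracepoints in one place, so the receive paths only act on the
verdict it returns:

	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		/* continue building the skb from xdp.data as before */
		break;
	case XDP_TX:
	case XDP_REDIRECT:
		/* xmit/redirect already done inside the helper */
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		/* XDP_DROP, or an invalid action mapped to it */
		goto err_xdp;
	}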

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/virtio_net.c

index 3c36029..9334350 100644
@@ -789,6 +789,60 @@ out:
        return ret;
 }
 
+static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+                              struct net_device *dev,
+                              unsigned int *xdp_xmit,
+                              struct virtnet_rq_stats *stats)
+{
+       struct xdp_frame *xdpf;
+       int err;
+       u32 act;
+
+       act = bpf_prog_run_xdp(xdp_prog, xdp);
+       stats->xdp_packets++;
+
+       switch (act) {
+       case XDP_PASS:
+               return act;
+
+       case XDP_TX:
+               stats->xdp_tx++;
+               xdpf = xdp_convert_buff_to_frame(xdp);
+               if (unlikely(!xdpf)) {
+                       netdev_dbg(dev, "convert buff to frame failed for xdp\n");
+                       return XDP_DROP;
+               }
+
+               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+               if (unlikely(!err)) {
+                       xdp_return_frame_rx_napi(xdpf);
+               } else if (unlikely(err < 0)) {
+                       trace_xdp_exception(dev, xdp_prog, act);
+                       return XDP_DROP;
+               }
+               *xdp_xmit |= VIRTIO_XDP_TX;
+               return act;
+
+       case XDP_REDIRECT:
+               stats->xdp_redirects++;
+               err = xdp_do_redirect(dev, xdp, xdp_prog);
+               if (err)
+                       return XDP_DROP;
+
+               *xdp_xmit |= VIRTIO_XDP_REDIR;
+               return act;
+
+       default:
+               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
+               fallthrough;
+       case XDP_ABORTED:
+               trace_xdp_exception(dev, xdp_prog, act);
+               fallthrough;
+       case XDP_DROP:
+               return XDP_DROP;
+       }
+}
+
 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
 {
        return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
@@ -880,7 +934,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
        struct page *page = virt_to_head_page(buf);
        unsigned int delta = 0;
        struct page *xdp_page;
-       int err;
        unsigned int metasize = 0;
 
        len -= vi->hdr_len;
@@ -902,7 +955,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
                struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
-               struct xdp_frame *xdpf;
                struct xdp_buff xdp;
                void *orig_data;
                u32 act;
@@ -935,8 +987,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
                xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
                                 xdp_headroom, len, true);
                orig_data = xdp.data;
-               act = bpf_prog_run_xdp(xdp_prog, &xdp);
-               stats->xdp_packets++;
+
+               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
 
                switch (act) {
                case XDP_PASS:
@@ -946,35 +998,10 @@ static struct sk_buff *receive_small(struct net_device *dev,
                        metasize = xdp.data - xdp.data_meta;
                        break;
                case XDP_TX:
-                       stats->xdp_tx++;
-                       xdpf = xdp_convert_buff_to_frame(&xdp);
-                       if (unlikely(!xdpf))
-                               goto err_xdp;
-                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
-                       if (unlikely(!err)) {
-                               xdp_return_frame_rx_napi(xdpf);
-                       } else if (unlikely(err < 0)) {
-                               trace_xdp_exception(vi->dev, xdp_prog, act);
-                               goto err_xdp;
-                       }
-                       *xdp_xmit |= VIRTIO_XDP_TX;
-                       rcu_read_unlock();
-                       goto xdp_xmit;
                case XDP_REDIRECT:
-                       stats->xdp_redirects++;
-                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
-                       if (err)
-                               goto err_xdp;
-                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
-                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
-                       fallthrough;
-               case XDP_ABORTED:
-                       trace_xdp_exception(vi->dev, xdp_prog, act);
-                       goto err_xdp;
-               case XDP_DROP:
                        goto err_xdp;
                }
        }
@@ -1282,7 +1309,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        if (xdp_prog) {
                unsigned int xdp_frags_truesz = 0;
                struct skb_shared_info *shinfo;
-               struct xdp_frame *xdpf;
                struct page *xdp_page;
                struct xdp_buff xdp;
                void *data;
@@ -1299,8 +1325,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                if (unlikely(err))
                        goto err_xdp_frags;
 
-               act = bpf_prog_run_xdp(xdp_prog, &xdp);
-               stats->xdp_packets++;
+               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
 
                switch (act) {
                case XDP_PASS:
@@ -1311,38 +1336,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                        rcu_read_unlock();
                        return head_skb;
                case XDP_TX:
-                       stats->xdp_tx++;
-                       xdpf = xdp_convert_buff_to_frame(&xdp);
-                       if (unlikely(!xdpf)) {
-                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
-                               goto err_xdp_frags;
-                       }
-                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
-                       if (unlikely(!err)) {
-                               xdp_return_frame_rx_napi(xdpf);
-                       } else if (unlikely(err < 0)) {
-                               trace_xdp_exception(vi->dev, xdp_prog, act);
-                               goto err_xdp_frags;
-                       }
-                       *xdp_xmit |= VIRTIO_XDP_TX;
-                       rcu_read_unlock();
-                       goto xdp_xmit;
                case XDP_REDIRECT:
-                       stats->xdp_redirects++;
-                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
-                       if (err)
-                               goto err_xdp_frags;
-                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
-                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
-                       fallthrough;
-               case XDP_ABORTED:
-                       trace_xdp_exception(vi->dev, xdp_prog, act);
-                       fallthrough;
-               case XDP_DROP:
-                       goto err_xdp_frags;
+                       break;
                }
 err_xdp_frags:
                if (xdp_buff_has_frags(&xdp)) {