virtio_net: introduce mergeable_xdp_get_buf()
author Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Mon, 8 May 2023 06:14:04 +0000 (14:14 +0800)
committer Jakub Kicinski <kuba@kernel.org>
Wed, 10 May 2023 02:44:26 +0000 (19:44 -0700)
Separate the logic of preparing the buffer for XDP out of
receive_mergeable().

The purpose of this is to simplify the XDP execution logic.

The main logic here is that when the headroom is insufficient, we need
to allocate a new page and recalculate the offset. Note that when a new
page is allocated, the variable page is updated to refer to it.
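
To illustrate the pattern, here is a minimal userspace sketch (not the
kernel code itself; every name below is made up for the example):

    #include <stdlib.h>
    #include <string.h>

    #define XDP_HEADROOM 256

    /* If *buf lacks headroom, replace it with a fresh allocation and
     * update the caller's pointer, mirroring how mergeable_xdp_get_buf()
     * updates *page. Returns a pointer to the payload, or NULL on failure.
     */
    static void *get_buf_with_headroom(char **buf, size_t *offset,
                                       size_t len, size_t headroom)
    {
            char *new_buf;

            if (headroom >= XDP_HEADROOM)
                    return *buf + *offset;  /* enough headroom already */

            new_buf = malloc(XDP_HEADROOM + len);
            if (!new_buf)
                    return NULL;

            /* copy the payload in behind the reserved headroom */
            memcpy(new_buf + XDP_HEADROOM, *buf + *offset, len);

            free(*buf);             /* release the old buffer */
            *buf = new_buf;         /* caller now sees the new buffer */
            *offset = XDP_HEADROOM;

            return *buf + *offset;
    }

Passing the buffer by double pointer lets the helper swap in a
replacement page transparently, which is the contract
mergeable_xdp_get_buf() keeps with receive_mergeable().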

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/virtio_net.c

index e9ee4fd..a75745d 100644
@@ -1166,6 +1166,81 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
        return 0;
 }
 
+static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
+                                  struct receive_queue *rq,
+                                  struct bpf_prog *xdp_prog,
+                                  void *ctx,
+                                  unsigned int *frame_sz,
+                                  int *num_buf,
+                                  struct page **page,
+                                  int offset,
+                                  unsigned int *len,
+                                  struct virtio_net_hdr_mrg_rxbuf *hdr)
+{
+       unsigned int truesize = mergeable_ctx_to_truesize(ctx);
+       unsigned int headroom = mergeable_ctx_to_headroom(ctx);
+       struct page *xdp_page;
+       unsigned int xdp_room;
+
+       /* Transient failure which in theory could occur if
+        * in-flight packets from before XDP was enabled reach
+        * the receive path after XDP is loaded.
+        */
+       if (unlikely(hdr->hdr.gso_type))
+               return NULL;
+
+       /* Now the XDP core assumes the frag size is PAGE_SIZE, but buffers
+        * with headroom may add a hole in truesize, which
+        * makes their length exceed PAGE_SIZE. So we disable the
+        * hole mechanism for XDP. See add_recvbuf_mergeable().
+        */
+       *frame_sz = truesize;
+
+       /* This happens when the headroom is not enough because
+        * the buffer was prefilled before XDP was set.
+        * This should only happen for the first several packets.
+        * In fact, vq reset can be used here to help us clean up
+        * the prefilled buffers, but many existing devices do not
+        * support it, and we don't want to bother users who are
+        * using xdp normally.
+        */
+       if (!xdp_prog->aux->xdp_has_frags &&
+           (*num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
+               /* linearize data for XDP */
+               xdp_page = xdp_linearize_page(rq, num_buf,
+                                             *page, offset,
+                                             VIRTIO_XDP_HEADROOM,
+                                             len);
+               *frame_sz = PAGE_SIZE;
+
+               if (!xdp_page)
+                       return NULL;
+               offset = VIRTIO_XDP_HEADROOM;
+
+               put_page(*page);
+               *page = xdp_page;
+       } else if (unlikely(headroom < virtnet_get_headroom(vi))) {
+               xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+                                         sizeof(struct skb_shared_info));
+               if (*len + xdp_room > PAGE_SIZE)
+                       return NULL;
+
+               xdp_page = alloc_page(GFP_ATOMIC);
+               if (!xdp_page)
+                       return NULL;
+
+               memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
+                      page_address(*page) + offset, *len);
+               *frame_sz = PAGE_SIZE;
+               offset = VIRTIO_XDP_HEADROOM;
+
+               put_page(*page);
+               *page = xdp_page;
+       }
+
+       return page_address(*page) + offset;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         struct virtnet_info *vi,
                                         struct receive_queue *rq,
@@ -1185,7 +1260,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        unsigned int headroom = mergeable_ctx_to_headroom(ctx);
        unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
        unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
-       unsigned int frame_sz, xdp_room;
+       unsigned int frame_sz;
        int err;
 
        head_skb = NULL;
@@ -1215,63 +1290,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                u32 act;
                int i;
 
-               /* Transient failure which in theory could occur if
-                * in-flight packets from before XDP was enabled reach
-                * the receive path after XDP is loaded.
-                */
-               if (unlikely(hdr->hdr.gso_type))
+               data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz,
+                                            &num_buf, &page, offset, &len, hdr);
+               if (unlikely(!data))
                        goto err_xdp;
 
-               /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
-                * with headroom may add hole in truesize, which
-                * make their length exceed PAGE_SIZE. So we disabled the
-                * hole mechanism for xdp. See add_recvbuf_mergeable().
-                */
-               frame_sz = truesize;
-
-               /* This happens when headroom is not enough because
-                * of the buffer was prefilled before XDP is set.
-                * This should only happen for the first several packets.
-                * In fact, vq reset can be used here to help us clean up
-                * the prefilled buffers, but many existing devices do not
-                * support it, and we don't want to bother users who are
-                * using xdp normally.
-                */
-               if (!xdp_prog->aux->xdp_has_frags &&
-                   (num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
-                       /* linearize data for XDP */
-                       xdp_page = xdp_linearize_page(rq, &num_buf,
-                                                     page, offset,
-                                                     VIRTIO_XDP_HEADROOM,
-                                                     &len);
-                       frame_sz = PAGE_SIZE;
-
-                       if (!xdp_page)
-                               goto err_xdp;
-                       offset = VIRTIO_XDP_HEADROOM;
-
-                       put_page(page);
-                       page = xdp_page;
-               } else if (unlikely(headroom < virtnet_get_headroom(vi))) {
-                       xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
-                                                 sizeof(struct skb_shared_info));
-                       if (len + xdp_room > PAGE_SIZE)
-                               goto err_xdp;
-
-                       xdp_page = alloc_page(GFP_ATOMIC);
-                       if (!xdp_page)
-                               goto err_xdp;
-
-                       memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
-                              page_address(page) + offset, len);
-                       frame_sz = PAGE_SIZE;
-                       offset = VIRTIO_XDP_HEADROOM;
-
-                       put_page(page);
-                       page = xdp_page;
-               }
-
-               data = page_address(page) + offset;
                err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
                                                 &num_buf, &xdp_frags_truesz, stats);
                if (unlikely(err))