virtio_net: separate the logic of checking whether sq is full
author	Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Wed, 8 Mar 2023 02:49:34 +0000 (10:49 +0800)
committer	Jakub Kicinski <kuba@kernel.org>
Sat, 11 Mar 2023 00:46:00 +0000 (16:46 -0800)
Separate the logic of checking whether the sq is full into its own helper.
A subsequent patch will reuse this function.
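
For context, a sketch of how such a follow-up could call the new helper from
the XDP transmit path. The exact call site, the elided bookkeeping, and the
virtnet_xdp_get_sq() lookup are assumptions for illustration, not part of
this patch:

static int virtnet_xdp_xmit(struct net_device *dev, int n,
			    struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct send_queue *sq = virtnet_xdp_get_sq(vi);

	/* ... submit the n xdp_frames to sq as before ... */

	/* Assumed reuse: apply the same early stop-queue policy that
	 * start_xmit() now gets via the helper.
	 */
	check_sq_full_and_disable(vi, dev, sq);

	/* ... kick the device; assume all n frames were queued ... */
	return n;
}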

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/virtio_net.c

index 8b31a04..46bbdda 100644
@@ -591,6 +591,41 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
                return false;
 }
 
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+                                     struct net_device *dev,
+                                     struct send_queue *sq)
+{
+       bool use_napi = sq->napi.weight;
+       int qnum;
+
+       qnum = sq - vi->sq;
+
+       /* If running out of space, stop queue to avoid getting packets that we
+        * are then unable to transmit.
+        * An alternative would be to force queuing layer to requeue the skb by
+        * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
+        * returned in a normal path of operation: it means that driver is not
+        * maintaining the TX queue stop/start state properly, and causes
+        * the stack to do a non-trivial amount of useless work.
+        * Since most packets only take 1 or 2 ring slots, stopping the queue
+        * early means 16 slots are typically wasted.
+        */
+       if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+               netif_stop_subqueue(dev, qnum);
+               if (use_napi) {
+                       if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+                               virtqueue_napi_schedule(&sq->napi, sq->vq);
+               } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+                       /* More just got used, free them then recheck. */
+                       free_old_xmit_skbs(sq, false);
+                       if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+                               netif_start_subqueue(dev, qnum);
+                               virtqueue_disable_cb(sq->vq);
+                       }
+               }
+       }
+}
+
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                   struct send_queue *sq,
                                   struct xdp_frame *xdpf)
@@ -1989,30 +2024,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
                nf_reset_ct(skb);
        }
 
-       /* If running out of space, stop queue to avoid getting packets that we
-        * are then unable to transmit.
-        * An alternative would be to force queuing layer to requeue the skb by
-        * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
-        * returned in a normal path of operation: it means that driver is not
-        * maintaining the TX queue stop/start state properly, and causes
-        * the stack to do a non-trivial amount of useless work.
-        * Since most packets only take 1 or 2 ring slots, stopping the queue
-        * early means 16 slots are typically wasted.
-        */
-       if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
-               netif_stop_subqueue(dev, qnum);
-               if (use_napi) {
-                       if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
-                               virtqueue_napi_schedule(&sq->napi, sq->vq);
-               } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
-                       /* More just got used, free them then recheck. */
-                       free_old_xmit_skbs(sq, false);
-                       if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
-                               netif_start_subqueue(dev, qnum);
-                               virtqueue_disable_cb(sq->vq);
-                       }
-               }
-       }
+       check_sq_full_and_disable(vi, dev, sq);
 
        if (kick || netif_xmit_stopped(txq)) {
                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {