virtio_net: move tx vq operation under tx queue lock
author: Michael S. Tsirkin <mst@redhat.com>
Tue, 13 Apr 2021 05:35:26 +0000 (01:35 -0400)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 20 Jul 2021 14:05:52 +0000 (16:05 +0200)
[ Upstream commit 5a2f966d0f3fa0ef6dada7ab9eda74cacee96b8a ]

It's unsafe to operate a vq from multiple threads.
Unfortunately this is exactly what we do when invoking
clean tx poll from rx napi.
Same happens with napi-tx even without the
opportunistic cleaning from the receive interrupt: that races
with processing the vq in start_xmit.

As a fix move everything that deals with the vq to under tx lock.

Fixes: b92f1e6751a6 ("virtio-net: transmit napi")
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/virtio_net.c

index 345a0f51e8d74b5d5747e02bec48a4e32d2305b3..7d1f609306f94d311271c7080658e4c47debb327 100644 (file)
@@ -1519,6 +1519,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
        struct virtnet_info *vi = sq->vq->vdev->priv;
        unsigned int index = vq2txq(sq->vq);
        struct netdev_queue *txq;
+       int opaque;
+       bool done;
 
        if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
                /* We don't need to enable cb for XDP */
@@ -1528,10 +1530,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
        txq = netdev_get_tx_queue(vi->dev, index);
        __netif_tx_lock(txq, raw_smp_processor_id());
+       virtqueue_disable_cb(sq->vq);
        free_old_xmit_skbs(sq, true);
+
+       opaque = virtqueue_enable_cb_prepare(sq->vq);
+
+       done = napi_complete_done(napi, 0);
+
+       if (!done)
+               virtqueue_disable_cb(sq->vq);
+
        __netif_tx_unlock(txq);
 
-       virtqueue_napi_complete(napi, sq->vq, 0);
+       if (done) {
+               if (unlikely(virtqueue_poll(sq->vq, opaque))) {
+                       if (napi_schedule_prep(napi)) {
+                               __netif_tx_lock(txq, raw_smp_processor_id());
+                               virtqueue_disable_cb(sq->vq);
+                               __netif_tx_unlock(txq);
+                               __napi_schedule(napi);
+                       }
+               }
+       }
 
        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
                netif_tx_wake_queue(txq);