vhost: take worker or vq instead of dev for queueing
author  Mike Christie <michael.christie@oracle.com>
        Mon, 26 Jun 2023 23:22:55 +0000 (18:22 -0500)
committer  Michael S. Tsirkin <mst@redhat.com>
        Mon, 3 Jul 2023 16:15:13 +0000 (12:15 -0400)
This patch has the core work queueing function take a worker, in
preparation for supporting multiple workers. It also adds a helper that
takes a vq during queueing, so modules can control which vq/worker to
queue work on.

This temporarily leaves vhost_work_queue in place; it will be removed
once the drivers are converted in the next patches.
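
For illustration only (not part of this patch), a driver that currently
queues all of its work on the device-wide worker:

	vhost_work_queue(&priv->dev, &priv->completion_work);

could, once converted, direct the same work to the worker attached to a
particular virtqueue:

	vhost_vq_work_queue(&priv->vqs[i], &priv->completion_work);

(priv, completion_work and vqs[] are hypothetical driver-private names.)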

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Message-Id: <20230626232307.97930-6-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
drivers/vhost/vhost.c
drivers/vhost/vhost.h

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index aafb23e1247794f7632f862dca487ba10bffb5b1..611e495eeb3c3cd214cbb2a8056a1cc93ce77afc 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -231,21 +231,10 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-void vhost_dev_flush(struct vhost_dev *dev)
+static bool vhost_worker_queue(struct vhost_worker *worker,
+                              struct vhost_work *work)
 {
-       struct vhost_flush_struct flush;
-
-       init_completion(&flush.wait_event);
-       vhost_work_init(&flush.work, vhost_flush_work);
-
-       if (vhost_work_queue(dev, &flush.work))
-               wait_for_completion(&flush.wait_event);
-}
-EXPORT_SYMBOL_GPL(vhost_dev_flush);
-
-bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
-{
-       if (!dev->worker)
+       if (!worker)
                return false;
        /*
         * vsock can queue while we do a VHOST_SET_OWNER, so we have a smp_wmb
@@ -257,14 +246,37 @@ bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
                 * sure it was not in the list.
                 * test_and_set_bit() implies a memory barrier.
                 */
-               llist_add(&work->node, &dev->worker->work_list);
-               vhost_task_wake(dev->worker->vtsk);
+               llist_add(&work->node, &worker->work_list);
+               vhost_task_wake(worker->vtsk);
        }
 
        return true;
 }
+
+bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
+{
+       return vhost_worker_queue(dev->worker, work);
+}
 EXPORT_SYMBOL_GPL(vhost_work_queue);
 
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
+{
+       return vhost_worker_queue(vq->worker, work);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
+
+void vhost_dev_flush(struct vhost_dev *dev)
+{
+       struct vhost_flush_struct flush;
+
+       init_completion(&flush.wait_event);
+       vhost_work_init(&flush.work, vhost_flush_work);
+
+       if (vhost_work_queue(dev, &flush.work))
+               wait_for_completion(&flush.wait_event);
+}
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
+
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_vq_has_work(struct vhost_virtqueue *vq)
 {
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 37c183b37c42a1f321debc8c2f0b6446b1546f6d..6a1ae8ae9c7ded25d648be35c4c71fcb248aa50e 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -198,6 +198,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
                      struct vhost_log *log, unsigned int *log_num);
 void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
 
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
 bool vhost_vq_has_work(struct vhost_virtqueue *vq);
 bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
 int vhost_vq_init_access(struct vhost_virtqueue *);
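
Usage note (illustrative, not part of the patch): like vhost_work_queue(),
the new helper reports whether a worker was available, so a hypothetical
caller can handle the not-yet-attached case:

	/* 'vq' and 'work' are assumed to come from the driver. */
	if (!vhost_vq_work_queue(vq, work)) {
		/* No worker attached yet, e.g. before VHOST_SET_OWNER. */
	}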