virtio-blk: support polling I/O
author Suwan Kim <suwan.kim027@gmail.com>
Wed, 6 Apr 2022 15:32:06 +0000 (00:32 +0900)
committer Michael S. Tsirkin <mst@redhat.com>
Tue, 31 May 2022 16:44:23 +0000 (12:44 -0400)
This patch adds polling I/O support to the virtio-blk driver. The
polling feature is enabled by the module parameter "poll_queues",
which sets up dedicated polling queues for virtio-blk. This improves
polling I/O throughput and latency.
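
For example, assuming virtio_blk is built as a module, two polling
queues can be requested at module load time, or via the kernel
command line when the driver is built in (the value 2 is
illustrative):

  modprobe virtio_blk poll_queues=2
  virtio_blk.poll_queues=2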

The virtio-blk driver does not have a poll function or poll queues,
and it has been operating in an interrupt-driven manner even when the
polling function is called from the upper layer.

virtio-blk polling is implemented on top of the block layer's batched
completion. virtblk_poll() queues completed requests to
io_comp_batch->req_list, and virtblk_complete_batch() later calls the
unmap function and ends the requests in a batch.

virtio-blk reads the number of poll queues from the module parameter
"poll_queues". If the VM sets the queue parameters as below
("num-queues=N" [QEMU property], "poll_queues=M" [module parameter]),
the driver allocates N virtqueues to virtio_blk->vqs[N] and uses
[0..(N-M-1)] as default queues and [(N-M)..(N-1)] as poll queues.
Unlike the default queues, the poll queues have no callback function.
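
For example, with "num-queues=4" and "poll_queues=2", vqs[0] and
vqs[1] become default queues with the virtblk_done callback, while
vqs[2] and vqs[3] become poll queues with no callback.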

Regarding the HW-SW queue mapping, the default queues keep the
existing method that considers the MSI irq vector, but the poll
queues have no irq, so they use the regular blk-mq CPU mapping.

To verify the improvement, I ran a fio polling I/O performance test
with the io_uring engine and the options below
(io_uring, hipri, randread, direct=1, bs=512, iodepth=64, numjobs=N).
The VM was configured with 4 vCPUs and 4 virtio-blk queues - 2
default queues and 2 poll queues.
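
A representative fio command line for this test might look as follows
(the target device and runtime are illustrative):

  fio --name=poll-test --ioengine=io_uring --hipri --rw=randread \
      --direct=1 --bs=512 --iodepth=64 --numjobs=4 \
      --filename=/dev/vdb --time_based --runtime=60 --group_reporting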

As a result, IOPS and average latency improved by about 10%.

Test result:

- Fio io_uring poll without virtio-blk poll support
-- numjobs=1 : IOPS = 339K, avg latency = 188.33us
-- numjobs=2 : IOPS = 367K, avg latency = 347.33us
-- numjobs=4 : IOPS = 383K, avg latency = 682.06us

- Fio io_uring poll with virtio-blk poll support
-- numjobs=1 : IOPS = 385K, avg latency = 165.94us
-- numjobs=2 : IOPS = 408K, avg latency = 313.28us
-- numjobs=4 : IOPS = 424K, avg latency = 613.05us

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Suwan Kim <suwan.kim027@gmail.com>
Message-Id: <20220406153207.163134-2-suwan.kim027@gmail.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
drivers/block/virtio_blk.c

index d624cc8..ad5f9ce 100644
@@ -37,6 +37,10 @@ MODULE_PARM_DESC(num_request_queues,
                 "0 for no limit. "
                 "Values > nr_cpu_ids truncated to nr_cpu_ids.");
 
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
+MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");
+
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
@@ -74,6 +78,7 @@ struct virtio_blk {
 
        /* num of vqs */
        int num_vqs;
+       int io_queues[HCTX_MAX_TYPES];
        struct virtio_blk_vq *vqs;
 };
 
@@ -512,6 +517,7 @@ static int init_vq(struct virtio_blk *vblk)
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
+       unsigned int num_poll_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };
 
@@ -520,6 +526,7 @@ static int init_vq(struct virtio_blk *vblk)
                                   &num_vqs);
        if (err)
                num_vqs = 1;
+
        if (!err && !num_vqs) {
                dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
                return -EINVAL;
@@ -529,6 +536,17 @@ static int init_vq(struct virtio_blk *vblk)
                        min_not_zero(num_request_queues, nr_cpu_ids),
                        num_vqs);
 
+       num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
+
+       vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
+       vblk->io_queues[HCTX_TYPE_READ] = 0;
+       vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
+
+       dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
+                               vblk->io_queues[HCTX_TYPE_DEFAULT],
+                               vblk->io_queues[HCTX_TYPE_READ],
+                               vblk->io_queues[HCTX_TYPE_POLL]);
+
        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;
@@ -541,12 +559,18 @@ static int init_vq(struct virtio_blk *vblk)
                goto out;
        }
 
-       for (i = 0; i < num_vqs; i++) {
+       for (i = 0; i < num_vqs - num_poll_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }
 
+       for (; i < num_vqs; i++) {
+               callbacks[i] = NULL;
+               snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
+               names[i] = vblk->vqs[i].name;
+       }
+
        /* Discover virtqueues and write information to configuration.  */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
@@ -692,16 +716,89 @@ static const struct attribute_group *virtblk_attr_groups[] = {
 static int virtblk_map_queues(struct blk_mq_tag_set *set)
 {
        struct virtio_blk *vblk = set->driver_data;
+       int i, qoff;
+
+       for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+               struct blk_mq_queue_map *map = &set->map[i];
+
+               map->nr_queues = vblk->io_queues[i];
+               map->queue_offset = qoff;
+               qoff += map->nr_queues;
+
+               if (map->nr_queues == 0)
+                       continue;
+
+               /*
+                * Regular queues have interrupts and hence CPU affinity is
+                * defined by the core virtio code, but polling queues have
+                * no interrupts so we let the block layer assign CPU affinity.
+                */
+               if (i == HCTX_TYPE_POLL)
+                       blk_mq_map_queues(&set->map[i]);
+               else
+                       blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+       }
+
+       return 0;
+}
+
+static void virtblk_complete_batch(struct io_comp_batch *iob)
+{
+       struct request *req;
 
-       return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
-                                       vblk->vdev, 0);
+       rq_list_for_each(&iob->req_list, req) {
+               virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
+               virtblk_cleanup_cmd(req);
+       }
+       blk_mq_end_request_batch(iob);
+}
+
+static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+       struct virtio_blk *vblk = hctx->queue->queuedata;
+       struct virtio_blk_vq *vq = hctx->driver_data;
+       struct virtblk_req *vbr;
+       unsigned long flags;
+       unsigned int len;
+       int found = 0;
+
+       spin_lock_irqsave(&vq->lock, flags);
+
+       while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
+               struct request *req = blk_mq_rq_from_pdu(vbr);
+
+               found++;
+               if (!blk_mq_add_to_batch(req, iob, vbr->status,
+                                               virtblk_complete_batch))
+                       blk_mq_complete_request(req);
+       }
+
+       if (found)
+               blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+
+       spin_unlock_irqrestore(&vq->lock, flags);
+
+       return found;
+}
+
+static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+                         unsigned int hctx_idx)
+{
+       struct virtio_blk *vblk = data;
+       struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
+
+       WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
+       hctx->driver_data = vq;
+       return 0;
 }
 
 static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .commit_rqs     = virtio_commit_rqs,
+       .init_hctx      = virtblk_init_hctx,
        .complete       = virtblk_request_done,
        .map_queues     = virtblk_map_queues,
+       .poll           = virtblk_poll,
 };
 
 static unsigned int virtblk_queue_depth;
@@ -778,6 +875,9 @@ static int virtblk_probe(struct virtio_device *vdev)
                sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+       vblk->tag_set.nr_maps = 1;
+       if (vblk->io_queues[HCTX_TYPE_POLL])
+               vblk->tag_set.nr_maps = 3;
 
        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)