nvme: add support for mq_ops->queue_rqs()
author Jens Axboe <axboe@kernel.dk>
Thu, 18 Nov 2021 15:37:30 +0000 (08:37 -0700)
committer Jens Axboe <axboe@kernel.dk>
Thu, 16 Dec 2021 17:54:36 +0000 (10:54 -0700)
This enables the block layer to send us a full plug list of requests
that need submitting. The block layer guarantees that they all belong
to the same queue, but we do have to check the hardware queue mapping
for each request.

If errors are encountered, leave them in the passed-in list. Then the
block layer will handle them individually.

This is good for about a 4% improvement in peak performance, taking us
from 9.6M to 10M IOPS/core.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
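
The splitting and requeue flow described above can be modeled outside the
driver. Below is a minimal, self-contained C sketch; struct rq, list_push(),
submit_batch(), and queue_rqs() are illustrative stand-ins for the kernel's
struct request, rq_list_add(), nvme_submit_cmds(), and the nvme_queue_rqs()
in the diff below, not the real definitions. It walks the plugged list,
submits each contiguous run of requests that share a hardware queue as one
batch, and hands requests that fail prep back to the caller on a requeue
list.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative request; ->next plays the role of rq->rq_next. */
struct rq {
	int hwq;         /* hardware queue this request maps to */
	bool prep_ok;    /* whether driver-side prep succeeds */
	struct rq *next;
};

/* Push onto the head of a singly linked list (like rq_list_add()). */
static void list_push(struct rq **list, struct rq *rq)
{
	rq->next = *list;
	*list = rq;
}

/* Stand-in for nvme_submit_cmds(): drain one same-hwq batch. */
static void submit_batch(struct rq **batch)
{
	for (struct rq *rq = *batch; rq; rq = rq->next)
		printf("  submit request on hwq %d\n", rq->hwq);
	*batch = NULL;
	printf("  ring the doorbell once for the batch\n");
}

/* Split per hardware queue, park prep failures on a requeue list. */
static void queue_rqs(struct rq **rqlist)
{
	struct rq *rq = *rqlist, *prev = NULL;
	struct rq *requeue_list = NULL;

	while (rq) {
		struct rq *next = rq->next; /* capture before any relinking */

		if (!rq->prep_ok) {
			/* detach 'rq' and keep it for the caller to retry */
			if (prev)
				prev->next = next;
			else
				*rqlist = next;
			list_push(&requeue_list, rq);
		} else {
			prev = rq;
		}

		if (!next || (prev && next->hwq != prev->hwq)) {
			/* detach the finished batch and submit it */
			if (prev)
				prev->next = NULL;
			if (*rqlist)
				submit_batch(rqlist);
			*rqlist = next;
			prev = NULL;
		}
		rq = next;
	}

	*rqlist = requeue_list; /* failed requests go back to the caller */
}

int main(void)
{
	struct rq rqs[] = {
		{ .hwq = 0, .prep_ok = true },
		{ .hwq = 0, .prep_ok = false },
		{ .hwq = 1, .prep_ok = true },
	};
	struct rq *list = NULL;

	/* build the "plug list" in reverse so it ends up in order */
	for (int i = 2; i >= 0; i--)
		list_push(&list, &rqs[i]);

	queue_rqs(&list);

	for (struct rq *rq = list; rq; rq = rq->next)
		printf("left for individual retry: hwq %d\n", rq->hwq);
	return 0;
}

Batching per hardware queue is what lets nvme_submit_cmds() in the diff take
sq_lock once and write the SQ doorbell once per run, rather than once per
request.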
drivers/nvme/host/pci.c

index 081abbe..50deb8b 100644
@@ -969,6 +969,64 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        return BLK_STS_OK;
 }
 
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+{
+       spin_lock(&nvmeq->sq_lock);
+       while (!rq_list_empty(*rqlist)) {
+               struct request *req = rq_list_pop(rqlist);
+               struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+               nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+       }
+       nvme_write_sq_db(nvmeq, true);
+       spin_unlock(&nvmeq->sq_lock);
+}
+
+static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
+{
+       /*
+        * We should not need to do this, but we're still using this to
+        * ensure we can drain requests on a dying queue.
+        */
+       if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+               return false;
+       if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
+               return false;
+
+       req->mq_hctx->tags->rqs[req->tag] = req;
+       return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
+}
+
+static void nvme_queue_rqs(struct request **rqlist)
+{
+       struct request *req = rq_list_peek(rqlist), *prev = NULL;
+       struct request *requeue_list = NULL;
+
+       do {
+               struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+               if (!nvme_prep_rq_batch(nvmeq, req)) {
+                       /* detach 'req' and add to remainder list */
+                       if (prev)
+                               prev->rq_next = req->rq_next;
+                       rq_list_add(&requeue_list, req);
+               } else {
+                       prev = req;
+               }
+
+               req = rq_list_next(req);
+               if (!req || (prev && req->mq_hctx != prev->mq_hctx)) {
+                       /* detach rest of list, and submit */
+                       if (prev)
+                               prev->rq_next = NULL;
+                       nvme_submit_cmds(nvmeq, rqlist);
+                       *rqlist = req;
+               }
+       } while (req);
+
+       *rqlist = requeue_list;
+}
+
 static __always_inline void nvme_pci_unmap_rq(struct request *req)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -1670,6 +1728,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
 
 static const struct blk_mq_ops nvme_mq_ops = {
        .queue_rq       = nvme_queue_rq,
+       .queue_rqs      = nvme_queue_rqs,
        .complete       = nvme_pci_complete_rq,
        .commit_rqs     = nvme_commit_rqs,
        .init_hctx      = nvme_init_hctx,
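
As the message notes, anything left in *rqlist after ->queue_rqs() returns is
picked up by the block layer and issued individually. A rough, hypothetical
sketch of that caller-side contract follows; the types and the toy driver are
simplified illustrations, not the actual blk-mq plug-flush code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative types only; not the kernel's struct request or blk_mq_ops. */
struct rq { int tag; bool ok; struct rq *next; };

struct ops {
	void (*queue_rqs)(struct rq **list); /* batch path; may leave leftovers */
	void (*queue_rq)(struct rq *rq);     /* single-request fallback */
};

/* Toy driver: "submits" what it can, leaves failing requests on the list. */
static void drv_queue_rqs(struct rq **list)
{
	struct rq *rq = *list, *prev = NULL;

	while (rq) {
		struct rq *next = rq->next;

		if (rq->ok) {
			printf("batched submit: tag %d\n", rq->tag);
			if (prev)
				prev->next = next;
			else
				*list = next;
		} else {
			prev = rq;
		}
		rq = next;
	}
}

static void drv_queue_rq(struct rq *rq)
{
	printf("individual submit: tag %d\n", rq->tag);
}

/* The contract the commit message relies on: call the batch hook first,
 * then push anything still on the list through the one-at-a-time path. */
static void flush_plug(struct rq **plug_list, const struct ops *ops)
{
	ops->queue_rqs(plug_list);

	while (*plug_list) {
		struct rq *rq = *plug_list;

		*plug_list = rq->next;
		ops->queue_rq(rq);
	}
}

int main(void)
{
	struct rq c = { .tag = 3, .ok = true };
	struct rq b = { .tag = 2, .ok = false, .next = &c };
	struct rq a = { .tag = 1, .ok = true, .next = &b };
	struct rq *plug = &a;
	struct ops ops = { .queue_rqs = drv_queue_rqs, .queue_rq = drv_queue_rq };

	flush_plug(&plug, &ops);
	return 0;
}

Parking failed requests on the requeue list is therefore enough from the
driver's point of view; the single-request path deals with them afterwards.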