nvme: separate command prep and issue
author		Jens Axboe <axboe@kernel.dk>
		Fri, 29 Oct 2021 20:34:11 +0000 (14:34 -0600)
committer	Jens Axboe <axboe@kernel.dk>
		Thu, 16 Dec 2021 17:54:35 +0000 (10:54 -0700)
Add an nvme_prep_rq() helper to set up a command, and adapt
nvme_queue_rq() to use it.
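
The split is also useful on its own: once prep is separate from issue, a
caller could prepare several requests up front and then copy them all to
the submission queue under a single sq_lock round trip, instead of one
lock/doorbell cycle per request. The sketch below is only an illustration
of that idea; nvme_submit_cmds() and its request-array calling convention
are assumptions, not part of this patch:

	/*
	 * Hypothetical batch-issue helper (illustration only): issue a set
	 * of already-prepped requests with one sq_lock acquisition and one
	 * doorbell write. Relies only on helpers used elsewhere in this
	 * file: blk_mq_rq_to_pdu(), nvme_sq_copy_cmd(), nvme_write_sq_db().
	 */
	static void nvme_submit_cmds(struct nvme_queue *nvmeq,
				     struct request **rqs, unsigned int nr)
	{
		unsigned int i;

		spin_lock(&nvmeq->sq_lock);
		for (i = 0; i < nr; i++) {
			struct nvme_iod *iod = blk_mq_rq_to_pdu(rqs[i]);

			nvme_sq_copy_cmd(nvmeq, &iod->cmd);
		}
		nvme_write_sq_db(nvmeq, true);
		spin_unlock(&nvmeq->sq_lock);
	}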

Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2009f8c..081abbe 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -903,55 +903,32 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
        return BLK_STS_OK;
 }
 
-/*
- * NOTE: ns is NULL when called on the admin queue.
- */
-static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
-                        const struct blk_mq_queue_data *bd)
+static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 {
-       struct nvme_ns *ns = hctx->queue->queuedata;
-       struct nvme_queue *nvmeq = hctx->driver_data;
-       struct nvme_dev *dev = nvmeq->dev;
-       struct request *req = bd->rq;
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       struct nvme_command *cmnd = &iod->cmd;
        blk_status_t ret;
 
        iod->aborted = 0;
        iod->npages = -1;
        iod->nents = 0;
 
-       /*
-        * We should not need to do this, but we're still using this to
-        * ensure we can drain requests on a dying queue.
-        */
-       if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
-               return BLK_STS_IOERR;
-
-       if (!nvme_check_ready(&dev->ctrl, req, true))
-               return nvme_fail_nonready_command(&dev->ctrl, req);
-
-       ret = nvme_setup_cmd(ns, req);
+       ret = nvme_setup_cmd(req->q->queuedata, req);
        if (ret)
                return ret;
 
        if (blk_rq_nr_phys_segments(req)) {
-               ret = nvme_map_data(dev, req, cmnd);
+               ret = nvme_map_data(dev, req, &iod->cmd);
                if (ret)
                        goto out_free_cmd;
        }
 
        if (blk_integrity_rq(req)) {
-               ret = nvme_map_metadata(dev, req, cmnd);
+               ret = nvme_map_metadata(dev, req, &iod->cmd);
                if (ret)
                        goto out_unmap_data;
        }
 
        blk_mq_start_request(req);
-       spin_lock(&nvmeq->sq_lock);
-       nvme_sq_copy_cmd(nvmeq, &iod->cmd);
-       nvme_write_sq_db(nvmeq, bd->last);
-       spin_unlock(&nvmeq->sq_lock);
        return BLK_STS_OK;
 out_unmap_data:
        nvme_unmap_data(dev, req);
@@ -960,6 +937,38 @@ out_free_cmd:
        return ret;
 }
 
+/*
+ * NOTE: ns is NULL when called on the admin queue.
+ */
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+                        const struct blk_mq_queue_data *bd)
+{
+       struct nvme_queue *nvmeq = hctx->driver_data;
+       struct nvme_dev *dev = nvmeq->dev;
+       struct request *req = bd->rq;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       blk_status_t ret;
+
+       /*
+        * We should not need to do this, but we're still using this to
+        * ensure we can drain requests on a dying queue.
+        */
+       if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+               return BLK_STS_IOERR;
+
+       if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
+               return nvme_fail_nonready_command(&dev->ctrl, req);
+
+       ret = nvme_prep_rq(dev, req);
+       if (unlikely(ret))
+               return ret;
+       spin_lock(&nvmeq->sq_lock);
+       nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+       nvme_write_sq_db(nvmeq, bd->last);
+       spin_unlock(&nvmeq->sq_lock);
+       return BLK_STS_OK;
+}
+
 static __always_inline void nvme_pci_unmap_rq(struct request *req)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
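
A note on the resulting error-handling contract: nvme_prep_rq() unwinds
everything it did on failure (unmapping data and freeing the command via
the out_unmap_data/out_free_cmd labels), so a caller can treat any
non-BLK_STS_OK return as fully cleaned up. A hedged sketch of how a
batching caller might lean on that; the nvme_prep_rq_batch() wrapper is
hypothetical, not part of this patch:

	/*
	 * Hypothetical wrapper (illustration only): run the same readiness
	 * checks as nvme_queue_rq(), then report whether the request was
	 * prepped and is ready to issue. On a false return from a failed
	 * prep, the DMA mappings and command have already been unwound, so
	 * the caller only has to complete or requeue the request itself.
	 */
	static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq,
				       struct request *req)
	{
		if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
			return false;
		if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
			return false;

		return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
	}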