nvme: introduce nvme_start_request
authorSagi Grimberg <sagi@grimberg.me>
Mon, 3 Oct 2022 09:43:43 +0000 (12:43 +0300)
committerChristoph Hellwig <hch@lst.de>
Tue, 6 Dec 2022 08:16:57 +0000 (09:16 +0100)
In preparation for nvme-multipath IO stats accounting, we want the
accounting to happen in a centralized place. The request completion
path is already centralized, but we also need a common helper for
starting a request's I/O.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
drivers/nvme/host/apple.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c

index cab6951..94ef797 100644 (file)
@@ -763,7 +763,7 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                        goto out_free_cmd;
        }
 
-       blk_mq_start_request(req);
+       nvme_start_request(req);
        apple_nvme_submit_cmd(q, cmnd);
        return BLK_STS_OK;
 
index aa5fb56..489f5e7 100644 (file)
@@ -2733,7 +2733,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
        atomic_set(&op->state, FCPOP_STATE_ACTIVE);
 
        if (!(op->flags & FCOP_FLAGS_AEN))
-               blk_mq_start_request(op->rq);
+               nvme_start_request(op->rq);
 
        cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
        ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
index b3a1c59..8522d6d 100644 (file)
@@ -1012,6 +1012,11 @@ static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
 }
 #endif
 
+static inline void nvme_start_request(struct request *rq)
+{
+       blk_mq_start_request(rq);
+}
+
 static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
 {
        return ctrl->sgls & ((1 << 0) | (1 << 1));
index e0da4a6..ac734c8 100644 (file)
@@ -907,7 +907,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
                        goto out_unmap_data;
        }
 
-       blk_mq_start_request(req);
+       nvme_start_request(req);
        return BLK_STS_OK;
 out_unmap_data:
        nvme_unmap_data(dev, req);
index de591cd..448abf8 100644 (file)
@@ -2040,7 +2040,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ret)
                goto unmap_qe;
 
-       blk_mq_start_request(rq);
+       nvme_start_request(rq);
 
        if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
            queue->pi_support &&
index 776b8d9..79789da 100644 (file)
@@ -2405,7 +2405,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (unlikely(ret))
                return ret;
 
-       blk_mq_start_request(rq);
+       nvme_start_request(rq);
 
        nvme_tcp_queue_request(req, true, bd->last);
 
index 4173099..6d17662 100644 (file)
@@ -145,7 +145,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ret)
                return ret;
 
-       blk_mq_start_request(req);
+       nvme_start_request(req);
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
        iod->req.port = queue->ctrl->port;
        if (!nvmet_req_init(&iod->req, &queue->nvme_cq,