nvme: pass nr_maps explicitly to nvme_alloc_io_tag_set
author: Christoph Hellwig <hch@lst.de>
Wed, 30 Nov 2022 16:16:52 +0000 (17:16 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 31 Dec 2022 12:32:25 +0000 (13:32 +0100)
[ Upstream commit dcef77274ae52136925287b6b59d5c6e6a4adfb9 ]

Don't look at ctrl->ops as only RDMA and TCP actually support multiple
maps.

Fixes: 6dfba1c09c10 ("nvme-fc: use the tagset alloc/free helpers")
Fixes: ceee1953f923 ("nvme-loop: use the tagset alloc/free helpers")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c

index a4ea0607bc9870ae341019826070f423fc85fa0d..95b73b386719619fa50942ff5b584e6e55d5d13c 100644 (file)
@@ -4867,7 +4867,7 @@ EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
 
 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
                const struct blk_mq_ops *ops, unsigned int flags,
-               unsigned int cmd_size)
+               unsigned int nr_maps, unsigned int cmd_size)
 {
        int ret;
 
@@ -4881,8 +4881,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
        set->driver_data = ctrl;
        set->nr_hw_queues = ctrl->queue_count - 1;
        set->timeout = NVME_IO_TIMEOUT;
-       if (ops->map_queues)
-               set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+       set->nr_maps = nr_maps;
        ret = blk_mq_alloc_tag_set(set);
        if (ret)
                return ret;
index 5d57a042dbcade8170facfddb205080a06ffc088..20b0c29a9a34118faa0e75f91d5115bf783aee67 100644 (file)
@@ -2903,7 +2903,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
        nvme_fc_init_io_queues(ctrl);
 
        ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
-                       &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+                       &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
                        struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
                                    ctrl->lport->ops->fcprqst_priv_sz));
        if (ret)
index a29877217ee65ca007d696b13f2c20dadd0d14ab..8a0db9e06dc65c3f759703fbcde3e055166044f9 100644 (file)
@@ -743,7 +743,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
                const struct blk_mq_ops *ops, unsigned int flags,
-               unsigned int cmd_size);
+               unsigned int nr_maps, unsigned int cmd_size);
 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
index 6e079abb22ee97d11bd29f38811ab152fe6ae2c8..a55d3e8b607d545404926adea1a04a4adfb7ee9b 100644 (file)
@@ -798,7 +798,9 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
                            NVME_RDMA_METADATA_SGL_SIZE;
 
        return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
-                       &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
+                       &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+                       ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+                       cmd_size);
 }
 
 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
index 9b47dcb2a7d97184334cb2ca64daa5aa74e5a101..83735c52d34a0af7f7a7a41b1e06881c95ba7d02 100644 (file)
@@ -1868,6 +1868,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
                ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
                                &nvme_tcp_mq_ops,
                                BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+                               ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
                                sizeof(struct nvme_tcp_request));
                if (ret)
                        goto out_free_io_queues;
index b45fe3adf015fd1327ee73a6452ad32feafbff81..08c583258e90fa3966f062c3ca3cc2c47666b73a 100644 (file)
@@ -494,7 +494,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
                return ret;
 
        ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
-                       &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+                       &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
                        sizeof(struct nvme_loop_iod) +
                        NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
        if (ret)