nvme: consolidate setting the tagset flags
author		Christoph Hellwig <hch@lst.de>
		Wed, 30 Nov 2022 16:19:50 +0000 (17:19 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Wed, 1 Feb 2023 07:34:43 +0000 (08:34 +0100)
[ Upstream commit db45e1a5ddccc034eb60d62fc5352022d7963ae2 ]

All nvme transports should be using the same flags for their tagsets,
with the exception of the blocking flag, which should only be set for
transports that can block in ->queue_rq.

Add a NVME_F_BLOCKING flag to nvme_ctrl_ops to control the blocking
behavior and lift setting the flags into nvme_alloc_{admin,io}_tag_set.
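
For illustration, a minimal sketch of the resulting calling convention,
assuming a fabrics transport "foo" that can block in ->queue_rq.  The
nvme_foo_* names, the nr_maps value and the cmd_size are made up;
NVME_F_BLOCKING and the helper signatures are the ones introduced by
this patch:

	static const struct nvme_ctrl_ops nvme_foo_ctrl_ops = {
		.name	= "foo",
		.flags	= NVME_F_FABRICS | NVME_F_BLOCKING,
		/* reg_read32/reg_write32/... elided */
	};

	static int nvme_foo_alloc_tag_sets(struct nvme_foo_ctrl *ctrl)
	{
		int ret;

		/*
		 * No BLK_MQ_F_* argument any more: the helpers start from
		 * BLK_MQ_F_NO_SCHED (admin) resp. BLK_MQ_F_SHOULD_MERGE
		 * (I/O) and OR in BLK_MQ_F_BLOCKING when the controller
		 * ops have NVME_F_BLOCKING set.
		 */
		ret = nvme_alloc_admin_tag_set(&ctrl->ctrl,
				&ctrl->admin_tag_set, &nvme_foo_admin_mq_ops,
				sizeof(struct nvme_foo_request));
		if (ret)
			return ret;
		return nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
				&nvme_foo_mq_ops, 2,
				sizeof(struct nvme_foo_request));
	}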

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Stable-dep-of: 98e3528012cd ("nvme-fc: fix initialization order")
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index badc698..9e9ad91 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4840,8 +4840,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
-               const struct blk_mq_ops *ops, unsigned int flags,
-               unsigned int cmd_size)
+               const struct blk_mq_ops *ops, unsigned int cmd_size)
 {
        int ret;
 
@@ -4851,7 +4850,9 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
        if (ctrl->ops->flags & NVME_F_FABRICS)
                set->reserved_tags = NVMF_RESERVED_TAGS;
        set->numa_node = ctrl->numa_node;
-       set->flags = flags;
+       set->flags = BLK_MQ_F_NO_SCHED;
+       if (ctrl->ops->flags & NVME_F_BLOCKING)
+               set->flags |= BLK_MQ_F_BLOCKING;
        set->cmd_size = cmd_size;
        set->driver_data = ctrl;
        set->nr_hw_queues = 1;
@@ -4895,8 +4896,8 @@ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
 EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
 
 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
-               const struct blk_mq_ops *ops, unsigned int flags,
-               unsigned int nr_maps, unsigned int cmd_size)
+               const struct blk_mq_ops *ops, unsigned int nr_maps,
+               unsigned int cmd_size)
 {
        int ret;
 
@@ -4905,7 +4906,9 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
        set->queue_depth = ctrl->sqsize + 1;
        set->reserved_tags = NVMF_RESERVED_TAGS;
        set->numa_node = ctrl->numa_node;
-       set->flags = flags;
+       set->flags = BLK_MQ_F_SHOULD_MERGE;
+       if (ctrl->ops->flags & NVME_F_BLOCKING)
+               set->flags |= BLK_MQ_F_BLOCKING;
        set->cmd_size = cmd_size,
        set->driver_data = ctrl;
        set->nr_hw_queues = ctrl->queue_count - 1;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 20b0c29..5f07a6b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2903,7 +2903,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
        nvme_fc_init_io_queues(ctrl);
 
        ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
-                       &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
+                       &nvme_fc_mq_ops, 1,
                        struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
                                    ctrl->lport->ops->fcprqst_priv_sz));
        if (ret)
@@ -3509,7 +3509,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        nvme_fc_init_queue(ctrl, 0);
 
        ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
-                       &nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+                       &nvme_fc_admin_mq_ops,
                        struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
                                    ctrl->lport->ops->fcprqst_priv_sz));
        if (ret)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index aef3693..01d9042 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -508,6 +508,8 @@ struct nvme_ctrl_ops {
        unsigned int flags;
 #define NVME_F_FABRICS                 (1 << 0)
 #define NVME_F_METADATA_SUPPORTED      (1 << 1)
+#define NVME_F_BLOCKING                        (1 << 2)
+
        const struct attribute_group **dev_attr_groups;
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
@@ -739,12 +741,11 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl);
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
-               const struct blk_mq_ops *ops, unsigned int flags,
-               unsigned int cmd_size);
+               const struct blk_mq_ops *ops, unsigned int cmd_size);
 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
-               const struct blk_mq_ops *ops, unsigned int flags,
-               unsigned int nr_maps, unsigned int cmd_size);
+               const struct blk_mq_ops *ops, unsigned int nr_maps,
+               unsigned int cmd_size);
 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a55d3e8..6f918e6 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -798,7 +798,7 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
                            NVME_RDMA_METADATA_SGL_SIZE;
 
        return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
-                       &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+                       &nvme_rdma_mq_ops,
                        ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
                        cmd_size);
 }
@@ -848,7 +848,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (new) {
                error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
                                &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
-                               BLK_MQ_F_NO_SCHED,
                                sizeof(struct nvme_rdma_request) +
                                NVME_RDMA_DATA_SGL_SIZE);
                if (error)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 83735c5..eacd445 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1867,7 +1867,6 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
        if (new) {
                ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
                                &nvme_tcp_mq_ops,
-                               BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
                                ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
                                sizeof(struct nvme_tcp_request));
                if (ret)
@@ -1943,7 +1942,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
        if (new) {
                error = nvme_alloc_admin_tag_set(ctrl,
                                &to_tcp_ctrl(ctrl)->admin_tag_set,
-                               &nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
+                               &nvme_tcp_admin_mq_ops,
                                sizeof(struct nvme_tcp_request));
                if (error)
                        goto out_free_queue;
@@ -2524,7 +2523,7 @@ static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
        .name                   = "tcp",
        .module                 = THIS_MODULE,
-       .flags                  = NVME_F_FABRICS,
+       .flags                  = NVME_F_FABRICS | NVME_F_BLOCKING,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 08c5832..c864e90 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -353,7 +353,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        ctrl->ctrl.queue_count = 1;
 
        error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
-                       &nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+                       &nvme_loop_admin_mq_ops,
                        sizeof(struct nvme_loop_iod) +
                        NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
        if (error)
@@ -494,7 +494,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
                return ret;
 
        ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
-                       &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
+                       &nvme_loop_mq_ops, 1,
                        sizeof(struct nvme_loop_iod) +
                        NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
        if (ret)