--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int flags,
- unsigned int cmd_size)
+ unsigned int nr_maps, unsigned int cmd_size)
{
int ret;
set->driver_data = ctrl;
set->nr_hw_queues = ctrl->queue_count - 1;
set->timeout = NVME_IO_TIMEOUT;
- if (ops->map_queues)
- set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ set->nr_maps = nr_maps;
ret = blk_mq_alloc_tag_set(set);
if (ret)
return ret;
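For reference, nr_maps is the number of entries of the tag set's queue-map array (enum hctx_type in include/linux/blk-mq.h) that the transport will populate in its ->map_queues callback. A hedged summary of the values the call sites below pass, not part of the patch itself:

/*
 * Illustrative only -- meaning of the nr_maps values used by the callers:
 *   1              - HCTX_TYPE_DEFAULT map only (fc, loop)
 *   2              - default + HCTX_TYPE_READ maps (rdma/tcp without poll queues)
 *   HCTX_MAX_TYPES - default + read + HCTX_TYPE_POLL maps (rdma/tcp with nr_poll_queues set)
 */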
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
nvme_fc_init_io_queues(ctrl);
ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
- &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int flags,
- unsigned int cmd_size);
+ unsigned int nr_maps, unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
NVME_RDMA_METADATA_SGL_SIZE;
return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
- &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
+ &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+ cmd_size);
}
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
&nvme_tcp_mq_ops,
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+ ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
sizeof(struct nvme_tcp_request));
if (ret)
goto out_free_io_queues;
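To illustrate why the rdma and tcp call sites need HCTX_MAX_TYPES once poll queues are configured, here is a heavily simplified sketch of a ->map_queues callback. It is not part of the patch, the function name is made up, and the real nvme_rdma/nvme_tcp callbacks also distribute queue counts and offsets across the maps:

/*
 * Simplified sketch: a transport can only fill as many entries of
 * set->map[] as set->nr_maps covers, so a separate poll map requires
 * nr_maps == HCTX_MAX_TYPES.  Per-map nr_queues/queue_offset setup is
 * omitted here.
 */
static void example_map_queues(struct blk_mq_tag_set *set)
{
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	if (set->nr_maps > HCTX_TYPE_READ)
		blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
	if (set->nr_maps > HCTX_TYPE_POLL)
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
}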
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
return ret;
ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
- &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (ret)