EXPORT_SYMBOL_GPL(nvme_complete_async_event);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
- const struct blk_mq_ops *ops, unsigned int flags,
- unsigned int cmd_size)
+ const struct blk_mq_ops *ops, unsigned int cmd_size)
{
int ret;
if (ctrl->ops->flags & NVME_F_FABRICS)
set->reserved_tags = NVMF_RESERVED_TAGS;
set->numa_node = ctrl->numa_node;
- set->flags = flags;
+ set->flags = BLK_MQ_F_NO_SCHED;
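+ /* blocking transports need their hctxs marked BLK_MQ_F_BLOCKING */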
+ if (ctrl->ops->flags & NVME_F_BLOCKING)
+ set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
set->driver_data = ctrl;
set->nr_hw_queues = 1;
EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
- const struct blk_mq_ops *ops, unsigned int flags,
- unsigned int nr_maps, unsigned int cmd_size)
+ const struct blk_mq_ops *ops, unsigned int nr_maps,
+ unsigned int cmd_size)
{
int ret;
set->queue_depth = ctrl->sqsize + 1;
set->reserved_tags = NVMF_RESERVED_TAGS;
set->numa_node = ctrl->numa_node;
- set->flags = flags;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ if (ctrl->ops->flags & NVME_F_BLOCKING)
+ set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
set->driver_data = ctrl;
set->nr_hw_queues = ctrl->queue_count - 1;
nvme_fc_init_io_queues(ctrl);
ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
- &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
+ &nvme_fc_mq_ops, 1,
struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
nvme_fc_init_queue(ctrl, 0);
ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
- &nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ &nvme_fc_admin_mq_ops,
struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
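+/* ->queue_rq may block (e.g. nvme-tcp); see nvme_alloc_*_tag_set() */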
+#define NVME_F_BLOCKING (1 << 2)
+
const struct attribute_group **dev_attr_groups;
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
- const struct blk_mq_ops *ops, unsigned int flags,
- unsigned int cmd_size);
+ const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
- const struct blk_mq_ops *ops, unsigned int flags,
- unsigned int nr_maps, unsigned int cmd_size);
+ const struct blk_mq_ops *ops, unsigned int nr_maps,
+ unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
NVME_RDMA_METADATA_SGL_SIZE;
return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
- &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ &nvme_rdma_mq_ops,
ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
cmd_size);
}
if (new) {
error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
&ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
- BLK_MQ_F_NO_SCHED,
sizeof(struct nvme_rdma_request) +
NVME_RDMA_DATA_SGL_SIZE);
if (error)
if (new) {
ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
&nvme_tcp_mq_ops,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
sizeof(struct nvme_tcp_request));
if (ret)
if (new) {
error = nvme_alloc_admin_tag_set(ctrl,
&to_tcp_ctrl(ctrl)->admin_tag_set,
- &nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
+ &nvme_tcp_admin_mq_ops,
sizeof(struct nvme_tcp_request));
if (error)
goto out_free_queue;
static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.name = "tcp",
.module = THIS_MODULE,
- .flags = NVME_F_FABRICS,
+ .flags = NVME_F_FABRICS | NVME_F_BLOCKING,
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
ctrl->ctrl.queue_count = 1;
error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
- &nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ &nvme_loop_admin_mq_ops,
sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (error)
return ret;
ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
- &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
+ &nvme_loop_mq_ops, 1,
sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (ret)
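
For context only, not part of the patch: a minimal sketch of how a hypothetical fabrics transport, here called "foo", would use the helpers after this change. All foo_* identifiers are invented for illustration; only the nvme_* symbols and the new signatures come from the diff above. Blocking dispatch is now requested once through the ctrl ops flag instead of a BLK_MQ_F_* argument at every tag-set allocation.

static const struct nvme_ctrl_ops foo_ctrl_ops = {
	.name		= "foo",
	.module		= THIS_MODULE,
	/* NVME_F_BLOCKING replaces passing BLK_MQ_F_BLOCKING to the helpers */
	.flags		= NVME_F_FABRICS | NVME_F_BLOCKING,
	/* remaining callbacks omitted for brevity */
};

static int foo_alloc_tag_sets(struct foo_ctrl *ctrl)
{
	int ret;

	/* no flags argument any more; the core picks NO_SCHED/SHOULD_MERGE */
	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
			&foo_admin_mq_ops, sizeof(struct foo_request));
	if (ret)
		return ret;

	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
			&foo_mq_ops, 2, sizeof(struct foo_request));
	if (ret)
		nvme_remove_admin_tag_set(&ctrl->ctrl);
	return ret;
}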