/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
- hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
flush_rq->tag = -1;
}
flush_rq->tag = first_rq->tag;
fq->orig_rq = first_rq;
- hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
}
unsigned long flags;
struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
/*
* After populating an empty queue, kick it to avoid stall. Read
io_schedule();
data->ctx = blk_mq_get_ctx(data->q);
- data->hctx = data->q->mq_ops->map_queue(data->q,
- data->ctx->cpu);
+ data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
if (data->flags & BLK_MQ_REQ_RESERVED) {
bt = &data->hctx->tags->breserved_tags;
} else {
int hwq = 0;
if (q->mq_ops) {
- hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
hwq = hctx->queue_num;
}
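For context, and unchanged by this patch: the hwq resolved here feeds the driver-visible unique tag, which (going by the blk_mq_unique_tag_to_*() helpers further down) is encoded roughly as

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);

so the conversion only changes how the hctx is looked up, not the tag encoding.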
return ERR_PTR(ret);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
blk_mq_put_ctx(ctx);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
ctx = alloc_data.ctx;
void blk_mq_free_request(struct request *rq)
{
- struct blk_mq_hw_ctx *hctx;
- struct request_queue *q = rq->q;
-
- hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
- blk_mq_free_hctx_request(hctx, rq);
+ blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);
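The collapsed one-liner above is behaviourally identical to the removed body; spelled out with the locals kept, it would read:

	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);

	blk_mq_free_hctx_request(hctx, rq);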
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
bool from_schedule)
{
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
trace_block_unplug(q, depth, !from_schedule);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
* offline now
blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
if (rw_is_sync(bio_op(bio), bio->bi_opf))
op_flags |= REQ_SYNC;
trace_block_sleeprq(q, bio, op);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
ctx = alloc_data.ctx;
{
int ret;
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
- rq->mq_ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
struct blk_mq_queue_data bd = {
.rq = rq,
.list = NULL,
return cookie;
}
-/*
- * Default mapping to a software queue, since we use one per CPU.
- */
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
-{
- return q->queue_hw_ctx[q->mq_map[cpu]];
-}
-EXPORT_SYMBOL(blk_mq_map_queue);
-
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags, unsigned int hctx_idx)
{
if (!cpu_online(i))
continue;
- hctx = q->mq_ops->map_queue(q, i);
+ hctx = blk_mq_map_queue(q, i);
/*
* Set local node, IFF we have more than one hw queue. If
continue;
ctx = per_cpu_ptr(q->queue_ctx, i);
- hctx = q->mq_ops->map_queue(q, i);
+ hctx = blk_mq_map_queue(q, i);
cpumask_set_cpu(i, hctx->cpumask);
ctx->index_hw = hctx->nr_ctx;
if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
return -EINVAL;
- if (!set->ops->queue_rq || !set->ops->map_queue)
+ if (!set->ops->queue_rq)
return -EINVAL;
if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+ int cpu)
+{
+ return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+
/*
* sysfs helpers
*/
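The mapping helper now lives as a static inline in block/blk-mq.h (above) rather than as an exported symbol in blk-mq.c. A minimal sketch of the calling convention before and after, using the same names as the hunks above:

	/* before: indirect call through the driver-supplied method */
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/* after: direct call into the shared inline helper */
	hctx = blk_mq_map_queue(q, ctx->cpu);	/* q->queue_hw_ctx[q->mq_map[ctx->cpu]] */

Every caller in block/ is converted mechanically to the second form.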
static inline struct blk_flush_queue *blk_get_flush_queue(
struct request_queue *q, struct blk_mq_ctx *ctx)
{
- struct blk_mq_hw_ctx *hctx;
-
- if (!q->mq_ops)
- return q->fq;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- return hctx->fq;
+ if (q->mq_ops)
+ return blk_mq_map_queue(q, ctx->cpu)->fq;
+ return q->fq;
}
static inline void __blk_get_queue(struct request_queue *q)
static struct blk_mq_ops loop_mq_ops = {
.queue_rq = loop_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_request = loop_init_request,
};
static struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
.complete = mtip_softirq_done_fn,
static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
.complete = null_softirq_done_fn,
};
static struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_request = rbd_init_request,
};
static struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
- .map_queue = blk_mq_map_queue,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
};
static struct blk_mq_ops blkfront_mq_ops = {
.queue_rq = blkif_queue_rq,
- .map_queue = blk_mq_map_queue,
};
static void blkif_set_queue_limits(struct blkfront_info *info)
static struct blk_mq_ops dm_mq_ops = {
.queue_rq = dm_mq_queue_rq,
- .map_queue = blk_mq_map_queue,
.complete = dm_softirq_done,
.init_request = dm_mq_init_request,
};
static struct blk_mq_ops ubiblock_mq_ops = {
.queue_rq = ubiblock_queue_rq,
.init_request = ubiblock_init_request,
- .map_queue = blk_mq_map_queue,
};
static DEFINE_IDR(ubiblock_minor_idr);
static struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_hctx = nvme_admin_init_hctx,
.exit_hctx = nvme_admin_exit_hctx,
.init_request = nvme_admin_init_request,
static struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_hctx = nvme_init_hctx,
.init_request = nvme_init_request,
.timeout = nvme_timeout,
static struct blk_mq_ops nvme_rdma_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_rdma_init_request,
.exit_request = nvme_rdma_exit_request,
.reinit_request = nvme_rdma_reinit_request,
static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_rdma_init_admin_request,
.exit_request = nvme_rdma_exit_admin_request,
.reinit_request = nvme_rdma_reinit_request,
static struct blk_mq_ops nvme_loop_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_loop_init_request,
.init_hctx = nvme_loop_init_hctx,
.timeout = nvme_loop_timeout,
static struct blk_mq_ops nvme_loop_admin_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_loop_init_admin_request,
.init_hctx = nvme_loop_init_admin_hctx,
.timeout = nvme_loop_timeout,
}
static struct blk_mq_ops scsi_mq_ops = {
- .map_queue = blk_mq_map_queue,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
};
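With every in-tree driver wiring ->map_queue to the same blk_mq_map_queue, the member can be dropped from struct blk_mq_ops and queue_rq becomes the only mandatory callback (see the relaxed check in blk_mq_alloc_tag_set above). Schematically, for the smallest converted ops table (xen-blkfront from the hunks above):

	/* before */
	static struct blk_mq_ops blkfront_mq_ops = {
		.queue_rq	= blkif_queue_rq,
		.map_queue	= blk_mq_map_queue,
	};

	/* after */
	static struct blk_mq_ops blkfront_mq_ops = {
		.queue_rq	= blkif_queue_rq,
	};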
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
*/
queue_rq_fn *queue_rq;
- /*
- * Map to specific hardware queue
- */
- map_queue_fn *map_queue;
-
/*
* Called on request timeout
*/
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);