blk-mq: kill q->mq_map
author: Jens Axboe <axboe@kernel.dk>
Tue, 16 Oct 2018 20:23:06 +0000 (14:23 -0600)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 7 Nov 2018 20:44:59 +0000 (13:44 -0700)
It's just a pointer to set->mq_map, use that instead. Move the
assignment a bit earlier, so we always know it's valid.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
block/blk-mq.h
include/linux/blkdev.h

index 67a2baf..766facf 100644 (file)
@@ -2322,7 +2322,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
         * If the cpu isn't present, the cpu is mapped to first hctx.
         */
        for_each_possible_cpu(i) {
-               hctx_idx = q->mq_map[i];
+               hctx_idx = set->mq_map[i];
                /* unmapped hw queue can be remapped after CPU topo changed */
                if (!set->tags[hctx_idx] &&
                    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2332,7 +2332,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                         * case, remap the current ctx to hctx[0] which
                         * is guaranteed to always have tags allocated
                         */
-                       q->mq_map[i] = 0;
+                       set->mq_map[i] = 0;
                }
 
                ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2430,8 +2430,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
                                     struct request_queue *q)
 {
-       q->tag_set = set;
-
        mutex_lock(&set->tag_list_lock);
 
        /*
@@ -2468,8 +2466,6 @@ void blk_mq_release(struct request_queue *q)
                kobject_put(&hctx->kobj);
        }
 
-       q->mq_map = NULL;
-
        kfree(q->queue_hw_ctx);
 
        /*
@@ -2589,7 +2585,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                int node;
                struct blk_mq_hw_ctx *hctx;
 
-               node = blk_mq_hw_queue_to_node(q->mq_map, i);
+               node = blk_mq_hw_queue_to_node(set->mq_map, i);
                /*
                 * If the hw queue has been mapped to another numa node,
                 * we need to realloc the hctx. If allocation fails, fallback
@@ -2666,8 +2662,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        if (!q->queue_hw_ctx)
                goto err_percpu;
 
-       q->mq_map = set->mq_map;
-
        blk_mq_realloc_hw_ctxs(set, q);
        if (!q->nr_hw_queues)
                goto err_hctxs;
@@ -2676,6 +2670,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
        q->nr_queues = nr_cpu_ids;
+       q->tag_set = set;
 
        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
index 9497b47..9536be0 100644 (file)
@@ -75,7 +75,9 @@ extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                int cpu)
 {
-       return q->queue_hw_ctx[q->mq_map[cpu]];
+       struct blk_mq_tag_set *set = q->tag_set;
+
+       return q->queue_hw_ctx[set->mq_map[cpu]];
 }
 
 /*
index c8fa4d3..2ae7465 100644 (file)
@@ -409,8 +409,6 @@ struct request_queue {
 
        const struct blk_mq_ops *mq_ops;
 
-       unsigned int            *mq_map;
-
        /* sw queues */
        struct blk_mq_ctx __percpu      *queue_ctx;
        unsigned int            nr_queues;