blk-mq: fix tags UAF when shrinking q->nr_hw_queues
author Chengming Zhou <zhouchengming@bytedance.com>
Fri, 8 Sep 2023 00:57:02 +0000 (08:57 +0800)
committer Jens Axboe <axboe@kernel.dk>
Mon, 11 Sep 2023 22:17:34 +0000 (16:17 -0600)
When nr_hw_queues shrinks, we free the excess tags before realloc'ing
hw_ctxs for each queue. During that resize we may still need to access
those tags: blk_mq_tag_idle(hctx), for example, accesses the queue's
shared tags.

This can cause a slab use-after-free, as reported by KASAN. Fix it by
deferring the release of the excess tags to the end of
__blk_mq_update_nr_hw_queues(), after the queues have been unfrozen.
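
As a concrete illustration, a minimal userspace analogue of the ordering
problem (a sketch with hypothetical names such as idle_queue() and
update_nr_hw_queues(); not the kernel code itself): freeing the excess
per-queue tags before the resize leaves a window in which teardown of
the old hctxs still dereferences them, while deferring the free until
after the resize closes that window.

  /* Hypothetical userspace sketch of the ordering bug and its fix. */
  #include <stdio.h>
  #include <stdlib.h>

  struct tags { int depth; };

  static struct tags **tags;    /* stand-in for set->tags */
  static int nr_hw_queues;

  /* Stand-in for blk_mq_tag_idle(): teardown touches the old tags. */
  static void idle_queue(int i)
  {
      printf("idle queue %d, depth %d\n", i, tags[i]->depth);
  }

  static void update_nr_hw_queues(int new_nr)
  {
      int prev_nr = nr_hw_queues;
      int i;

      /*
       * Buggy order: freeing the excess tags up front, i.e.
       *   for (i = new_nr; i < prev_nr; i++) free(tags[i]);
       * would make the idle_queue() calls below a use-after-free.
       */
      for (i = 0; i < prev_nr; i++)
          idle_queue(i);    /* the resize still reads every old tag set */

      nr_hw_queues = new_nr;

      /* Fixed order: free the excess tags only once the resize is done. */
      for (i = new_nr; i < prev_nr; i++) {
          free(tags[i]);
          tags[i] = NULL;
      }
  }

  int main(void)
  {
      int i;

      nr_hw_queues = 4;
      tags = calloc(nr_hw_queues, sizeof(*tags));
      for (i = 0; i < nr_hw_queues; i++) {
          tags[i] = malloc(sizeof(*tags[i]));
          tags[i]->depth = 64;
      }

      update_nr_hw_queues(2);    /* shrink: tags[2..3] freed after resize */

      for (i = 0; i < nr_hw_queues; i++)
          free(tags[i]);
      free(tags);
      return 0;
  }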

Fixes: e1dd7bc93029 ("blk-mq: fix tags leak when shrink nr_hw_queues")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Closes: https://lore.kernel.org/all/CAHj4cs_CK63uoDpGBGZ6DN4OCTpzkR3UaVgK=LX8Owr8ej2ieQ@mail.gmail.com/
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20230908005702.2183908-1-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c

index ec922c6..1fafd54 100644
@@ -4405,11 +4405,8 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
        struct blk_mq_tags **new_tags;
        int i;
 
-       if (set->nr_hw_queues >= new_nr_hw_queues) {
-               for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
-                       __blk_mq_free_map_and_rqs(set, i);
+       if (set->nr_hw_queues >= new_nr_hw_queues)
                goto done;
-       }
 
        new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
                                GFP_KERNEL, set->numa_node);
@@ -4719,7 +4716,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
        struct request_queue *q;
        LIST_HEAD(head);
-       int prev_nr_hw_queues;
+       int prev_nr_hw_queues = set->nr_hw_queues;
+       int i;
 
        lockdep_assert_held(&set->tag_list_lock);
 
@@ -4746,7 +4744,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
                blk_mq_sysfs_unregister_hctxs(q);
        }
 
-       prev_nr_hw_queues = set->nr_hw_queues;
        if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
                goto reregister;
 
@@ -4781,6 +4778,10 @@ switch_back:
 
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
+
+       /* Free the excess tags when nr_hw_queues shrink. */
+       for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+               __blk_mq_free_map_and_rqs(set, i);
 }
 
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)