// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

/*
 * Recalculate the wakeup batch when the tag map is shared by multiple hctxs.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
		unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}

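/*
 * Note: the wake batch has to track the number of sharers because each
 * active queue is only allowed a fraction of a shared tag map. Sizing the
 * batch to that fraction (rather than to the full depth) avoids a waiter
 * sleeping indefinitely when its queue's share is smaller than a full-depth
 * wake batch. See sbitmap_queue_recalculate_wake_batch() for the exact
 * clamping.
 */
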
/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users already
 * account for this queue when sharing out tags.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;
	struct blk_mq_tags *tags = hctx->tags;

	/*
	 * Calling test_bit() prior to test_and_set_bit() is intentional:
	 * it avoids dirtying the cacheline if the queue is already active.
	 */
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return;
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues + 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues - 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
			!hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

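/*
 * Note: hctx_may_queue(), called above and defined in block/blk-mq.h,
 * implements the fairness check that the active_queues accounting feeds:
 * roughly, a hctx without an elevator may only allocate while its number of
 * in-flight requests stays below the tag depth divided by the number of
 * active queues (with a small floor), so a single busy submitter cannot
 * monopolise a shared tag map.
 */
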
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;

	return ret;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue changed, issue a fake wake up
		 * on the previous queue to compensate for the missed wakeup,
		 * so other allocations on the previous queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev, 1);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
					tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!iter_data->reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data) where rq is a
 *		pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

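/*
 * blk_mq_tagset_count_completed_rqs() below doubles as a minimal in-tree
 * example of a busy_tag_iter_fn: it receives each started request plus the
 * caller's cookie (here a completion counter) and returns true so that the
 * iteration keeps going.
 */
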
static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv) where rq
 *		is a pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check.
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}

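/*
 * For example, the request timeout path uses the iterator above:
 * blk_mq_timeout_work() in blk-mq.c passes blk_mq_check_expired() as @fn to
 * scan every driver-tagged request for expiry (function names as found in
 * blk-mq.c).
 */
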
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

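/*
 * The alloc_policy argument is one of BLK_TAG_ALLOC_FIFO or BLK_TAG_ALLOC_RR
 * and is normally derived from the tag_set flags by the caller. Round-robin
 * makes the sbitmap continue scanning after the last allocated bit instead
 * of always preferring the lowest free bits.
 */
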
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
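
/*
 * For illustration: a driver that logged the value of blk_mq_unique_tag()
 * can recover the two halves with the helpers from include/linux/blk-mq.h:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 *
 * which simply undo the shift and mask applied above.
 */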