blk-mq: move tags and sched_tags info from sysfs to debugfs
block/blk-mq-tag.c
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
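        /*
         * Each active queue gets a fair share of the depth, rounded up,
         * but never fewer than four tags. Example (illustrative numbers):
         * a map of depth 128 shared by four active queues allows each
         * queue up to max((128 + 4 - 1) / 4, 4) == 32 tags in flight.
         */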
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
            !hctx_may_queue(data->hctx, bt))
                return -1;
        return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_WAIT(wait);
        unsigned int tag_offset;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_TAG_FAIL;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

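        /*
         * Tag numbering puts the reserved set first: reserved tags live
         * in [0, nr_reserved_tags) and normal tags start at
         * nr_reserved_tags, hence the tag_offset added back at found_tag.
         */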
        tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_TAG_FAIL;

        ws = bt_wait_ptr(bt, data->hctx);
        do {
                prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                blk_mq_put_ctx(data->ctx);

                io_schedule();

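                /*
                 * The task may have been moved to a different CPU while
                 * it slept, so remap ctx and hctx (and with them the tag
                 * set and wait queue) before retrying the allocation.
                 */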
                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                finish_wait(&ws->wait, &wait);
                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        finish_wait(&ws->wait, &wait);

found_tag:
        return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
                    struct blk_mq_ctx *ctx, unsigned int tag)
{
        if (tag >= tags->nr_reserved_tags) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tag bit is set before
         * ->rqs[] is assigned during allocation; check it before use.
         */
        rq = tags->rqs[bitnr];
        if (rq && rq->q == hctx->queue)
                iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * As in bt_iter(), ->rqs[] may not yet be assigned when the tag
         * bit is observed, so skip a still-NULL entry.
         */
        rq = tags->rqs[bitnr];
        if (rq)
                iter_data->fn(rq, iter_data->data, reserved);
        return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

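/**
 * blk_mq_tagset_busy_iter - iterate over all requests in a tag set
 * @tagset:     tag set to iterate over
 * @fn:         called for each request whose tag is set, along with
 *              @priv and whether the tag comes from the reserved pool
 * @priv:       opaque data passed through to @fn
 */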
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
        int i, j, ret = 0;

        if (!set->ops->reinit_request)
                goto out;

        for (i = 0; i < set->nr_hw_queues; i++) {
                struct blk_mq_tags *tags = set->tags[i];

                /*
                 * tags can be NULL if a hardware queue was never set up,
                 * so guard against that before walking its requests.
                 */
                if (!tags)
                        continue;

                for (j = 0; j < tags->nr_tags; j++) {
                        if (!tags->static_rqs[j])
                                continue;

                        ret = set->ops->reinit_request(set->driver_data,
                                                tags->static_rqs[j]);
                        if (ret)
                                goto out;
                }
        }

out:
        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);

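/*
 * Like blk_mq_tagset_busy_iter(), but scoped to one request queue: walk
 * each mapped hardware queue and call @fn for every tag that is in use,
 * reserved tags first.
 */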
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check.
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
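        /*
         * A shift of -1 asks sbitmap to choose a sensible bits-per-word
         * value for this depth instead of forcing one here.
         */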
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
                     node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

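/*
 * Resize a hardware queue's tag map to @tdepth (which counts reserved
 * tags, though the reserved set itself is never resized). Shrinking, or
 * growing within the original allocation, just resizes the bitmap;
 * growing past it requires @can_grow and swaps in a freshly allocated
 * rq map.
 */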
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        tdepth -= tags->nr_reserved_tags;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                int ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr);
                *tagsptr = new;
        } else {
                /*
                 * We don't need to (and can't) update reserved tags here;
                 * they remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
        }

        return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        int hwq = 0;

        if (q->mq_ops) {
                hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
                hwq = hctx->queue_num;
        }

        return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
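
/*
 * Example: with BLK_MQ_UNIQUE_TAG_BITS == 16 (see include/linux/blk-mq.h),
 * tag 0x12 on hardware queue 3 encodes to the unique tag 0x00030012. The
 * blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() helpers in
 * that header split a unique tag back into its two halves.
 */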