// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}
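
/*
 * Worked example of the bucketing above (added for illustration): buckets
 * interleave read/write per size class, starting at 512 bytes. A 512-byte
 * read (ddir == 0, ilog2(512) == 9) lands in bucket 0 and a 512-byte write
 * in bucket 1; a 4096-byte read (ilog2 == 12) lands in bucket
 * 0 + 2*(12 - 9) == 6, the corresponding write in bucket 7. Requests past
 * the last size class are clamped into the top read/write pair.
 */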

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct hd_struct *part;
	unsigned int *inflight;
};

static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	/*
	 * index[0] counts the specific partition that was asked for.
	 */
	if (rq->part == mi->part)
		mi->inflight[0]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
{
	unsigned inflight[2];
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return inflight[0];
}

static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
				     struct request *rq, void *priv,
				     bool reserved)
{
	struct mq_inflight *mi = priv;

	if (rq->part == mi->part)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
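
/*
 * Illustrative sketch (not part of this file): the driver-side pattern the
 * freeze API above supports. my_update_queue_limits() is hypothetical; the
 * freeze/unfreeze calls are the exported interfaces defined above.
 */
static void my_driver_reconfigure(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* wait until no request is in flight */
	my_update_queue_limits(q);	/* hypothetical: queue data is now safe to change */
	blk_mq_unfreeze_queue(q);	/* resume request processing */
}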

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, no new
 * dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
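
/*
 * Illustrative sketch (not part of this file): quiesce pauses dispatch
 * without waiting for outstanding requests to complete, which suits
 * paths like controller reset. my_reset_hw() is hypothetical.
 */
static void my_driver_reset(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);	/* no ->queue_rq() runs after this returns */
	my_reset_hw(q);			/* hypothetical hardware reset */
	blk_mq_unquiesce_queue(q);	/* reruns the hardware queues */
}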

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

/*
 * Only need start/end time stamping if we have stats enabled, or using
 * an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
}

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];
	req_flags_t rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
			rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->mq_hctx = data->hctx;
	rq->rq_flags = rq_flags;
	rq->cmd_flags = op;
	if (data->flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->io_start_time_ns = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	/* tag was already set */
	rq->extra_len = 0;
	WRITE_ONCE(rq->deadline, 0);

	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	refcount_set(&rq->ref, 1);
	return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
					  struct bio *bio,
					  struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	bool put_ctx_on_error = false;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx)) {
		data->ctx = blk_mq_get_ctx(q);
		put_ctx_on_error = true;
	}
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
						data->ctx);
	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	} else {
		blk_mq_tag_busy(data->hctx);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (put_ctx_on_error) {
			blk_mq_put_ctx(data->ctx);
			data->ctx = NULL;
		}
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags);
	if (!op_is_flush(data->cmd_flags)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.prepare_request) {
			if (e->type->icq_cache)
				blk_mq_sched_assign_ioc(rq);

			e->type->ops.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
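
/*
 * Illustrative sketch (not part of this file): allocating a passthrough
 * request and executing it synchronously. my_setup_cmd() is hypothetical;
 * blk_execute_rq() is the existing helper that inserts a request and
 * waits for its completion.
 */
static int my_send_internal_cmd(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	my_setup_cmd(rq);		/* hypothetical driver-private setup */
	blk_execute_rq(q, NULL, rq, 0);	/* insert at tail and wait */
	blk_mq_free_request(rq);
	return 0;
}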

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.finish_request)
			e->type->ops.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->backing_dev_info);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	u64 now = 0;

	if (blk_mq_need_time_stamp(rq))
		now = ktime_get_ns();

	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq, now);

	blk_account_io_done(rq, now);

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;
	struct request_queue *q = rq->q;

	q->mq_ops->complete(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct request_queue *q = rq->q;
	bool shared = false;
	int cpu;

	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	/*
	 * For most single-queue controllers there is only one irq vector
	 * for handling I/O completions, and its affinity is set to span
	 * all possible CPUs. On most architectures this means the irq is
	 * handled on one specific CPU.
	 *
	 * So complete the I/O request in softirq context in the single
	 * queue case, to avoid degrading I/O performance via irqs-off
	 * latency.
	 */
	if (q->nr_hw_queues == 1) {
		__blk_complete_request(rq);
		return;
	}

	/*
	 * For a polled request, always complete locally; it's pointless
	 * to redirect the completion.
	 */
	if ((rq->cmd_flags & REQ_HIPRI) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
		q->mq_ops->complete(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		q->mq_ops->complete(rq);
	}
	put_cpu();
}

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
	__releases(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
bool blk_mq_complete_request(struct request *rq)
{
	if (unlikely(blk_should_fake_timeout(rq->q)))
		return false;
	__blk_mq_complete_request(rq);
	return true;
}
EXPORT_SYMBOL(blk_mq_complete_request);
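
/*
 * Illustrative sketch (not part of this file): a driver completion path.
 * struct my_dev and my_pop_completed_rq() are hypothetical; the call to
 * blk_mq_complete_request() may redirect the completion to the submitting
 * CPU via the IPI path above.
 */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;	/* hypothetical device structure */
	struct request *rq;

	while ((rq = my_pop_completed_rq(dev)))	/* hypothetical */
		blk_mq_complete_request(rq);
	return IRQ_HANDLED;
}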

void blk_mq_complete_request_sync(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);

int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
		rq->throtl_size = blk_rq_sectors(rq);
#endif
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears.  We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);
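
/*
 * Illustrative sketch (not part of this file): the sequence a typical
 * ->queue_rq() implementation follows around blk_mq_start_request().
 * my_hw_submit() is hypothetical.
 */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);	/* arm the timeout, mark IN_FLIGHT */
	if (my_hw_submit(rq))		/* hypothetical hardware submission */
		return BLK_STS_RESOURCE;	/* core will requeue and retry */
	return BLK_STS_OK;		/* completion arrives later via irq */
}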

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(q, rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	BUG_ON(!list_empty(&rq->queuelist));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP, rq has contained some driver specific
		 * data, so insert it to hctx dispatch list to avoid any
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
			       void *priv, bool reserved)
{
	/*
	 * If we find a request that is inflight and the queue matches,
	 * we know the queue is busy. Return false to stop the iteration.
	 */
	if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * Just do a quick check if it is expired before locking the request in
	 * so we're not unnecessarily synchronizing across CPUs.
	 */
	if (!blk_mq_req_expired(rq, next))
		return true;

	/*
	 * We have reason to believe the request may be expired. Take a
	 * reference on the request to lock this request lifetime into its
	 * currently allocated context to prevent it from being reallocated in
	 * the event the completion bypasses this timeout handler.
	 *
	 * If the reference was already released, then the driver beat the
	 * timeout handler to posting a natural completion.
	 */
	if (!refcount_inc_not_zero(&rq->ref))
		return true;

	/*
	 * The request is now locked and cannot be reallocated underneath the
	 * timeout handler's processing. Re-verify this exact request is truly
	 * expired; if it is not expired, then the request was completed and
	 * reallocated as a new request.
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);

	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
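
/*
 * Worked example of the index mapping above (added for illustration):
 * queued == 1 maps to index 1 (ilog2(1) == 0, plus one), queued == 8
 * maps to index 4 (ilog2(8) == 3), and large batches are clamped to
 * BLK_MQ_MAX_DISPATCH_ORDER - 1.
 */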

bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = rq->mq_hctx,
		.flags = BLK_MQ_REQ_NOWAIT,
		.cmd_flags = rq->cmd_flags,
	};
	bool shared;

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	shared = blk_mq_tag_busy(data.hctx);
	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (shared) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	return rq->tag != -1;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is a simple way to compute a running average
 * - weights of 7/8 and 1/8 are applied so that it decays exponentially
 * - take 4 as the scaling factor to avoid a result that rounds down to
 *   zero; the exact factor doesn't matter much because EWMA decays
 *   exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	if (hctx->queue->elevator)
		return;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}
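
/*
 * Worked example of the EWMA update above (added for illustration),
 * i.e. ewma = (ewma * 7 + (busy ? 16 : 0)) / 8 with integer division:
 * starting from 0, a busy dispatch yields (0*7 + 16)/8 == 2, a second
 * busy one (2*7 + 16)/8 == 3; an idle dispatch then decays it to
 * (3*7)/8 == 2, and repeated idle dispatches bring it back to 0.
 */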

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq, *nxt;
	bool no_tag = false;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;

	if (list_empty(list))
		return false;

	WARN_ON(!list_is_singular(list) && got_budget);

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		hctx = rq->mq_hctx;
		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
			break;

		if (!blk_mq_get_driver_tag(rq)) {
			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed. The
			 * waitqueue takes care of that. If the queue is run
			 * before we add this entry back on the dispatch list,
			 * we'll re-run it below.
			 */
			if (!blk_mq_mark_tag_wait(hctx, rq)) {
				blk_mq_put_dispatch_budget(hctx);
				/*
				 * For non-shared tags, the RESTART check
				 * will suffice.
				 */
				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
					no_tag = true;
				break;
			}
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			/*
			 * If an I/O scheduler has been configured and we got a
			 * driver tag for the next request already, free it
			 * again.
			 */
			if (!list_empty(list)) {
				nxt = list_first_entry(list, struct request, queuelist);
				blk_mq_put_driver_tag(nxt);
			}
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;

		/*
		 * If we didn't flush the entire list, we could have told
		 * the driver there was more coming, but that turned out to
		 * be a lie.
		 */
		if (q->mq_ops->commit_rqs)
			q->mq_ops->commit_rqs(hctx);

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
		 * bit is set, run queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && (ret == BLK_STS_RESOURCE))
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);

		blk_mq_update_dispatch_busy(hctx, true);
		return false;
	} else
		blk_mq_update_dispatch_busy(hctx, false);

	/*
	 * If the host/device is unable to accept more work, inform the
	 * caller of that.
	 */
	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
		return false;

	return (queued + errors) != 0;
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We should be running this queue from one of the CPUs that
	 * are mapped to it.
	 *
	 * There are at least two related races now between setting
	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
	 * __blk_mq_run_hw_queue():
	 *
	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
	 *   but it later comes online; in that case this warning is
	 *   harmless
	 *
	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
	 *   but later it becomes offline, then the warning can't be
	 *   triggered, and we depend on blk-mq timeout handler to
	 *   handle dispatched requests to this hctx
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu)) {
		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
			raw_smp_processor_id(),
			cpumask_empty(hctx->cpumask) ? "inactive": "active");
		dump_stack();
	}

	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do an unbound schedule if we can't find an online CPU for this
	 * hctx; this should only happen while handling CPU DEAD.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select CPU next time once after CPUs
		 * in hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	int srcu_idx;
	bool need_run;

	/*
	 * When queue is quiesced, we may be switching io scheduler, or
	 * updating nr_hw_queues, or other things, and we can't run queue
	 * any more, even __blk_mq_hctx_has_pending() can't be called safely.
	 *
	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	hctx_lock(hctx, &srcu_idx);
	need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);

	if (need_run) {
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used by a driver to pause .queue_rq() when
 * there aren't enough resources or some condition isn't satisfied, and
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

/*
 * This function is often used by a driver to pause .queue_rq() when
 * there aren't enough resources or some condition isn't satisfied, and
 * BLK_STS_RESOURCE is usually returned.
1561  *
1562  * We do not guarantee that dispatch can be drained or blocked
1563  * after blk_mq_stop_hw_queues() returns. Please use
1564  * blk_mq_quiesce_queue() for that requirement.
1565  */
1566 void blk_mq_stop_hw_queues(struct request_queue *q)
1567 {
1568         struct blk_mq_hw_ctx *hctx;
1569         int i;
1570
1571         queue_for_each_hw_ctx(q, hctx, i)
1572                 blk_mq_stop_hw_queue(hctx);
1573 }
1574 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1575
1576 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1577 {
1578         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1579
1580         blk_mq_run_hw_queue(hctx, false);
1581 }
1582 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1583
1584 void blk_mq_start_hw_queues(struct request_queue *q)
1585 {
1586         struct blk_mq_hw_ctx *hctx;
1587         int i;
1588
1589         queue_for_each_hw_ctx(q, hctx, i)
1590                 blk_mq_start_hw_queue(hctx);
1591 }
1592 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1593
1594 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1595 {
1596         if (!blk_mq_hctx_stopped(hctx))
1597                 return;
1598
1599         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1600         blk_mq_run_hw_queue(hctx, async);
1601 }
1602 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1603
1604 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1605 {
1606         struct blk_mq_hw_ctx *hctx;
1607         int i;
1608
1609         queue_for_each_hw_ctx(q, hctx, i)
1610                 blk_mq_start_stopped_hw_queue(hctx, async);
1611 }
1612 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
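
/*
 * Usage sketch (hypothetical driver, continuing the my_* example above):
 * the completion path restarts whatever queues ->queue_rq() stopped once
 * a device slot frees up, so the stalled requests get re-dispatched:
 *
 *	static void my_dev_complete_rq(struct my_dev *dev, struct request *rq)
 *	{
 *		blk_mq_end_request(rq, BLK_STS_OK);
 *		blk_mq_start_stopped_hw_queues(rq->q, true);
 *	}
 */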
1613
1614 static void blk_mq_run_work_fn(struct work_struct *work)
1615 {
1616         struct blk_mq_hw_ctx *hctx;
1617
1618         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1619
1620         /*
1621          * If we are stopped, don't run the queue.
1622          */
1623         if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
1624                 return;
1625
1626         __blk_mq_run_hw_queue(hctx);
1627 }
1628
1629 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1630                                             struct request *rq,
1631                                             bool at_head)
1632 {
1633         struct blk_mq_ctx *ctx = rq->mq_ctx;
1634         enum hctx_type type = hctx->type;
1635
1636         lockdep_assert_held(&ctx->lock);
1637
1638         trace_block_rq_insert(hctx->queue, rq);
1639
1640         if (at_head)
1641                 list_add(&rq->queuelist, &ctx->rq_lists[type]);
1642         else
1643                 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
1644 }
1645
1646 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1647                              bool at_head)
1648 {
1649         struct blk_mq_ctx *ctx = rq->mq_ctx;
1650
1651         lockdep_assert_held(&ctx->lock);
1652
1653         __blk_mq_insert_req_list(hctx, rq, at_head);
1654         blk_mq_hctx_mark_pending(hctx, ctx);
1655 }
1656
1657 /*
1658  * Should only be used carefully, when the caller knows we want to
1659  * bypass a potential IO scheduler on the target device.
1660  */
1661 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
1662 {
1663         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1664
1665         spin_lock(&hctx->lock);
1666         list_add_tail(&rq->queuelist, &hctx->dispatch);
1667         spin_unlock(&hctx->lock);
1668
1669         if (run_queue)
1670                 blk_mq_run_hw_queue(hctx, false);
1671 }
1672
1673 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1674                             struct list_head *list)
1675
1676 {
1677         struct request *rq;
1678         enum hctx_type type = hctx->type;
1679
1680         /*
1681          * Preemption doesn't flush the plug list, so it's possible that
1682          * ctx->cpu is offline now.
1683          */
1684         list_for_each_entry(rq, list, queuelist) {
1685                 BUG_ON(rq->mq_ctx != ctx);
1686                 trace_block_rq_insert(hctx->queue, rq);
1687         }
1688
1689         spin_lock(&ctx->lock);
1690         list_splice_tail_init(list, &ctx->rq_lists[type]);
1691         blk_mq_hctx_mark_pending(hctx, ctx);
1692         spin_unlock(&ctx->lock);
1693 }
1694
1695 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
1696 {
1697         struct request *rqa = container_of(a, struct request, queuelist);
1698         struct request *rqb = container_of(b, struct request, queuelist);
1699
1700         if (rqa->mq_ctx < rqb->mq_ctx)
1701                 return -1;
1702         else if (rqa->mq_ctx > rqb->mq_ctx)
1703                 return 1;
1704         else if (rqa->mq_hctx < rqb->mq_hctx)
1705                 return -1;
1706         else if (rqa->mq_hctx > rqb->mq_hctx)
1707                 return 1;
1708
1709         return blk_rq_pos(rqa) > blk_rq_pos(rqb);
1710 }
1711
1712 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1713 {
1714         struct blk_mq_hw_ctx *this_hctx;
1715         struct blk_mq_ctx *this_ctx;
1716         struct request_queue *this_q;
1717         struct request *rq;
1718         LIST_HEAD(list);
1719         LIST_HEAD(rq_list);
1720         unsigned int depth;
1721
1722         list_splice_init(&plug->mq_list, &list);
1723
1724         if (plug->rq_count > 2 && plug->multiple_queues)
1725                 list_sort(NULL, &list, plug_rq_cmp);
1726
1727         plug->rq_count = 0;
1728
1729         this_q = NULL;
1730         this_hctx = NULL;
1731         this_ctx = NULL;
1732         depth = 0;
1733
1734         while (!list_empty(&list)) {
1735                 rq = list_entry_rq(list.next);
1736                 list_del_init(&rq->queuelist);
1737                 BUG_ON(!rq->q);
1738                 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
1739                         if (this_hctx) {
1740                                 trace_block_unplug(this_q, depth, !from_schedule);
1741                                 blk_mq_sched_insert_requests(this_hctx, this_ctx,
1742                                                                 &rq_list,
1743                                                                 from_schedule);
1744                         }
1745
1746                         this_q = rq->q;
1747                         this_ctx = rq->mq_ctx;
1748                         this_hctx = rq->mq_hctx;
1749                         depth = 0;
1750                 }
1751
1752                 depth++;
1753                 list_add_tail(&rq->queuelist, &rq_list);
1754         }
1755
1756         /*
1757          * If 'this_hctx' is set, we know we have entries to complete
1758          * on 'rq_list'. Do those.
1759          */
1760         if (this_hctx) {
1761                 trace_block_unplug(this_q, depth, !from_schedule);
1762                 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
1763                                                 from_schedule);
1764         }
1765 }
1766
1767 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1768 {
1769         blk_init_request_from_bio(rq, bio);
1770
1771         blk_account_io_start(rq, true);
1772 }
1773
1774 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1775                                             struct request *rq,
1776                                             blk_qc_t *cookie, bool last)
1777 {
1778         struct request_queue *q = rq->q;
1779         struct blk_mq_queue_data bd = {
1780                 .rq = rq,
1781                 .last = last,
1782         };
1783         blk_qc_t new_cookie;
1784         blk_status_t ret;
1785
1786         new_cookie = request_to_qc_t(hctx, rq);
1787
1788         /*
1789          * If queueing succeeds, we are done; on a hard error, the caller
1790          * may kill the request. For any busy-type error, just add it to
1791          * our list, as we previously would have done.
1792          */
1793         ret = q->mq_ops->queue_rq(hctx, &bd);
1794         switch (ret) {
1795         case BLK_STS_OK:
1796                 blk_mq_update_dispatch_busy(hctx, false);
1797                 *cookie = new_cookie;
1798                 break;
1799         case BLK_STS_RESOURCE:
1800         case BLK_STS_DEV_RESOURCE:
1801                 blk_mq_update_dispatch_busy(hctx, true);
1802                 __blk_mq_requeue_request(rq);
1803                 break;
1804         default:
1805                 blk_mq_update_dispatch_busy(hctx, false);
1806                 *cookie = BLK_QC_T_NONE;
1807                 break;
1808         }
1809
1810         return ret;
1811 }
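
/*
 * Return-value sketch for ->queue_rq() (hypothetical driver; the my_*
 * helpers are assumed): BLK_STS_DEV_RESOURCE is for the case where the
 * driver can guarantee the queue will be rerun, e.g. by an in-flight
 * I/O completing; otherwise BLK_STS_RESOURCE lets blk-mq schedule a
 * delayed rerun itself:
 *
 *	if (my_dev_out_of_slots(dev))
 *		return my_dev_has_inflight(dev) ?
 *			BLK_STS_DEV_RESOURCE : BLK_STS_RESOURCE;
 */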
1812
1813 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1814                                                 struct request *rq,
1815                                                 blk_qc_t *cookie,
1816                                                 bool bypass_insert, bool last)
1817 {
1818         struct request_queue *q = rq->q;
1819         bool run_queue = true;
1820
1821         /*
1822          * An RCU or SRCU read lock is needed before checking the quiesced flag.
1823          *
1824          * When the queue is stopped or quiesced, ignore 'bypass_insert' from
1825          * blk_mq_request_issue_directly() and return BLK_STS_OK to the
1826          * caller, so the driver won't try to dispatch again.
1827          */
1828         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1829                 run_queue = false;
1830                 bypass_insert = false;
1831                 goto insert;
1832         }
1833
1834         if (q->elevator && !bypass_insert)
1835                 goto insert;
1836
1837         if (!blk_mq_get_dispatch_budget(hctx))
1838                 goto insert;
1839
1840         if (!blk_mq_get_driver_tag(rq)) {
1841                 blk_mq_put_dispatch_budget(hctx);
1842                 goto insert;
1843         }
1844
1845         return __blk_mq_issue_directly(hctx, rq, cookie, last);
1846 insert:
1847         if (bypass_insert)
1848                 return BLK_STS_RESOURCE;
1849
1850         blk_mq_request_bypass_insert(rq, run_queue);
1851         return BLK_STS_OK;
1852 }
1853
1854 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1855                 struct request *rq, blk_qc_t *cookie)
1856 {
1857         blk_status_t ret;
1858         int srcu_idx;
1859
1860         might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1861
1862         hctx_lock(hctx, &srcu_idx);
1863
1864         ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
1865         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1866                 blk_mq_request_bypass_insert(rq, true);
1867         else if (ret != BLK_STS_OK)
1868                 blk_mq_end_request(rq, ret);
1869
1870         hctx_unlock(hctx, srcu_idx);
1871 }
1872
1873 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
1874 {
1875         blk_status_t ret;
1876         int srcu_idx;
1877         blk_qc_t unused_cookie;
1878         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1879
1880         hctx_lock(hctx, &srcu_idx);
1881         ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
1882         hctx_unlock(hctx, srcu_idx);
1883
1884         return ret;
1885 }
1886
1887 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1888                 struct list_head *list)
1889 {
1890         while (!list_empty(list)) {
1891                 blk_status_t ret;
1892                 struct request *rq = list_first_entry(list, struct request,
1893                                 queuelist);
1894
1895                 list_del_init(&rq->queuelist);
1896                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
1897                 if (ret != BLK_STS_OK) {
1898                         if (ret == BLK_STS_RESOURCE ||
1899                                         ret == BLK_STS_DEV_RESOURCE) {
1900                                 blk_mq_request_bypass_insert(rq,
1901                                                         list_empty(list));
1902                                 break;
1903                         }
1904                         blk_mq_end_request(rq, ret);
1905                 }
1906         }
1907
1908         /*
1909          * If we didn't flush the entire list, we could have told
1910          * the driver there was more coming, but that turned out to
1911          * be a lie.
1912          */
1913         if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
1914                 hctx->queue->mq_ops->commit_rqs(hctx);
1915 }
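
/*
 * Callback sketch (hypothetical driver): ->commit_rqs() exists for
 * drivers that batch doorbell writes based on bd->last; blk-mq calls it
 * when a dispatch run ends without the driver having seen last == true:
 *
 *	static void my_commit_rqs(struct blk_mq_hw_ctx *hctx)
 *	{
 *		struct my_dev *dev = hctx->queue->queuedata;
 *
 *		my_dev_ring_doorbell(dev);
 *	}
 */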
1916
1917 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1918 {
1919         list_add_tail(&rq->queuelist, &plug->mq_list);
1920         plug->rq_count++;
1921         if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
1922                 struct request *tmp;
1923
1924                 tmp = list_first_entry(&plug->mq_list, struct request,
1925                                                 queuelist);
1926                 if (tmp->q != rq->q)
1927                         plug->multiple_queues = true;
1928         }
1929 }
1930
1931 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1932 {
1933         const int is_sync = op_is_sync(bio->bi_opf);
1934         const int is_flush_fua = op_is_flush(bio->bi_opf);
1935         struct blk_mq_alloc_data data = { .flags = 0};
1936         struct request *rq;
1937         struct blk_plug *plug;
1938         struct request *same_queue_rq = NULL;
1939         blk_qc_t cookie;
1940
1941         blk_queue_bounce(q, &bio);
1942
1943         blk_queue_split(q, &bio);
1944
1945         if (!bio_integrity_prep(bio))
1946                 return BLK_QC_T_NONE;
1947
1948         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1949             blk_attempt_plug_merge(q, bio, &same_queue_rq))
1950                 return BLK_QC_T_NONE;
1951
1952         if (blk_mq_sched_bio_merge(q, bio))
1953                 return BLK_QC_T_NONE;
1954
1955         rq_qos_throttle(q, bio);
1956
1957         data.cmd_flags = bio->bi_opf;
1958         rq = blk_mq_get_request(q, bio, &data);
1959         if (unlikely(!rq)) {
1960                 rq_qos_cleanup(q, bio);
1961                 if (bio->bi_opf & REQ_NOWAIT)
1962                         bio_wouldblock_error(bio);
1963                 return BLK_QC_T_NONE;
1964         }
1965
1966         trace_block_getrq(q, bio, bio->bi_opf);
1967
1968         rq_qos_track(q, rq, bio);
1969
1970         cookie = request_to_qc_t(data.hctx, rq);
1971
1972         plug = current->plug;
1973         if (unlikely(is_flush_fua)) {
1974                 blk_mq_put_ctx(data.ctx);
1975                 blk_mq_bio_to_request(rq, bio);
1976
1977                 /* bypass scheduler for flush rq */
1978                 blk_insert_flush(rq);
1979                 blk_mq_run_hw_queue(data.hctx, true);
1980         } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
1981                 /*
1982                  * Use plugging if we have a ->commit_rqs() hook as well, as
1983                  * we know the driver uses bd->last in a smart fashion.
1984                  */
1985                 unsigned int request_count = plug->rq_count;
1986                 struct request *last = NULL;
1987
1988                 blk_mq_put_ctx(data.ctx);
1989                 blk_mq_bio_to_request(rq, bio);
1990
1991                 if (!request_count)
1992                         trace_block_plug(q);
1993                 else
1994                         last = list_entry_rq(plug->mq_list.prev);
1995
1996                 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1997                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1998                         blk_flush_plug_list(plug, false);
1999                         trace_block_plug(q);
2000                 }
2001
2002                 blk_add_rq_to_plug(plug, rq);
2003         } else if (plug && !blk_queue_nomerges(q)) {
2004                 blk_mq_bio_to_request(rq, bio);
2005
2006                 /*
2007                  * We do limited plugging. If the bio can be merged, do that.
2008                  * Otherwise the existing request in the plug list will be
2009                  * issued, so the plug list will have one request at most.
2010                  * The plug list might get flushed before this. If that happens,
2011                  * the plug list is empty, and same_queue_rq is invalid.
2012                  */
2013                 if (list_empty(&plug->mq_list))
2014                         same_queue_rq = NULL;
2015                 if (same_queue_rq) {
2016                         list_del_init(&same_queue_rq->queuelist);
2017                         plug->rq_count--;
2018                 }
2019                 blk_add_rq_to_plug(plug, rq);
2020                 trace_block_plug(q);
2021
2022                 blk_mq_put_ctx(data.ctx);
2023
2024                 if (same_queue_rq) {
2025                         data.hctx = same_queue_rq->mq_hctx;
2026                         trace_block_unplug(q, 1, true);
2027                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
2028                                         &cookie);
2029                 }
2030         } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
2031                         !data.hctx->dispatch_busy)) {
2032                 blk_mq_put_ctx(data.ctx);
2033                 blk_mq_bio_to_request(rq, bio);
2034                 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
2035         } else {
2036                 blk_mq_put_ctx(data.ctx);
2037                 blk_mq_bio_to_request(rq, bio);
2038                 blk_mq_sched_insert_request(rq, false, true, true);
2039         }
2040
2041         return cookie;
2042 }
2043
2044 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2045                      unsigned int hctx_idx)
2046 {
2047         struct page *page;
2048
2049         if (tags->rqs && set->ops->exit_request) {
2050                 int i;
2051
2052                 for (i = 0; i < tags->nr_tags; i++) {
2053                         struct request *rq = tags->static_rqs[i];
2054
2055                         if (!rq)
2056                                 continue;
2057                         set->ops->exit_request(set, rq, hctx_idx);
2058                         tags->static_rqs[i] = NULL;
2059                 }
2060         }
2061
2062         while (!list_empty(&tags->page_list)) {
2063                 page = list_first_entry(&tags->page_list, struct page, lru);
2064                 list_del_init(&page->lru);
2065                 /*
2066                  * Remove kmemleak object previously allocated in
2067                  * blk_mq_alloc_rqs().
2068                  */
2069                 kmemleak_free(page_address(page));
2070                 __free_pages(page, page->private);
2071         }
2072 }
2073
2074 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
2075 {
2076         kfree(tags->rqs);
2077         tags->rqs = NULL;
2078         kfree(tags->static_rqs);
2079         tags->static_rqs = NULL;
2080
2081         blk_mq_free_tags(tags);
2082 }
2083
2084 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2085                                         unsigned int hctx_idx,
2086                                         unsigned int nr_tags,
2087                                         unsigned int reserved_tags)
2088 {
2089         struct blk_mq_tags *tags;
2090         int node;
2091
2092         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2093         if (node == NUMA_NO_NODE)
2094                 node = set->numa_node;
2095
2096         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
2097                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
2098         if (!tags)
2099                 return NULL;
2100
2101         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2102                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2103                                  node);
2104         if (!tags->rqs) {
2105                 blk_mq_free_tags(tags);
2106                 return NULL;
2107         }
2108
2109         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2110                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2111                                         node);
2112         if (!tags->static_rqs) {
2113                 kfree(tags->rqs);
2114                 blk_mq_free_tags(tags);
2115                 return NULL;
2116         }
2117
2118         return tags;
2119 }
2120
2121 static size_t order_to_size(unsigned int order)
2122 {
2123         return (size_t)PAGE_SIZE << order;
2124 }
2125
2126 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2127                                unsigned int hctx_idx, int node)
2128 {
2129         int ret;
2130
2131         if (set->ops->init_request) {
2132                 ret = set->ops->init_request(set, rq, hctx_idx, node);
2133                 if (ret)
2134                         return ret;
2135         }
2136
2137         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
2138         return 0;
2139 }
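
/*
 * Callback sketch (hypothetical driver; my_* names and MY_CMD_IDLE are
 * assumed): ->init_request() runs once per request when the tag map is
 * allocated, so per-request driver state (the cmd_size payload that
 * follows struct request) is prepared here, not in the hot I/O path:
 *
 *	static int my_init_request(struct blk_mq_tag_set *set,
 *				   struct request *rq,
 *				   unsigned int hctx_idx, unsigned int numa_node)
 *	{
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		cmd->state = MY_CMD_IDLE;
 *		return 0;
 *	}
 */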
2140
2141 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2142                      unsigned int hctx_idx, unsigned int depth)
2143 {
2144         unsigned int i, j, entries_per_page, max_order = 4;
2145         size_t rq_size, left;
2146         int node;
2147
2148         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2149         if (node == NUMA_NO_NODE)
2150                 node = set->numa_node;
2151
2152         INIT_LIST_HEAD(&tags->page_list);
2153
2154         /*
2155          * rq_size is the size of the request plus driver payload, rounded
2156          * to the cacheline size
2157          */
2158         rq_size = round_up(sizeof(struct request) + set->cmd_size,
2159                                 cache_line_size());
2160         left = rq_size * depth;
2161
2162         for (i = 0; i < depth; ) {
2163                 int this_order = max_order;
2164                 struct page *page;
2165                 int to_do;
2166                 void *p;
2167
2168                 while (this_order && left < order_to_size(this_order - 1))
2169                         this_order--;
2170
2171                 do {
2172                         page = alloc_pages_node(node,
2173                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2174                                 this_order);
2175                         if (page)
2176                                 break;
2177                         if (!this_order--)
2178                                 break;
2179                         if (order_to_size(this_order) < rq_size)
2180                                 break;
2181                 } while (1);
2182
2183                 if (!page)
2184                         goto fail;
2185
2186                 page->private = this_order;
2187                 list_add_tail(&page->lru, &tags->page_list);
2188
2189                 p = page_address(page);
2190                 /*
2191                  * Allow kmemleak to scan these pages as they contain pointers
2192                  * to additional allocations made via ops->init_request().
2193                  */
2194                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2195                 entries_per_page = order_to_size(this_order) / rq_size;
2196                 to_do = min(entries_per_page, depth - i);
2197                 left -= to_do * rq_size;
2198                 for (j = 0; j < to_do; j++) {
2199                         struct request *rq = p;
2200
2201                         tags->static_rqs[i] = rq;
2202                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2203                                 tags->static_rqs[i] = NULL;
2204                                 goto fail;
2205                         }
2206
2207                         p += rq_size;
2208                         i++;
2209                 }
2210         }
2211         return 0;
2212
2213 fail:
2214         blk_mq_free_rqs(set, tags, hctx_idx);
2215         return -ENOMEM;
2216 }
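
/*
 * Worked example with illustrative numbers (assuming 4K pages, 64-byte
 * cachelines, a 320-byte struct request and cmd_size == 192): rq_size =
 * round_up(320 + 192, 64) = 512 bytes, so an order-4 chunk (16 pages,
 * 65536 bytes) yields entries_per_page = 65536 / 512 = 128 requests,
 * and a depth of 256 needs two such chunks.
 */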
2217
2218 /*
2219  * 'cpu' is going away. Splice any existing rq_list entries from this
2220  * software queue to the hw queue dispatch list, and ensure that it
2221  * gets run.
2222  */
2223 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2224 {
2225         struct blk_mq_hw_ctx *hctx;
2226         struct blk_mq_ctx *ctx;
2227         LIST_HEAD(tmp);
2228         enum hctx_type type;
2229
2230         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2231         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2232         type = hctx->type;
2233
2234         spin_lock(&ctx->lock);
2235         if (!list_empty(&ctx->rq_lists[type])) {
2236                 list_splice_init(&ctx->rq_lists[type], &tmp);
2237                 blk_mq_hctx_clear_pending(hctx, ctx);
2238         }
2239         spin_unlock(&ctx->lock);
2240
2241         if (list_empty(&tmp))
2242                 return 0;
2243
2244         spin_lock(&hctx->lock);
2245         list_splice_tail_init(&tmp, &hctx->dispatch);
2246         spin_unlock(&hctx->lock);
2247
2248         blk_mq_run_hw_queue(hctx, true);
2249         return 0;
2250 }
2251
2252 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2253 {
2254         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2255                                             &hctx->cpuhp_dead);
2256 }
2257
2258 /* hctx->ctxs will be freed in the queue's release handler */
2259 static void blk_mq_exit_hctx(struct request_queue *q,
2260                 struct blk_mq_tag_set *set,
2261                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2262 {
2263         if (blk_mq_hw_queue_mapped(hctx))
2264                 blk_mq_tag_idle(hctx);
2265
2266         if (set->ops->exit_request)
2267                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2268
2269         if (set->ops->exit_hctx)
2270                 set->ops->exit_hctx(hctx, hctx_idx);
2271
2272         blk_mq_remove_cpuhp(hctx);
2273
2274         spin_lock(&q->unused_hctx_lock);
2275         list_add(&hctx->hctx_list, &q->unused_hctx_list);
2276         spin_unlock(&q->unused_hctx_lock);
2277 }
2278
2279 static void blk_mq_exit_hw_queues(struct request_queue *q,
2280                 struct blk_mq_tag_set *set, int nr_queue)
2281 {
2282         struct blk_mq_hw_ctx *hctx;
2283         unsigned int i;
2284
2285         queue_for_each_hw_ctx(q, hctx, i) {
2286                 if (i == nr_queue)
2287                         break;
2288                 blk_mq_debugfs_unregister_hctx(hctx);
2289                 blk_mq_exit_hctx(q, set, hctx, i);
2290         }
2291 }
2292
2293 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2294 {
2295         int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2296
2297         BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2298                            __alignof__(struct blk_mq_hw_ctx)) !=
2299                      sizeof(struct blk_mq_hw_ctx));
2300
2301         if (tag_set->flags & BLK_MQ_F_BLOCKING)
2302                 hw_ctx_size += sizeof(struct srcu_struct);
2303
2304         return hw_ctx_size;
2305 }
2306
2307 static int blk_mq_init_hctx(struct request_queue *q,
2308                 struct blk_mq_tag_set *set,
2309                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2310 {
2311         hctx->queue_num = hctx_idx;
2312
2313         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2314
2315         hctx->tags = set->tags[hctx_idx];
2316
2317         if (set->ops->init_hctx &&
2318             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2319                 goto unregister_cpu_notifier;
2320
2321         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
2322                                 hctx->numa_node))
2323                 goto exit_hctx;
2324         return 0;
2325
2326  exit_hctx:
2327         if (set->ops->exit_hctx)
2328                 set->ops->exit_hctx(hctx, hctx_idx);
2329  unregister_cpu_notifier:
2330         blk_mq_remove_cpuhp(hctx);
2331         return -1;
2332 }
2333
2334 static struct blk_mq_hw_ctx *
2335 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
2336                 int node)
2337 {
2338         struct blk_mq_hw_ctx *hctx;
2339         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
2340
2341         hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
2342         if (!hctx)
2343                 goto fail_alloc_hctx;
2344
2345         if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
2346                 goto free_hctx;
2347
2348         atomic_set(&hctx->nr_active, 0);
2349         if (node == NUMA_NO_NODE)
2350                 node = set->numa_node;
2351         hctx->numa_node = node;
2352
2353         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2354         spin_lock_init(&hctx->lock);
2355         INIT_LIST_HEAD(&hctx->dispatch);
2356         hctx->queue = q;
2357         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2358
2359         INIT_LIST_HEAD(&hctx->hctx_list);
2360
2361         /*
2362          * Allocate space for all possible cpus to avoid allocation at
2363          * runtime.
2364          */
2365         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2366                         gfp, node);
2367         if (!hctx->ctxs)
2368                 goto free_cpumask;
2369
2370         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2371                                 gfp, node))
2372                 goto free_ctxs;
2373         hctx->nr_ctx = 0;
2374
2375         spin_lock_init(&hctx->dispatch_wait_lock);
2376         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2377         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2378
2379         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
2380                         gfp);
2381         if (!hctx->fq)
2382                 goto free_bitmap;
2383
2384         if (hctx->flags & BLK_MQ_F_BLOCKING)
2385                 init_srcu_struct(hctx->srcu);
2386         blk_mq_hctx_kobj_init(hctx);
2387
2388         return hctx;
2389
2390  free_bitmap:
2391         sbitmap_free(&hctx->ctx_map);
2392  free_ctxs:
2393         kfree(hctx->ctxs);
2394  free_cpumask:
2395         free_cpumask_var(hctx->cpumask);
2396  free_hctx:
2397         kfree(hctx);
2398  fail_alloc_hctx:
2399         return NULL;
2400 }
2401
2402 static void blk_mq_init_cpu_queues(struct request_queue *q,
2403                                    unsigned int nr_hw_queues)
2404 {
2405         struct blk_mq_tag_set *set = q->tag_set;
2406         unsigned int i, j;
2407
2408         for_each_possible_cpu(i) {
2409                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2410                 struct blk_mq_hw_ctx *hctx;
2411                 int k;
2412
2413                 __ctx->cpu = i;
2414                 spin_lock_init(&__ctx->lock);
2415                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
2416                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
2417
2418                 __ctx->queue = q;
2419
2420                 /*
2421                  * Set the local node, IFF we have more than one hw queue. If
2422                  * not, we remain on the home node of the device.
2423                  */
2424                 for (j = 0; j < set->nr_maps; j++) {
2425                         hctx = blk_mq_map_queue_type(q, j, i);
2426                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2427                                 hctx->numa_node = local_memory_node(cpu_to_node(i));
2428                 }
2429         }
2430 }
2431
2432 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2433 {
2434         int ret = 0;
2435
2436         set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2437                                         set->queue_depth, set->reserved_tags);
2438         if (!set->tags[hctx_idx])
2439                 return false;
2440
2441         ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2442                                 set->queue_depth);
2443         if (!ret)
2444                 return true;
2445
2446         blk_mq_free_rq_map(set->tags[hctx_idx]);
2447         set->tags[hctx_idx] = NULL;
2448         return false;
2449 }
2450
2451 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2452                                          unsigned int hctx_idx)
2453 {
2454         if (set->tags && set->tags[hctx_idx]) {
2455                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2456                 blk_mq_free_rq_map(set->tags[hctx_idx]);
2457                 set->tags[hctx_idx] = NULL;
2458         }
2459 }
2460
2461 static void blk_mq_map_swqueue(struct request_queue *q)
2462 {
2463         unsigned int i, j, hctx_idx;
2464         struct blk_mq_hw_ctx *hctx;
2465         struct blk_mq_ctx *ctx;
2466         struct blk_mq_tag_set *set = q->tag_set;
2467
2468         /*
2469          * Avoid others reading an incomplete hctx->cpumask through sysfs.
2470          */
2471         mutex_lock(&q->sysfs_lock);
2472
2473         queue_for_each_hw_ctx(q, hctx, i) {
2474                 cpumask_clear(hctx->cpumask);
2475                 hctx->nr_ctx = 0;
2476                 hctx->dispatch_from = NULL;
2477         }
2478
2479         /*
2480          * Map software to hardware queues.
2481          *
2482          * If the cpu isn't present, the cpu is mapped to the first hctx.
2483          */
2484         for_each_possible_cpu(i) {
2485                 hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
2486                 /* unmapped hw queue can be remapped after CPU topo changed */
2487                 if (!set->tags[hctx_idx] &&
2488                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2489                         /*
2490                          * If tags initialization fails for some hctx,
2491                          * that hctx won't be brought online.  In this
2492                          * case, remap the current ctx to hctx[0], which
2493                          * is guaranteed to always have tags allocated.
2494                          */
2495                         set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
2496                 }
2497
2498                 ctx = per_cpu_ptr(q->queue_ctx, i);
2499                 for (j = 0; j < set->nr_maps; j++) {
2500                         if (!set->map[j].nr_queues) {
2501                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2502                                                 HCTX_TYPE_DEFAULT, i);
2503                                 continue;
2504                         }
2505
2506                         hctx = blk_mq_map_queue_type(q, j, i);
2507                         ctx->hctxs[j] = hctx;
2508                         /*
2509                          * If the CPU is already set in the mask, then we've
2510                          * mapped this one already. This can happen if
2511                          * devices share queues across queue maps.
2512                          */
2513                         if (cpumask_test_cpu(i, hctx->cpumask))
2514                                 continue;
2515
2516                         cpumask_set_cpu(i, hctx->cpumask);
2517                         hctx->type = j;
2518                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
2519                         hctx->ctxs[hctx->nr_ctx++] = ctx;
2520
2521                         /*
2522                          * If the nr_ctx type overflows, we have exceeded the
2523                          * number of sw queues we can support.
2524                          */
2525                         BUG_ON(!hctx->nr_ctx);
2526                 }
2527
2528                 for (; j < HCTX_MAX_TYPES; j++)
2529                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
2530                                         HCTX_TYPE_DEFAULT, i);
2531         }
2532
2533         mutex_unlock(&q->sysfs_lock);
2534
2535         queue_for_each_hw_ctx(q, hctx, i) {
2536                 /*
2537                  * If no software queues are mapped to this hardware queue,
2538                  * disable it and free the request entries.
2539                  */
2540                 if (!hctx->nr_ctx) {
2541                         /* Never unmap queue 0.  We need it as a
2542                          * fallback in case a new remap fails
2543                          * allocation.
2544                          */
2545                         if (i && set->tags[i])
2546                                 blk_mq_free_map_and_requests(set, i);
2547
2548                         hctx->tags = NULL;
2549                         continue;
2550                 }
2551
2552                 hctx->tags = set->tags[i];
2553                 WARN_ON(!hctx->tags);
2554
2555                 /*
2556                  * Set the map size to the number of mapped software queues.
2557                  * This is more accurate and more efficient than looping
2558                  * over all possibly mapped software queues.
2559                  */
2560                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2561
2562                 /*
2563                  * Initialize batch roundrobin counts
2564                  */
2565                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
2566                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2567         }
2568 }
2569
2570 /*
2571  * Caller needs to ensure that we're either frozen/quiesced, or that
2572  * the queue isn't live yet.
2573  */
2574 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2575 {
2576         struct blk_mq_hw_ctx *hctx;
2577         int i;
2578
2579         queue_for_each_hw_ctx(q, hctx, i) {
2580                 if (shared)
2581                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
2582                 else
2583                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2584         }
2585 }
2586
2587 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2588                                         bool shared)
2589 {
2590         struct request_queue *q;
2591
2592         lockdep_assert_held(&set->tag_list_lock);
2593
2594         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2595                 blk_mq_freeze_queue(q);
2596                 queue_set_hctx_shared(q, shared);
2597                 blk_mq_unfreeze_queue(q);
2598         }
2599 }
2600
2601 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2602 {
2603         struct blk_mq_tag_set *set = q->tag_set;
2604
2605         mutex_lock(&set->tag_list_lock);
2606         list_del_rcu(&q->tag_set_list);
2607         if (list_is_singular(&set->tag_list)) {
2608                 /* just transitioned to unshared */
2609                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2610                 /* update existing queue */
2611                 blk_mq_update_tag_set_depth(set, false);
2612         }
2613         mutex_unlock(&set->tag_list_lock);
2614         INIT_LIST_HEAD(&q->tag_set_list);
2615 }
2616
2617 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2618                                      struct request_queue *q)
2619 {
2620         mutex_lock(&set->tag_list_lock);
2621
2622         /*
2623          * Check to see if we're transitioning to shared (from 1 to 2 queues).
2624          */
2625         if (!list_empty(&set->tag_list) &&
2626             !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2627                 set->flags |= BLK_MQ_F_TAG_SHARED;
2628                 /* update existing queue */
2629                 blk_mq_update_tag_set_depth(set, true);
2630         }
2631         if (set->flags & BLK_MQ_F_TAG_SHARED)
2632                 queue_set_hctx_shared(q, true);
2633         list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2634
2635         mutex_unlock(&set->tag_list_lock);
2636 }
2637
2638 /* All allocations will be freed in the release handler of q->mq_kobj */
2639 static int blk_mq_alloc_ctxs(struct request_queue *q)
2640 {
2641         struct blk_mq_ctxs *ctxs;
2642         int cpu;
2643
2644         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
2645         if (!ctxs)
2646                 return -ENOMEM;
2647
2648         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2649         if (!ctxs->queue_ctx)
2650                 goto fail;
2651
2652         for_each_possible_cpu(cpu) {
2653                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
2654                 ctx->ctxs = ctxs;
2655         }
2656
2657         q->mq_kobj = &ctxs->kobj;
2658         q->queue_ctx = ctxs->queue_ctx;
2659
2660         return 0;
2661  fail:
2662         kfree(ctxs);
2663         return -ENOMEM;
2664 }
2665
2666 /*
2667  * This is the actual release handler for mq, but we do it from the
2668  * request queue's release handler to avoid use-after-free issues:
2669  * q->mq_kobj shouldn't have been introduced, but we can't group the
2670  * ctx/hctx kobjects without it.
2671  */
2672 void blk_mq_release(struct request_queue *q)
2673 {
2674         struct blk_mq_hw_ctx *hctx, *next;
2675         int i;
2676
2677         cancel_delayed_work_sync(&q->requeue_work);
2678
2679         queue_for_each_hw_ctx(q, hctx, i)
2680                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
2681
2682         /* all hctx are in .unused_hctx_list now */
2683         list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
2684                 list_del_init(&hctx->hctx_list);
2685                 kobject_put(&hctx->kobj);
2686         }
2687
2688         kfree(q->queue_hw_ctx);
2689
2690         /*
2691          * Release .mq_kobj and the sw queues' kobjects now, because
2692          * both share their lifetime with the request queue.
2693          */
2694         blk_mq_sysfs_deinit(q);
2695 }
2696
2697 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2698 {
2699         struct request_queue *uninit_q, *q;
2700
2701         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2702         if (!uninit_q)
2703                 return ERR_PTR(-ENOMEM);
2704
2705         q = blk_mq_init_allocated_queue(set, uninit_q);
2706         if (IS_ERR(q))
2707                 blk_cleanup_queue(uninit_q);
2708
2709         return q;
2710 }
2711 EXPORT_SYMBOL(blk_mq_init_queue);
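
/*
 * Setup sketch (hypothetical driver; the my_* names and field values
 * are illustrative, not required): a typical caller fills in a tag set,
 * allocates it, and then creates the queue:
 *
 *	dev->tag_set.ops = &my_mq_ops;
 *	dev->tag_set.nr_hw_queues = 1;
 *	dev->tag_set.queue_depth = 128;
 *	dev->tag_set.numa_node = NUMA_NO_NODE;
 *	dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(&dev->tag_set))
 *		goto out;
 *
 *	dev->queue = blk_mq_init_queue(&dev->tag_set);
 *	if (IS_ERR(dev->queue))
 *		goto out_free_tag_set;
 */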
2712
2713 /*
2714  * Helper for setting up a queue with mq ops, given queue depth, and
2715  * the passed in mq ops flags.
2716  */
2717 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
2718                                            const struct blk_mq_ops *ops,
2719                                            unsigned int queue_depth,
2720                                            unsigned int set_flags)
2721 {
2722         struct request_queue *q;
2723         int ret;
2724
2725         memset(set, 0, sizeof(*set));
2726         set->ops = ops;
2727         set->nr_hw_queues = 1;
2728         set->nr_maps = 1;
2729         set->queue_depth = queue_depth;
2730         set->numa_node = NUMA_NO_NODE;
2731         set->flags = set_flags;
2732
2733         ret = blk_mq_alloc_tag_set(set);
2734         if (ret)
2735                 return ERR_PTR(ret);
2736
2737         q = blk_mq_init_queue(set);
2738         if (IS_ERR(q)) {
2739                 blk_mq_free_tag_set(set);
2740                 return q;
2741         }
2742
2743         return q;
2744 }
2745 EXPORT_SYMBOL(blk_mq_init_sq_queue);
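
/*
 * Usage sketch (hypothetical driver, my_* names assumed): for a simple
 * single-queue device the helper above collapses the whole tag-set
 * setup into one call:
 *
 *	dev->queue = blk_mq_init_sq_queue(&dev->tag_set, &my_mq_ops, 64,
 *					  BLK_MQ_F_SHOULD_MERGE);
 *	if (IS_ERR(dev->queue))
 *		return PTR_ERR(dev->queue);
 */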
2746
2747 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
2748                 struct blk_mq_tag_set *set, struct request_queue *q,
2749                 int hctx_idx, int node)
2750 {
2751         struct blk_mq_hw_ctx *hctx = NULL, *tmp;
2752
2753         /* reuse dead hctx first */
2754         spin_lock(&q->unused_hctx_lock);
2755         list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
2756                 if (tmp->numa_node == node) {
2757                         hctx = tmp;
2758                         break;
2759                 }
2760         }
2761         if (hctx)
2762                 list_del_init(&hctx->hctx_list);
2763         spin_unlock(&q->unused_hctx_lock);
2764
2765         if (!hctx)
2766                 hctx = blk_mq_alloc_hctx(q, set, node);
2767         if (!hctx)
2768                 goto fail;
2769
2770         if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
2771                 goto free_hctx;
2772
2773         return hctx;
2774
2775  free_hctx:
2776         kobject_put(&hctx->kobj);
2777  fail:
2778         return NULL;
2779 }
2780
2781 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2782                                                 struct request_queue *q)
2783 {
2784         int i, j, end;
2785         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2786
2787         /* protect against switching io schedulers */
2788         mutex_lock(&q->sysfs_lock);
2789         for (i = 0; i < set->nr_hw_queues; i++) {
2790                 int node;
2791                 struct blk_mq_hw_ctx *hctx;
2792
2793                 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
2794                 /*
2795                  * If the hw queue has been mapped to another numa node,
2796                  * we need to realloc the hctx. If allocation fails, fall
2797                  * back to using the previous one.
2798                  */
2799                 if (hctxs[i] && (hctxs[i]->numa_node == node))
2800                         continue;
2801
2802                 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
2803                 if (hctx) {
2804                         if (hctxs[i])
2805                                 blk_mq_exit_hctx(q, set, hctxs[i], i);
2806                         hctxs[i] = hctx;
2807                 } else {
2808                         if (hctxs[i])
2809                                 pr_warn("Allocate new hctx on node %d fails, "
2810                                         "fallback to previous one on node %d\n",
2811                                         node, hctxs[i]->numa_node);
2812                         else
2813                                 break;
2814                 }
2815         }
2816         /*
2817          * Increasing nr_hw_queues failed. Free the newly allocated
2818          * hctxs and keep the previous q->nr_hw_queues.
2819          */
2820         if (i != set->nr_hw_queues) {
2821                 j = q->nr_hw_queues;
2822                 end = i;
2823         } else {
2824                 j = i;
2825                 end = q->nr_hw_queues;
2826                 q->nr_hw_queues = set->nr_hw_queues;
2827         }
2828
2829         for (; j < end; j++) {
2830                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2831
2832                 if (hctx) {
2833                         if (hctx->tags)
2834                                 blk_mq_free_map_and_requests(set, j);
2835                         blk_mq_exit_hctx(q, set, hctx, j);
2836                         hctxs[j] = NULL;
2837                 }
2838         }
2839         mutex_unlock(&q->sysfs_lock);
2840 }
2841
2842 /*
2843  * Maximum number of hardware queues we support. For single sets, we'll never
2844  * have more than the CPUs (software queues). For multiple sets, the tag_set
2845  * user may have set ->nr_hw_queues larger.
2846  */
2847 static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
2848 {
2849         if (set->nr_maps == 1)
2850                 return nr_cpu_ids;
2851
2852         return max(set->nr_hw_queues, nr_cpu_ids);
2853 }
2854
2855 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2856                                                   struct request_queue *q)
2857 {
2858         /* mark the queue as mq asap */
2859         q->mq_ops = set->ops;
2860
2861         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2862                                              blk_mq_poll_stats_bkt,
2863                                              BLK_MQ_POLL_STATS_BKTS, q);
2864         if (!q->poll_cb)
2865                 goto err_exit;
2866
2867         if (blk_mq_alloc_ctxs(q))
2868                 goto err_exit;
2869
2870         /* init q->mq_kobj and sw queues' kobjects */
2871         blk_mq_sysfs_init(q);
2872
2873         q->nr_queues = nr_hw_queues(set);
2874         q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
2875                                                 GFP_KERNEL, set->numa_node);
2876         if (!q->queue_hw_ctx)
2877                 goto err_sys_init;
2878
2879         INIT_LIST_HEAD(&q->unused_hctx_list);
2880         spin_lock_init(&q->unused_hctx_lock);
2881
2882         blk_mq_realloc_hw_ctxs(set, q);
2883         if (!q->nr_hw_queues)
2884                 goto err_hctxs;
2885
2886         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2887         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2888
2889         q->tag_set = set;
2890
2891         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2892         if (set->nr_maps > HCTX_TYPE_POLL &&
2893             set->map[HCTX_TYPE_POLL].nr_queues)
2894                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
2895
2896         q->sg_reserved_size = INT_MAX;
2897
2898         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2899         INIT_LIST_HEAD(&q->requeue_list);
2900         spin_lock_init(&q->requeue_lock);
2901
2902         blk_queue_make_request(q, blk_mq_make_request);
2903
2904         /*
2905          * Do this after blk_queue_make_request() overrides it...
2906          */
2907         q->nr_requests = set->queue_depth;
2908
2909         /*
2910          * Default to classic polling
2911          */
2912         q->poll_nsec = BLK_MQ_POLL_CLASSIC;
2913
2914         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2915         blk_mq_add_queue_tag_set(set, q);
2916         blk_mq_map_swqueue(q);
2917
2918         if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2919                 int ret;
2920
2921                 ret = elevator_init_mq(q);
2922                 if (ret)
2923                         return ERR_PTR(ret);
2924         }
2925
2926         return q;
2927
2928 err_hctxs:
2929         kfree(q->queue_hw_ctx);
2930 err_sys_init:
2931         blk_mq_sysfs_deinit(q);
2932 err_exit:
2933         q->mq_ops = NULL;
2934         return ERR_PTR(-ENOMEM);
2935 }
2936 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2937
2938 /* tags can _not_ be used after returning from blk_mq_exit_queue */
2939 void blk_mq_exit_queue(struct request_queue *q)
2940 {
2941         struct blk_mq_tag_set   *set = q->tag_set;
2942
2943         blk_mq_del_queue_tag_set(q);
2944         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2945 }
2946
2947 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2948 {
2949         int i;
2950
2951         for (i = 0; i < set->nr_hw_queues; i++)
2952                 if (!__blk_mq_alloc_rq_map(set, i))
2953                         goto out_unwind;
2954
2955         return 0;
2956
2957 out_unwind:
2958         while (--i >= 0)
2959                 blk_mq_free_rq_map(set->tags[i]);
2960
2961         return -ENOMEM;
2962 }
2963
2964 /*
2965  * Allocate the request maps associated with this tag_set. Note that this
2966  * may reduce the depth asked for, if memory is tight. set->queue_depth
2967  * will be updated to reflect the allocated depth.
2968  */
2969 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2970 {
2971         unsigned int depth;
2972         int err;
2973
2974         depth = set->queue_depth;
2975         do {
2976                 err = __blk_mq_alloc_rq_maps(set);
2977                 if (!err)
2978                         break;
2979
2980                 set->queue_depth >>= 1;
2981                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2982                         err = -ENOMEM;
2983                         break;
2984                 }
2985         } while (set->queue_depth);
2986
2987         if (!set->queue_depth || err) {
2988                 pr_err("blk-mq: failed to allocate request map\n");
2989                 return -ENOMEM;
2990         }
2991
2992         if (depth != set->queue_depth)
2993                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2994                                                 depth, set->queue_depth);
2995
2996         return 0;
2997 }
2998
2999 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
3000 {
3001         if (set->ops->map_queues && !is_kdump_kernel()) {
3002                 int i;
3003
3004                 /*
3005                  * A transport's .map_queues() is usually implemented in the
3006                  * following way:
3007                  *
3008                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
3009                  *      mask = get_cpu_mask(queue)
3010                  *      for_each_cpu(cpu, mask)
3011                  *              set->map[x].mq_map[cpu] = queue;
3012                  * }
3013                  *
3014                  * When we need to remap, the table has to be cleared in
3015                  * order to kill stale mappings, since a CPU may end up not
3016                  * mapped to any hw queue.
3017                  */
3018                 for (i = 0; i < set->nr_maps; i++)
3019                         blk_mq_clear_mq_map(&set->map[i]);
3020
3021                 return set->ops->map_queues(set);
3022         } else {
3023                 BUG_ON(set->nr_maps > 1);
3024                 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3025         }
3026 }
3027
3028 /*
3029  * Alloc a tag set to be associated with one or more request queues.
3030  * May fail with EINVAL for various error conditions. May adjust the
3031  * requested depth down, if it's too large. In that case, the set
3032  * value will be stored in set->queue_depth.
3033  */
3034 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
3035 {
3036         int i, ret;
3037
3038         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
3039
3040         if (!set->nr_hw_queues)
3041                 return -EINVAL;
3042         if (!set->queue_depth)
3043                 return -EINVAL;
3044         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
3045                 return -EINVAL;
3046
3047         if (!set->ops->queue_rq)
3048                 return -EINVAL;
3049
3050         if (!set->ops->get_budget ^ !set->ops->put_budget)
3051                 return -EINVAL;
3052
3053         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
3054                 pr_info("blk-mq: reduced tag depth to %u\n",
3055                         BLK_MQ_MAX_DEPTH);
3056                 set->queue_depth = BLK_MQ_MAX_DEPTH;
3057         }
3058
3059         if (!set->nr_maps)
3060                 set->nr_maps = 1;
3061         else if (set->nr_maps > HCTX_MAX_TYPES)
3062                 return -EINVAL;
3063
3064         /*
3065          * If a crashdump is active, then we are potentially in a very
3066          * memory constrained environment. Limit us to 1 queue and
3067          * 64 tags to prevent using too much memory.
3068          */
3069         if (is_kdump_kernel()) {
3070                 set->nr_hw_queues = 1;
3071                 set->nr_maps = 1;
3072                 set->queue_depth = min(64U, set->queue_depth);
3073         }
3074         /*
3075          * There is no use for more h/w queues than cpus if we just have
3076          * a single map.
3077          */
3078         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
3079                 set->nr_hw_queues = nr_cpu_ids;
3080
3081         set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
3082                                  GFP_KERNEL, set->numa_node);
3083         if (!set->tags)
3084                 return -ENOMEM;
3085
3086         ret = -ENOMEM;
3087         for (i = 0; i < set->nr_maps; i++) {
3088                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
3089                                                   sizeof(set->map[i].mq_map[0]),
3090                                                   GFP_KERNEL, set->numa_node);
3091                 if (!set->map[i].mq_map)
3092                         goto out_free_mq_map;
3093                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
3094         }
3095
3096         ret = blk_mq_update_queue_map(set);
3097         if (ret)
3098                 goto out_free_mq_map;
3099
3100         ret = blk_mq_alloc_rq_maps(set);
3101         if (ret)
3102                 goto out_free_mq_map;
3103
3104         mutex_init(&set->tag_list_lock);
3105         INIT_LIST_HEAD(&set->tag_list);
3106
3107         return 0;
3108
3109 out_free_mq_map:
3110         for (i = 0; i < set->nr_maps; i++) {
3111                 kfree(set->map[i].mq_map);
3112                 set->map[i].mq_map = NULL;
3113         }
3114         kfree(set->tags);
3115         set->tags = NULL;
3116         return ret;
3117 }
3118 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
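
/*
 * Usage sketch: a driver typically fills in the set and then creates a
 * request queue from it. This is a hedged example, not drawn from any
 * specific driver; my_mq_ops and struct my_cmd are assumptions.
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 128;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 */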
3119
3120 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
3121 {
3122         int i, j;
3123
3124         for (i = 0; i < nr_hw_queues(set); i++)
3125                 blk_mq_free_map_and_requests(set, i);
3126
3127         for (j = 0; j < set->nr_maps; j++) {
3128                 kfree(set->map[j].mq_map);
3129                 set->map[j].mq_map = NULL;
3130         }
3131
3132         kfree(set->tags);
3133         set->tags = NULL;
3134 }
3135 EXPORT_SYMBOL(blk_mq_free_tag_set);
3136
3137 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
3138 {
3139         struct blk_mq_tag_set *set = q->tag_set;
3140         struct blk_mq_hw_ctx *hctx;
3141         int i, ret;
3142
3143         if (!set)
3144                 return -EINVAL;
3145
3146         if (q->nr_requests == nr)
3147                 return 0;
3148
3149         blk_mq_freeze_queue(q);
3150         blk_mq_quiesce_queue(q);
3151
3152         ret = 0;
3153         queue_for_each_hw_ctx(q, hctx, i) {
3154                 if (!hctx->tags)
3155                         continue;
3156                 /*
3157                  * With an MQ scheduler, just update the scheduler queue depth,
3158                  * like the old code did; otherwise resize the hw tags directly.
3159                  */
3160                 if (!hctx->sched_tags) {
3161                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
3162                                                         false);
3163                 } else {
3164                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
3165                                                         nr, true);
3166                 }
3167                 if (ret)
3168                         break;
3169                 if (q->elevator && q->elevator->type->ops.depth_updated)
3170                         q->elevator->type->ops.depth_updated(hctx);
3171         }
3172
3173         if (!ret)
3174                 q->nr_requests = nr;
3175
3176         blk_mq_unquiesce_queue(q);
3177         blk_mq_unfreeze_queue(q);
3178
3179         return ret;
3180 }
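
/*
 * Usage sketch: this is typically reached when user space writes the
 * queue's sysfs nr_requests attribute, which boils down to a call like
 * the one below (the value 128 is just an example):
 *
 *	ret = blk_mq_update_nr_requests(q, 128);
 */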
3181
3182 /*
3183  * request_queue and elevator_type pair.
3184  * It is just used by __blk_mq_update_nr_hw_queues to cache
3185  * the elevator_type associated with a request_queue.
3186  */
3187 struct blk_mq_qe_pair {
3188         struct list_head node;
3189         struct request_queue *q;
3190         struct elevator_type *type;
3191 };
3192
3193 /*
3194  * Cache the elevator_type in the qe pair list and switch the
3195  * io scheduler to 'none'.
3196  */
3197 static bool blk_mq_elv_switch_none(struct list_head *head,
3198                 struct request_queue *q)
3199 {
3200         struct blk_mq_qe_pair *qe;
3201
3202         if (!q->elevator)
3203                 return true;
3204
3205         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
3206         if (!qe)
3207                 return false;
3208
3209         INIT_LIST_HEAD(&qe->node);
3210         qe->q = q;
3211         qe->type = q->elevator->type;
3212         list_add(&qe->node, head);
3213
3214         mutex_lock(&q->sysfs_lock);
3215         /*
3216          * After elevator_switch_mq, the previous elevator_queue will be
3217          * released by elevator_release. The module reference taken on
3218          * the io scheduler by elevator_get will also be dropped. So take
3219          * an extra reference on the io scheduler module here to prevent
3220          * it from being unloaded.
3221          */
3222         __module_get(qe->type->elevator_owner);
3223         elevator_switch_mq(q, NULL);
3224         mutex_unlock(&q->sysfs_lock);
3225
3226         return true;
3227 }
3228
3229 static void blk_mq_elv_switch_back(struct list_head *head,
3230                 struct request_queue *q)
3231 {
3232         struct blk_mq_qe_pair *qe;
3233         struct elevator_type *t = NULL;
3234
3235         list_for_each_entry(qe, head, node)
3236                 if (qe->q == q) {
3237                         t = qe->type;
3238                         break;
3239                 }
3240
3241         if (!t)
3242                 return;
3243
3244         list_del(&qe->node);
3245         kfree(qe);
3246
3247         mutex_lock(&q->sysfs_lock);
3248         elevator_switch_mq(q, t);
3249         mutex_unlock(&q->sysfs_lock);
3250 }
3251
3252 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
3253                                                         int nr_hw_queues)
3254 {
3255         struct request_queue *q;
3256         LIST_HEAD(head);
3257         int prev_nr_hw_queues;
3258
3259         lockdep_assert_held(&set->tag_list_lock);
3260
3261         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
3262                 nr_hw_queues = nr_cpu_ids;
3263         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
3264                 return;
3265
3266         list_for_each_entry(q, &set->tag_list, tag_set_list)
3267                 blk_mq_freeze_queue(q);
3268         /*
3269          * Sync with blk_mq_queue_tag_busy_iter.
3270          */
3271         synchronize_rcu();
3272         /*
3273          * Switch IO scheduler to 'none', cleaning up the data associated
3274          * with the previous scheduler. We will switch back once we are done
3275          * updating the new sw to hw queue mappings.
3276          */
3277         list_for_each_entry(q, &set->tag_list, tag_set_list)
3278                 if (!blk_mq_elv_switch_none(&head, q))
3279                         goto switch_back;
3280
3281         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3282                 blk_mq_debugfs_unregister_hctxs(q);
3283                 blk_mq_sysfs_unregister(q);
3284         }
3285
3286         prev_nr_hw_queues = set->nr_hw_queues;
3287         set->nr_hw_queues = nr_hw_queues;
3288         blk_mq_update_queue_map(set);
3289 fallback:
3290         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3291                 blk_mq_realloc_hw_ctxs(set, q);
3292                 if (q->nr_hw_queues != set->nr_hw_queues) {
3293                         pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
3294                                         nr_hw_queues, prev_nr_hw_queues);
3295                         set->nr_hw_queues = prev_nr_hw_queues;
3296                         blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3297                         goto fallback;
3298                 }
3299                 blk_mq_map_swqueue(q);
3300         }
3301
3302         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3303                 blk_mq_sysfs_register(q);
3304                 blk_mq_debugfs_register_hctxs(q);
3305         }
3306
3307 switch_back:
3308         list_for_each_entry(q, &set->tag_list, tag_set_list)
3309                 blk_mq_elv_switch_back(&head, q);
3310
3311         list_for_each_entry(q, &set->tag_list, tag_set_list)
3312                 blk_mq_unfreeze_queue(q);
3313 }
3314
3315 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
3316 {
3317         mutex_lock(&set->tag_list_lock);
3318         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
3319         mutex_unlock(&set->tag_list_lock);
3320 }
3321 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
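
/*
 * Usage sketch: a driver that discovers a different number of hardware
 * queues, e.g. after a controller reset, can resize its set as below.
 * The names dev->tag_set and new_nr_queues are hypothetical:
 *
 *	blk_mq_update_nr_hw_queues(&dev->tag_set, new_nr_queues);
 */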
3322
3323 /* Enable polling stats and return whether they were already enabled. */
3324 static bool blk_poll_stats_enable(struct request_queue *q)
3325 {
3326         if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3327             blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
3328                 return true;
3329         blk_stat_add_callback(q, q->poll_cb);
3330         return false;
3331 }
3332
3333 static void blk_mq_poll_stats_start(struct request_queue *q)
3334 {
3335         /*
3336          * We don't arm the callback if polling stats are not enabled or the
3337          * callback is already active.
3338          */
3339         if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3340             blk_stat_is_active(q->poll_cb))
3341                 return;
3342
3343         blk_stat_activate_msecs(q->poll_cb, 100);
3344 }
3345
3346 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3347 {
3348         struct request_queue *q = cb->data;
3349         int bucket;
3350
3351         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3352                 if (cb->stat[bucket].nr_samples)
3353                         q->poll_stat[bucket] = cb->stat[bucket];
3354         }
3355 }
3356
3357 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3358                                        struct blk_mq_hw_ctx *hctx,
3359                                        struct request *rq)
3360 {
3361         unsigned long ret = 0;
3362         int bucket;
3363
3364         /*
3365          * If stats collection isn't on, don't sleep but turn it on for
3366          * future users.
3367          */
3368         if (!blk_poll_stats_enable(q))
3369                 return 0;
3370
3371         /*
3372          * As an optimistic guess, use half of the mean service time
3373          * for this type of request. We can (and should) make this smarter.
3374          * For instance, if the completion latencies are tight, we can
3375          * get closer than just half the mean. This is especially
3376          * important on devices where the completion latencies are longer
3377          * than ~10 usec. We do use the stats for the relevant IO size
3378          * if available which does lead to better estimates.
3379          */
3380         bucket = blk_mq_poll_stats_bkt(rq);
3381         if (bucket < 0)
3382                 return ret;
3383
3384         if (q->poll_stat[bucket].nr_samples)
3385                 ret = (q->poll_stat[bucket].mean + 1) / 2;
3386
3387         return ret;
3388 }
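
/*
 * Worked example (illustrative numbers): if the stats bucket for this
 * request reports a mean completion time of 20000 ns, the sleep target
 * computed above is (20000 + 1) / 2 = 10000 ns, i.e. we sleep for
 * roughly half the expected latency before busy polling.
 */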
3389
3390 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
3391                                      struct blk_mq_hw_ctx *hctx,
3392                                      struct request *rq)
3393 {
3394         struct hrtimer_sleeper hs;
3395         enum hrtimer_mode mode;
3396         unsigned int nsecs;
3397         ktime_t kt;
3398
3399         if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
3400                 return false;
3401
3402         /*
3403          * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
3404          *
3405          *  0:  use half of prev avg
3406          * >0:  use this specific value
3407          */
3408         if (q->poll_nsec > 0)
3409                 nsecs = q->poll_nsec;
3410         else
3411                 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
3412
3413         if (!nsecs)
3414                 return false;
3415
3416         rq->rq_flags |= RQF_MQ_POLL_SLEPT;
3417
3418         /*
3419          * Sleep for the target computed above: either the fixed
3420          * q->poll_nsec or half the mean completion time from the stats.
3421          */
3422         kt = nsecs;
3423
3424         mode = HRTIMER_MODE_REL;
3425         hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
3426         hrtimer_set_expires(&hs.timer, kt);
3427
3428         hrtimer_init_sleeper(&hs, current);
3429         do {
3430                 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
3431                         break;
3432                 set_current_state(TASK_UNINTERRUPTIBLE);
3433                 hrtimer_start_expires(&hs.timer, mode);
3434                 if (hs.task)
3435                         io_schedule();
3436                 hrtimer_cancel(&hs.timer);
3437                 mode = HRTIMER_MODE_ABS;
3438         } while (hs.task && !signal_pending(current));
3439
3440         __set_current_state(TASK_RUNNING);
3441         destroy_hrtimer_on_stack(&hs.timer);
3442         return true;
3443 }
3444
3445 static bool blk_mq_poll_hybrid(struct request_queue *q,
3446                                struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
3447 {
3448         struct request *rq;
3449
3450         if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
3451                 return false;
3452
3453         if (!blk_qc_t_is_internal(cookie))
3454                 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3455         else {
3456                 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3457                 /*
3458                  * With scheduling, if the request has completed, we'll
3459                  * get a NULL return here, as we clear the sched tag when
3460                  * that happens. The request still remains valid, like always,
3461                  * so we should be safe with just the NULL check.
3462                  */
3463                 if (!rq)
3464                         return false;
3465         }
3466
3467         return blk_mq_poll_hybrid_sleep(q, hctx, rq);
3468 }
3469
3470 /**
3471  * blk_poll - poll for IO completions
3472  * @q:  the queue
3473  * @cookie: cookie passed back at IO submission time
3474  * @spin: whether to spin for completions
3475  *
3476  * Description:
3477  *    Poll for completions on the passed in queue. Returns number of
3478  *    completed entries found. If @spin is true, then blk_poll will continue
3479  *    looping until at least one completion is found, unless the task is
3480  *    otherwise marked running (or we need to reschedule).
3481  */
3482 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
3483 {
3484         struct blk_mq_hw_ctx *hctx;
3485         long state;
3486
3487         if (!blk_qc_t_valid(cookie) ||
3488             !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
3489                 return 0;
3490
3491         if (current->plug)
3492                 blk_flush_plug_list(current->plug, false);
3493
3494         hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3495
3496         /*
3497          * If we sleep, have the caller restart the poll loop to reset
3498          * the state. Like for the other success return cases, the
3499          * caller is responsible for checking if the IO completed. If
3500          * the IO isn't complete, we'll get called again and will go
3501          * straight to the busy poll loop.
3502          */
3503         if (blk_mq_poll_hybrid(q, hctx, cookie))
3504                 return 1;
3505
3506         hctx->poll_considered++;
3507
3508         state = current->state;
3509         do {
3510                 int ret;
3511
3512                 hctx->poll_invoked++;
3513
3514                 ret = q->mq_ops->poll(hctx);
3515                 if (ret > 0) {
3516                         hctx->poll_success++;
3517                         __set_current_state(TASK_RUNNING);
3518                         return ret;
3519                 }
3520
3521                 if (signal_pending_state(state, current))
3522                         __set_current_state(TASK_RUNNING);
3523
3524                 if (current->state == TASK_RUNNING)
3525                         return 1;
3526                 if (ret < 0 || !spin)
3527                         break;
3528                 cpu_relax();
3529         } while (!need_resched());
3530
3531         __set_current_state(TASK_RUNNING);
3532         return 0;
3533 }
3534 EXPORT_SYMBOL_GPL(blk_poll);
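
/*
 * Caller sketch, loosely modeled on the direct I/O polling loop; this is
 * a hedged example, not a verbatim copy of any in-tree caller. bio_done
 * is an assumed completion flag set by the bio's end_io handler:
 *
 *	blk_qc_t qc = submit_bio(bio);
 *
 *	while (!READ_ONCE(bio_done)) {
 *		if (!blk_poll(q, qc, true))
 *			io_schedule();
 *	}
 */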
3535
3536 unsigned int blk_mq_rq_cpu(struct request *rq)
3537 {
3538         return rq->mq_ctx->cpu;
3539 }
3540 EXPORT_SYMBOL(blk_mq_rq_cpu);
3541
3542 static int __init blk_mq_init(void)
3543 {
3544         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
3545                                 blk_mq_hctx_notify_dead);
3546         return 0;
3547 }
3548 subsys_initcall(blk_mq_init);