block: enable batched allocation for blk_mq_alloc_request()
platform/kernel/linux-starfive.git: block/blk-mq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Block multiqueue core code
4  *
5  * Copyright (C) 2013-2014 Jens Axboe
6  * Copyright (C) 2013-2014 Christoph Hellwig
7  */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/backing-dev.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/blk-integrity.h>
14 #include <linux/kmemleak.h>
15 #include <linux/mm.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/workqueue.h>
19 #include <linux/smp.h>
20 #include <linux/interrupt.h>
21 #include <linux/llist.h>
22 #include <linux/cpu.h>
23 #include <linux/cache.h>
24 #include <linux/sched/sysctl.h>
25 #include <linux/sched/topology.h>
26 #include <linux/sched/signal.h>
27 #include <linux/delay.h>
28 #include <linux/crash_dump.h>
29 #include <linux/prefetch.h>
30 #include <linux/blk-crypto.h>
31 #include <linux/part_stat.h>
32
33 #include <trace/events/block.h>
34
35 #include <linux/blk-mq.h>
36 #include <linux/t10-pi.h>
37 #include "blk.h"
38 #include "blk-mq.h"
39 #include "blk-mq-debugfs.h"
40 #include "blk-mq-tag.h"
41 #include "blk-pm.h"
42 #include "blk-stat.h"
43 #include "blk-mq-sched.h"
44 #include "blk-rq-qos.h"
45 #include "blk-ioprio.h"
46
47 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
48
49 static void blk_mq_poll_stats_start(struct request_queue *q);
50 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
51
52 static int blk_mq_poll_stats_bkt(const struct request *rq)
53 {
54         int ddir, sectors, bucket;
55
56         ddir = rq_data_dir(rq);
57         sectors = blk_rq_stats_sectors(rq);
58
59         bucket = ddir + 2 * ilog2(sectors);
60
61         if (bucket < 0)
62                 return -1;
63         else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
64                 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
65
66         return bucket;
67 }
68
69 #define BLK_QC_T_SHIFT          16
70 #define BLK_QC_T_INTERNAL       (1U << 31)
71
72 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
73                 blk_qc_t qc)
74 {
75         return xa_load(&q->hctx_table,
76                         (qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
77 }
78
79 static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
80                 blk_qc_t qc)
81 {
82         unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);
83
84         if (qc & BLK_QC_T_INTERNAL)
85                 return blk_mq_tag_to_rq(hctx->sched_tags, tag);
86         return blk_mq_tag_to_rq(hctx->tags, tag);
87 }
88
89 static inline blk_qc_t blk_rq_to_qc(struct request *rq)
90 {
91         return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
92                 (rq->tag != -1 ?
93                  rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
94 }
95
96 /*
97  * Check if any ctx, the dispatch list or the elevator has pending
98  * work in this hardware queue.
99  */
100 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
101 {
102         return !list_empty_careful(&hctx->dispatch) ||
103                 sbitmap_any_bit_set(&hctx->ctx_map) ||
104                         blk_mq_sched_has_work(hctx);
105 }
106
107 /*
108  * Mark this ctx as having pending work in this hardware queue
109  */
110 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
111                                      struct blk_mq_ctx *ctx)
112 {
113         const int bit = ctx->index_hw[hctx->type];
114
115         if (!sbitmap_test_bit(&hctx->ctx_map, bit))
116                 sbitmap_set_bit(&hctx->ctx_map, bit);
117 }
118
119 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
120                                       struct blk_mq_ctx *ctx)
121 {
122         const int bit = ctx->index_hw[hctx->type];
123
124         sbitmap_clear_bit(&hctx->ctx_map, bit);
125 }
126
127 struct mq_inflight {
128         struct block_device *part;
129         unsigned int inflight[2];
130 };
131
132 static bool blk_mq_check_inflight(struct request *rq, void *priv)
133 {
134         struct mq_inflight *mi = priv;
135
136         if (rq->part && blk_do_io_stat(rq) &&
137             (!mi->part->bd_partno || rq->part == mi->part) &&
138             blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
139                 mi->inflight[rq_data_dir(rq)]++;
140
141         return true;
142 }
143
144 unsigned int blk_mq_in_flight(struct request_queue *q,
145                 struct block_device *part)
146 {
147         struct mq_inflight mi = { .part = part };
148
149         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
150
151         return mi.inflight[0] + mi.inflight[1];
152 }
153
154 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
155                 unsigned int inflight[2])
156 {
157         struct mq_inflight mi = { .part = part };
158
159         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
160         inflight[0] = mi.inflight[0];
161         inflight[1] = mi.inflight[1];
162 }
163
164 void blk_freeze_queue_start(struct request_queue *q)
165 {
166         mutex_lock(&q->mq_freeze_lock);
167         if (++q->mq_freeze_depth == 1) {
168                 percpu_ref_kill(&q->q_usage_counter);
169                 mutex_unlock(&q->mq_freeze_lock);
170                 if (queue_is_mq(q))
171                         blk_mq_run_hw_queues(q, false);
172         } else {
173                 mutex_unlock(&q->mq_freeze_lock);
174         }
175 }
176 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
177
178 void blk_mq_freeze_queue_wait(struct request_queue *q)
179 {
180         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
181 }
182 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
183
184 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
185                                      unsigned long timeout)
186 {
187         return wait_event_timeout(q->mq_freeze_wq,
188                                         percpu_ref_is_zero(&q->q_usage_counter),
189                                         timeout);
190 }
191 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
192
193 /*
194  * Guarantee no request is in use, so we can change any data structure of
195  * the queue afterward.
196  */
197 void blk_freeze_queue(struct request_queue *q)
198 {
199         /*
200          * In the !blk_mq case we are only calling this to kill the
201          * q_usage_counter, otherwise this increases the freeze depth
202          * and waits for it to return to zero.  For this reason there is
203          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
204          * exported to drivers as the only user for unfreeze is blk_mq.
205          */
206         blk_freeze_queue_start(q);
207         blk_mq_freeze_queue_wait(q);
208 }
209
210 void blk_mq_freeze_queue(struct request_queue *q)
211 {
212         /*
213          * ...just an alias to keep freeze and unfreeze actions balanced
214          * in the blk_mq_* namespace
215          */
216         blk_freeze_queue(q);
217 }
218 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
219
220 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
221 {
222         mutex_lock(&q->mq_freeze_lock);
223         if (force_atomic)
224                 q->q_usage_counter.data->force_atomic = true;
225         q->mq_freeze_depth--;
226         WARN_ON_ONCE(q->mq_freeze_depth < 0);
227         if (!q->mq_freeze_depth) {
228                 percpu_ref_resurrect(&q->q_usage_counter);
229                 wake_up_all(&q->mq_freeze_wq);
230         }
231         mutex_unlock(&q->mq_freeze_lock);
232 }
233
234 void blk_mq_unfreeze_queue(struct request_queue *q)
235 {
236         __blk_mq_unfreeze_queue(q, false);
237 }
238 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
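/*
 * Illustrative sketch (not part of this file): the freeze/unfreeze pair is
 * the heavyweight way to get a queue with no requests in flight, e.g.
 * before changing queue data structures.  update_queue_settings() is a
 * made-up placeholder for whatever the caller needs to do:
 *
 *	blk_mq_freeze_queue(q);
 *	update_queue_settings(q);
 *	blk_mq_unfreeze_queue(q);
 *
 * blk_mq_freeze_queue() both starts the freeze and waits for
 * q_usage_counter to reach zero, so it must not be called from a context
 * that holds a queue reference it will only drop afterwards.
 */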
239
240 /*
241  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
242  * mpt3sas driver such that this function can be removed.
243  */
244 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
245 {
246         unsigned long flags;
247
248         spin_lock_irqsave(&q->queue_lock, flags);
249         if (!q->quiesce_depth++)
250                 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
251         spin_unlock_irqrestore(&q->queue_lock, flags);
252 }
253 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
254
255 /**
256  * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
257  * @q: request queue.
258  *
259  * Note: it is the driver's responsibility to make sure that quiesce has
260  * been started.
261  */
262 void blk_mq_wait_quiesce_done(struct request_queue *q)
263 {
264         if (blk_queue_has_srcu(q))
265                 synchronize_srcu(q->srcu);
266         else
267                 synchronize_rcu();
268 }
269 EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);
270
271 /**
272  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
273  * @q: request queue.
274  *
275  * Note: this function does not prevent the struct request end_io()
276  * callback from being invoked. Once this function returns, it is
277  * guaranteed that no dispatch can happen until the queue is unquiesced
278  * via blk_mq_unquiesce_queue().
279  */
280 void blk_mq_quiesce_queue(struct request_queue *q)
281 {
282         blk_mq_quiesce_queue_nowait(q);
283         blk_mq_wait_quiesce_done(q);
284 }
285 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
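/*
 * Illustrative sketch (not part of this file): quiescing is cheaper than
 * freezing; it only guarantees that no new dispatch happens until the
 * queue is unquiesced, while requests that were already dispatched may
 * still complete.  A hypothetical error handler could do:
 *
 *	blk_mq_quiesce_queue(q);
 *	mydrv_reset_hardware(dev);	(made-up helper)
 *	blk_mq_unquiesce_queue(q);
 */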
286
287 /*
288  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
289  * @q: request queue.
290  *
291  * This function restores the queue to the state it was in before
292  * blk_mq_quiesce_queue() was called.
293  */
294 void blk_mq_unquiesce_queue(struct request_queue *q)
295 {
296         unsigned long flags;
297         bool run_queue = false;
298
299         spin_lock_irqsave(&q->queue_lock, flags);
300         if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
301                 ;
302         } else if (!--q->quiesce_depth) {
303                 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
304                 run_queue = true;
305         }
306         spin_unlock_irqrestore(&q->queue_lock, flags);
307
308         /* dispatch requests which are inserted during quiescing */
309         if (run_queue)
310                 blk_mq_run_hw_queues(q, true);
311 }
312 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
313
314 void blk_mq_wake_waiters(struct request_queue *q)
315 {
316         struct blk_mq_hw_ctx *hctx;
317         unsigned long i;
318
319         queue_for_each_hw_ctx(q, hctx, i)
320                 if (blk_mq_hw_queue_mapped(hctx))
321                         blk_mq_tag_wakeup_all(hctx->tags, true);
322 }
323
324 void blk_rq_init(struct request_queue *q, struct request *rq)
325 {
326         memset(rq, 0, sizeof(*rq));
327
328         INIT_LIST_HEAD(&rq->queuelist);
329         rq->q = q;
330         rq->__sector = (sector_t) -1;
331         INIT_HLIST_NODE(&rq->hash);
332         RB_CLEAR_NODE(&rq->rb_node);
333         rq->tag = BLK_MQ_NO_TAG;
334         rq->internal_tag = BLK_MQ_NO_TAG;
335         rq->start_time_ns = ktime_get_ns();
336         rq->part = NULL;
337         blk_crypto_rq_set_defaults(rq);
338 }
339 EXPORT_SYMBOL(blk_rq_init);
340
341 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
342                 struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
343 {
344         struct blk_mq_ctx *ctx = data->ctx;
345         struct blk_mq_hw_ctx *hctx = data->hctx;
346         struct request_queue *q = data->q;
347         struct request *rq = tags->static_rqs[tag];
348
349         rq->q = q;
350         rq->mq_ctx = ctx;
351         rq->mq_hctx = hctx;
352         rq->cmd_flags = data->cmd_flags;
353
354         if (data->flags & BLK_MQ_REQ_PM)
355                 data->rq_flags |= RQF_PM;
356         if (blk_queue_io_stat(q))
357                 data->rq_flags |= RQF_IO_STAT;
358         rq->rq_flags = data->rq_flags;
359
360         if (!(data->rq_flags & RQF_ELV)) {
361                 rq->tag = tag;
362                 rq->internal_tag = BLK_MQ_NO_TAG;
363         } else {
364                 rq->tag = BLK_MQ_NO_TAG;
365                 rq->internal_tag = tag;
366         }
367         rq->timeout = 0;
368
369         if (blk_mq_need_time_stamp(rq))
370                 rq->start_time_ns = ktime_get_ns();
371         else
372                 rq->start_time_ns = 0;
373         rq->part = NULL;
374 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
375         rq->alloc_time_ns = alloc_time_ns;
376 #endif
377         rq->io_start_time_ns = 0;
378         rq->stats_sectors = 0;
379         rq->nr_phys_segments = 0;
380 #if defined(CONFIG_BLK_DEV_INTEGRITY)
381         rq->nr_integrity_segments = 0;
382 #endif
383         rq->end_io = NULL;
384         rq->end_io_data = NULL;
385
386         blk_crypto_rq_set_defaults(rq);
387         INIT_LIST_HEAD(&rq->queuelist);
388         /* tag was already set */
389         WRITE_ONCE(rq->deadline, 0);
390         req_ref_set(rq, 1);
391
392         if (rq->rq_flags & RQF_ELV) {
393                 struct elevator_queue *e = data->q->elevator;
394
395                 INIT_HLIST_NODE(&rq->hash);
396                 RB_CLEAR_NODE(&rq->rb_node);
397
398                 if (!op_is_flush(data->cmd_flags) &&
399                     e->type->ops.prepare_request) {
400                         e->type->ops.prepare_request(rq);
401                         rq->rq_flags |= RQF_ELVPRIV;
402                 }
403         }
404
405         return rq;
406 }
407
408 static inline struct request *
409 __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
410                 u64 alloc_time_ns)
411 {
412         unsigned int tag, tag_offset;
413         struct blk_mq_tags *tags;
414         struct request *rq;
415         unsigned long tag_mask;
416         int i, nr = 0;
417
418         tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
419         if (unlikely(!tag_mask))
420                 return NULL;
421
422         tags = blk_mq_tags_from_data(data);
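        /*
         * Each set bit in tag_mask corresponds to a tag handed out by
         * blk_mq_get_tags(), offset from tag_offset.  Initialize a request
         * for each one and stash it on the caller's cached_rq list; one of
         * them is popped off and returned below.
         */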
423         for (i = 0; tag_mask; i++) {
424                 if (!(tag_mask & (1UL << i)))
425                         continue;
426                 tag = tag_offset + i;
427                 prefetch(tags->static_rqs[tag]);
428                 tag_mask &= ~(1UL << i);
429                 rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
430                 rq_list_add(data->cached_rq, rq);
431                 nr++;
432         }
433         /* caller already holds a reference, add for remainder */
434         percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
435         data->nr_tags -= nr;
436
437         return rq_list_pop(data->cached_rq);
438 }
439
440 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
441 {
442         struct request_queue *q = data->q;
443         u64 alloc_time_ns = 0;
444         struct request *rq;
445         unsigned int tag;
446
447         /* alloc_time includes depth and tag waits */
448         if (blk_queue_rq_alloc_time(q))
449                 alloc_time_ns = ktime_get_ns();
450
451         if (data->cmd_flags & REQ_NOWAIT)
452                 data->flags |= BLK_MQ_REQ_NOWAIT;
453
454         if (q->elevator) {
455                 struct elevator_queue *e = q->elevator;
456
457                 data->rq_flags |= RQF_ELV;
458
459                 /*
460                  * Flush/passthrough requests are special and go directly to the
461                  * dispatch list. Don't include reserved tags in the
462                  * limiting, as it isn't useful.
463                  */
464                 if (!op_is_flush(data->cmd_flags) &&
465                     !blk_op_is_passthrough(data->cmd_flags) &&
466                     e->type->ops.limit_depth &&
467                     !(data->flags & BLK_MQ_REQ_RESERVED))
468                         e->type->ops.limit_depth(data->cmd_flags, data);
469         }
470
471 retry:
472         data->ctx = blk_mq_get_ctx(q);
473         data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
474         if (!(data->rq_flags & RQF_ELV))
475                 blk_mq_tag_busy(data->hctx);
476
477         if (data->flags & BLK_MQ_REQ_RESERVED)
478                 data->rq_flags |= RQF_RESV;
479
480         /*
481          * Try batched alloc if we want more than 1 tag.
482          */
483         if (data->nr_tags > 1) {
484                 rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
485                 if (rq)
486                         return rq;
487                 data->nr_tags = 1;
488         }
489
490         /*
491          * Waiting allocations only fail because of an inactive hctx.  In that
492          * case just retry the hctx assignment and tag allocation as CPU hotplug
493          * should have migrated us to an online CPU by now.
494          */
495         tag = blk_mq_get_tag(data);
496         if (tag == BLK_MQ_NO_TAG) {
497                 if (data->flags & BLK_MQ_REQ_NOWAIT)
498                         return NULL;
499                 /*
500                  * Give up the CPU and sleep for a short time to ensure
501                  * that threads using a realtime scheduling class are
502                  * migrated off the CPU, and thus off the hctx that is
503                  * going away.
504                  */
505                 msleep(3);
506                 goto retry;
507         }
508
509         return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
510                                         alloc_time_ns);
511 }
512
513 static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
514                                             struct blk_plug *plug,
515                                             blk_opf_t opf,
516                                             blk_mq_req_flags_t flags)
517 {
518         struct blk_mq_alloc_data data = {
519                 .q              = q,
520                 .flags          = flags,
521                 .cmd_flags      = opf,
522                 .nr_tags        = plug->nr_ios,
523                 .cached_rq      = &plug->cached_rq,
524         };
525         struct request *rq;
526
527         if (blk_queue_enter(q, flags))
528                 return NULL;
529
530         plug->nr_ios = 1;
531
532         rq = __blk_mq_alloc_requests(&data);
533         if (unlikely(!rq))
534                 blk_queue_exit(q);
535         return rq;
536 }
537
538 static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
539                                                    blk_opf_t opf,
540                                                    blk_mq_req_flags_t flags)
541 {
542         struct blk_plug *plug = current->plug;
543         struct request *rq;
544
545         if (!plug)
546                 return NULL;
547         if (rq_list_empty(plug->cached_rq)) {
548                 if (plug->nr_ios == 1)
549                         return NULL;
550                 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
551                 if (rq)
552                         goto got_it;
553                 return NULL;
554         }
555         rq = rq_list_peek(&plug->cached_rq);
556         if (!rq || rq->q != q)
557                 return NULL;
558
559         if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
560                 return NULL;
561         if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
562                 return NULL;
563
564         plug->cached_rq = rq_list_next(rq);
565 got_it:
566         rq->cmd_flags = opf;
567         INIT_LIST_HEAD(&rq->queuelist);
568         return rq;
569 }
570
571 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
572                 blk_mq_req_flags_t flags)
573 {
574         struct request *rq;
575
576         rq = blk_mq_alloc_cached_request(q, opf, flags);
577         if (!rq) {
578                 struct blk_mq_alloc_data data = {
579                         .q              = q,
580                         .flags          = flags,
581                         .cmd_flags      = opf,
582                         .nr_tags        = 1,
583                 };
584                 int ret;
585
586                 ret = blk_queue_enter(q, flags);
587                 if (ret)
588                         return ERR_PTR(ret);
589
590                 rq = __blk_mq_alloc_requests(&data);
591                 if (!rq)
592                         goto out_queue_exit;
593         }
594         rq->__data_len = 0;
595         rq->__sector = (sector_t) -1;
596         rq->bio = rq->biotail = NULL;
597         return rq;
598 out_queue_exit:
599         blk_queue_exit(q);
600         return ERR_PTR(-EWOULDBLOCK);
601 }
602 EXPORT_SYMBOL(blk_mq_alloc_request);
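/*
 * Illustrative sketch (not part of this file): a typical caller allocates a
 * passthrough request, executes it synchronously and frees it.  Setup of
 * the command payload is omitted:
 *
 *	struct request *rq;
 *	blk_status_t status;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *
 * When the caller has a plug with nr_ios > 1, blk_mq_alloc_cached_request()
 * above lets this path be served from the batch that
 * __blk_mq_alloc_requests_batch() allocated in one go.
 */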
603
604 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
605         blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
606 {
607         struct blk_mq_alloc_data data = {
608                 .q              = q,
609                 .flags          = flags,
610                 .cmd_flags      = opf,
611                 .nr_tags        = 1,
612         };
613         u64 alloc_time_ns = 0;
614         unsigned int cpu;
615         unsigned int tag;
616         int ret;
617
618         /* alloc_time includes depth and tag waits */
619         if (blk_queue_rq_alloc_time(q))
620                 alloc_time_ns = ktime_get_ns();
621
622         /*
623          * If the tag allocator sleeps we could get an allocation for a
624          * different hardware context.  No need to complicate the low level
625          * allocator for this for the rare use case of a command tied to
626          * a specific queue.
627          */
628         if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
629                 return ERR_PTR(-EINVAL);
630
631         if (hctx_idx >= q->nr_hw_queues)
632                 return ERR_PTR(-EIO);
633
634         ret = blk_queue_enter(q, flags);
635         if (ret)
636                 return ERR_PTR(ret);
637
638         /*
639          * Check if the hardware context is actually mapped to anything.
640          * If not, tell the caller that it should skip this queue.
641          */
642         ret = -EXDEV;
643         data.hctx = xa_load(&q->hctx_table, hctx_idx);
644         if (!blk_mq_hw_queue_mapped(data.hctx))
645                 goto out_queue_exit;
646         cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
647         if (cpu >= nr_cpu_ids)
648                 goto out_queue_exit;
649         data.ctx = __blk_mq_get_ctx(q, cpu);
650
651         if (!q->elevator)
652                 blk_mq_tag_busy(data.hctx);
653         else
654                 data.rq_flags |= RQF_ELV;
655
656         if (flags & BLK_MQ_REQ_RESERVED)
657                 data.rq_flags |= RQF_RESV;
658
659         ret = -EWOULDBLOCK;
660         tag = blk_mq_get_tag(&data);
661         if (tag == BLK_MQ_NO_TAG)
662                 goto out_queue_exit;
663         return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
664                                         alloc_time_ns);
665
666 out_queue_exit:
667         blk_queue_exit(q);
668         return ERR_PTR(ret);
669 }
670 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
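/*
 * Illustrative sketch (not part of this file): blk_mq_alloc_request_hctx()
 * serves the rare case of a command that must be issued on one specific
 * hardware queue, e.g. per-queue initialization commands.  Because the
 * caller must pass BLK_MQ_REQ_NOWAIT or BLK_MQ_REQ_RESERVED, the
 * allocation can fail and has to be handled:
 *
 *	rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
 *				       BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED,
 *				       qid);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 */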
671
672 static void __blk_mq_free_request(struct request *rq)
673 {
674         struct request_queue *q = rq->q;
675         struct blk_mq_ctx *ctx = rq->mq_ctx;
676         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
677         const int sched_tag = rq->internal_tag;
678
679         blk_crypto_free_request(rq);
680         blk_pm_mark_last_busy(rq);
681         rq->mq_hctx = NULL;
682         if (rq->tag != BLK_MQ_NO_TAG)
683                 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
684         if (sched_tag != BLK_MQ_NO_TAG)
685                 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
686         blk_mq_sched_restart(hctx);
687         blk_queue_exit(q);
688 }
689
690 void blk_mq_free_request(struct request *rq)
691 {
692         struct request_queue *q = rq->q;
693         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
694
695         if ((rq->rq_flags & RQF_ELVPRIV) &&
696             q->elevator->type->ops.finish_request)
697                 q->elevator->type->ops.finish_request(rq);
698
699         if (rq->rq_flags & RQF_MQ_INFLIGHT)
700                 __blk_mq_dec_active_requests(hctx);
701
702         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
703                 laptop_io_completion(q->disk->bdi);
704
705         rq_qos_done(q, rq);
706
707         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
708         if (req_ref_put_and_test(rq))
709                 __blk_mq_free_request(rq);
710 }
711 EXPORT_SYMBOL_GPL(blk_mq_free_request);
712
713 void blk_mq_free_plug_rqs(struct blk_plug *plug)
714 {
715         struct request *rq;
716
717         while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
718                 blk_mq_free_request(rq);
719 }
720
721 void blk_dump_rq_flags(struct request *rq, char *msg)
722 {
723         printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
724                 rq->q->disk ? rq->q->disk->disk_name : "?",
725                 (__force unsigned long long) rq->cmd_flags);
726
727         printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
728                (unsigned long long)blk_rq_pos(rq),
729                blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
730         printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
731                rq->bio, rq->biotail, blk_rq_bytes(rq));
732 }
733 EXPORT_SYMBOL(blk_dump_rq_flags);
734
735 static void req_bio_endio(struct request *rq, struct bio *bio,
736                           unsigned int nbytes, blk_status_t error)
737 {
738         if (unlikely(error)) {
739                 bio->bi_status = error;
740         } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
741                 /*
742                  * Partial zone append completions cannot be supported as the
743                  * BIO fragments may end up not being written sequentially.
744                  */
745                 if (bio->bi_iter.bi_size != nbytes)
746                         bio->bi_status = BLK_STS_IOERR;
747                 else
748                         bio->bi_iter.bi_sector = rq->__sector;
749         }
750
751         bio_advance(bio, nbytes);
752
753         if (unlikely(rq->rq_flags & RQF_QUIET))
754                 bio_set_flag(bio, BIO_QUIET);
755         /* don't actually finish bio if it's part of flush sequence */
756         if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
757                 bio_endio(bio);
758 }
759
760 static void blk_account_io_completion(struct request *req, unsigned int bytes)
761 {
762         if (req->part && blk_do_io_stat(req)) {
763                 const int sgrp = op_stat_group(req_op(req));
764
765                 part_stat_lock();
766                 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
767                 part_stat_unlock();
768         }
769 }
770
771 static void blk_print_req_error(struct request *req, blk_status_t status)
772 {
773         printk_ratelimited(KERN_ERR
774                 "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
775                 "phys_seg %u prio class %u\n",
776                 blk_status_to_str(status),
777                 req->q->disk ? req->q->disk->disk_name : "?",
778                 blk_rq_pos(req), (__force u32)req_op(req),
779                 blk_op_str(req_op(req)),
780                 (__force u32)(req->cmd_flags & ~REQ_OP_MASK),
781                 req->nr_phys_segments,
782                 IOPRIO_PRIO_CLASS(req->ioprio));
783 }
784
785 /*
786  * Fully end IO on a request. Does not support partial completions, or
787  * errors.
788  */
789 static void blk_complete_request(struct request *req)
790 {
791         const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
792         int total_bytes = blk_rq_bytes(req);
793         struct bio *bio = req->bio;
794
795         trace_block_rq_complete(req, BLK_STS_OK, total_bytes);
796
797         if (!bio)
798                 return;
799
800 #ifdef CONFIG_BLK_DEV_INTEGRITY
801         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
802                 req->q->integrity.profile->complete_fn(req, total_bytes);
803 #endif
804
805         blk_account_io_completion(req, total_bytes);
806
807         do {
808                 struct bio *next = bio->bi_next;
809
810                 /* Completion has already been traced */
811                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
812
813                 if (req_op(req) == REQ_OP_ZONE_APPEND)
814                         bio->bi_iter.bi_sector = req->__sector;
815
816                 if (!is_flush)
817                         bio_endio(bio);
818                 bio = next;
819         } while (bio);
820
821         /*
822          * Reset counters so that the request stacking driver
823          * can find how many bytes remain in the request
824          * later.
825          */
826         req->bio = NULL;
827         req->__data_len = 0;
828 }
829
830 /**
831  * blk_update_request - Complete multiple bytes without completing the request
832  * @req:      the request being processed
833  * @error:    block status code
834  * @nr_bytes: number of bytes to complete for @req
835  *
836  * Description:
837  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
838  *     the request structure even if @req doesn't have leftover.
839  *     If @req has leftover, sets it up for the next range of segments.
840  *
841  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
842  *     %false return from this function.
843  *
844  * Note:
845  *      The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
846  *      except in the consistency check at the end of this function.
847  *
848  * Return:
849  *     %false - this request doesn't have any more data
850  *     %true  - this request has more data
851  **/
852 bool blk_update_request(struct request *req, blk_status_t error,
853                 unsigned int nr_bytes)
854 {
855         int total_bytes;
856
857         trace_block_rq_complete(req, error, nr_bytes);
858
859         if (!req->bio)
860                 return false;
861
862 #ifdef CONFIG_BLK_DEV_INTEGRITY
863         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
864             error == BLK_STS_OK)
865                 req->q->integrity.profile->complete_fn(req, nr_bytes);
866 #endif
867
868         if (unlikely(error && !blk_rq_is_passthrough(req) &&
869                      !(req->rq_flags & RQF_QUIET)) &&
870                      !test_bit(GD_DEAD, &req->q->disk->state)) {
871                 blk_print_req_error(req, error);
872                 trace_block_rq_error(req, error, nr_bytes);
873         }
874
875         blk_account_io_completion(req, nr_bytes);
876
877         total_bytes = 0;
878         while (req->bio) {
879                 struct bio *bio = req->bio;
880                 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
881
882                 if (bio_bytes == bio->bi_iter.bi_size)
883                         req->bio = bio->bi_next;
884
885                 /* Completion has already been traced */
886                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
887                 req_bio_endio(req, bio, bio_bytes, error);
888
889                 total_bytes += bio_bytes;
890                 nr_bytes -= bio_bytes;
891
892                 if (!nr_bytes)
893                         break;
894         }
895
896         /*
897          * completely done
898          */
899         if (!req->bio) {
900                 /*
901                  * Reset counters so that the request stacking driver
902                  * can find how many bytes remain in the request
903                  * later.
904                  */
905                 req->__data_len = 0;
906                 return false;
907         }
908
909         req->__data_len -= total_bytes;
910
911         /* update sector only for requests with clear definition of sector */
912         if (!blk_rq_is_passthrough(req))
913                 req->__sector += total_bytes >> 9;
914
915         /* mixed attributes always follow the first bio */
916         if (req->rq_flags & RQF_MIXED_MERGE) {
917                 req->cmd_flags &= ~REQ_FAILFAST_MASK;
918                 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
919         }
920
921         if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
922                 /*
923                  * If total number of sectors is less than the first segment
924                  * size, something has gone terribly wrong.
925                  */
926                 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
927                         blk_dump_rq_flags(req, "request botched");
928                         req->__data_len = blk_rq_cur_bytes(req);
929                 }
930
931                 /* recalculate the number of segments */
932                 req->nr_phys_segments = blk_recalc_rq_segments(req);
933         }
934
935         return true;
936 }
937 EXPORT_SYMBOL_GPL(blk_update_request);
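/*
 * Illustrative sketch (not part of this file): stacking drivers such as
 * SCSI use blk_update_request() for partial completion and only end the
 * request once nothing is left; requeueing of the remainder is omitted
 * here:
 *
 *	if (!blk_update_request(rq, status, bytes_done))
 *		__blk_mq_end_request(rq, status);
 *	else
 *		requeue or re-dispatch the rest of rq
 */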
938
939 static void __blk_account_io_done(struct request *req, u64 now)
940 {
941         const int sgrp = op_stat_group(req_op(req));
942
943         part_stat_lock();
944         update_io_ticks(req->part, jiffies, true);
945         part_stat_inc(req->part, ios[sgrp]);
946         part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
947         part_stat_unlock();
948 }
949
950 static inline void blk_account_io_done(struct request *req, u64 now)
951 {
952         /*
953          * Account IO completion.  flush_rq isn't accounted as a
954          * normal IO on queueing or completion.  Accounting the
955          * containing request is enough.
956          */
957         if (blk_do_io_stat(req) && req->part &&
958             !(req->rq_flags & RQF_FLUSH_SEQ))
959                 __blk_account_io_done(req, now);
960 }
961
962 static void __blk_account_io_start(struct request *rq)
963 {
964         /*
965          * All non-passthrough requests are created from a bio with one
966          * exception: when a flush command that is part of a flush sequence
967          * generated by the state machine in blk-flush.c is cloned onto the
968          * lower device by dm-multipath we can get here without a bio.
969          */
970         if (rq->bio)
971                 rq->part = rq->bio->bi_bdev;
972         else
973                 rq->part = rq->q->disk->part0;
974
975         part_stat_lock();
976         update_io_ticks(rq->part, jiffies, false);
977         part_stat_unlock();
978 }
979
980 static inline void blk_account_io_start(struct request *req)
981 {
982         if (blk_do_io_stat(req))
983                 __blk_account_io_start(req);
984 }
985
986 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
987 {
988         if (rq->rq_flags & RQF_STATS) {
989                 blk_mq_poll_stats_start(rq->q);
990                 blk_stat_add(rq, now);
991         }
992
993         blk_mq_sched_completed_request(rq, now);
994         blk_account_io_done(rq, now);
995 }
996
997 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
998 {
999         if (blk_mq_need_time_stamp(rq))
1000                 __blk_mq_end_request_acct(rq, ktime_get_ns());
1001
1002         if (rq->end_io) {
1003                 rq_qos_done(rq->q, rq);
1004                 rq->end_io(rq, error);
1005         } else {
1006                 blk_mq_free_request(rq);
1007         }
1008 }
1009 EXPORT_SYMBOL(__blk_mq_end_request);
1010
1011 void blk_mq_end_request(struct request *rq, blk_status_t error)
1012 {
1013         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
1014                 BUG();
1015         __blk_mq_end_request(rq, error);
1016 }
1017 EXPORT_SYMBOL(blk_mq_end_request);
1018
1019 #define TAG_COMP_BATCH          32
1020
1021 static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
1022                                           int *tag_array, int nr_tags)
1023 {
1024         struct request_queue *q = hctx->queue;
1025
1026         /*
1027          * All requests should have been marked as RQF_MQ_INFLIGHT, so
1028          * update hctx->nr_active in batch
1029          */
1030         if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
1031                 __blk_mq_sub_active_requests(hctx, nr_tags);
1032
1033         blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
1034         percpu_ref_put_many(&q->q_usage_counter, nr_tags);
1035 }
1036
1037 void blk_mq_end_request_batch(struct io_comp_batch *iob)
1038 {
1039         int tags[TAG_COMP_BATCH], nr_tags = 0;
1040         struct blk_mq_hw_ctx *cur_hctx = NULL;
1041         struct request *rq;
1042         u64 now = 0;
1043
1044         if (iob->need_ts)
1045                 now = ktime_get_ns();
1046
1047         while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
1048                 prefetch(rq->bio);
1049                 prefetch(rq->rq_next);
1050
1051                 blk_complete_request(rq);
1052                 if (iob->need_ts)
1053                         __blk_mq_end_request_acct(rq, now);
1054
1055                 rq_qos_done(rq->q, rq);
1056
1057                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1058                 if (!req_ref_put_and_test(rq))
1059                         continue;
1060
1061                 blk_crypto_free_request(rq);
1062                 blk_pm_mark_last_busy(rq);
1063
1064                 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
1065                         if (cur_hctx)
1066                                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1067                         nr_tags = 0;
1068                         cur_hctx = rq->mq_hctx;
1069                 }
1070                 tags[nr_tags++] = rq->tag;
1071         }
1072
1073         if (nr_tags)
1074                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1075 }
1076 EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
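/*
 * Illustrative sketch (not part of this file): batched completion is
 * normally driven from a driver's ->poll() handler.  Completed requests
 * are added to the caller-provided io_comp_batch with
 * blk_mq_add_to_batch(); the batch's completion hook can be
 * blk_mq_end_request_batch() itself or a driver wrapper around it:
 *
 *	while ((rq = mydrv_next_completed(dev)) != NULL) {	(made-up helper)
 *		if (!blk_mq_add_to_batch(rq, iob, 0,
 *					 blk_mq_end_request_batch))
 *			blk_mq_complete_request(rq);
 *	}
 */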
1077
1078 static void blk_complete_reqs(struct llist_head *list)
1079 {
1080         struct llist_node *entry = llist_reverse_order(llist_del_all(list));
1081         struct request *rq, *next;
1082
1083         llist_for_each_entry_safe(rq, next, entry, ipi_list)
1084                 rq->q->mq_ops->complete(rq);
1085 }
1086
1087 static __latent_entropy void blk_done_softirq(struct softirq_action *h)
1088 {
1089         blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
1090 }
1091
1092 static int blk_softirq_cpu_dead(unsigned int cpu)
1093 {
1094         blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
1095         return 0;
1096 }
1097
1098 static void __blk_mq_complete_request_remote(void *data)
1099 {
1100         __raise_softirq_irqoff(BLOCK_SOFTIRQ);
1101 }
1102
1103 static inline bool blk_mq_complete_need_ipi(struct request *rq)
1104 {
1105         int cpu = raw_smp_processor_id();
1106
1107         if (!IS_ENABLED(CONFIG_SMP) ||
1108             !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
1109                 return false;
1110         /*
1111          * With force threaded interrupts enabled, raising softirq from an SMP
1112          * function call will always result in waking the ksoftirqd thread.
1113          * This is probably worse than completing the request on a different
1114          * cache domain.
1115          */
1116         if (force_irqthreads())
1117                 return false;
1118
1119         /* same CPU or cache domain?  Complete locally */
1120         if (cpu == rq->mq_ctx->cpu ||
1121             (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
1122              cpus_share_cache(cpu, rq->mq_ctx->cpu)))
1123                 return false;
1124
1125         /* don't try to IPI to an offline CPU */
1126         return cpu_online(rq->mq_ctx->cpu);
1127 }
1128
1129 static void blk_mq_complete_send_ipi(struct request *rq)
1130 {
1131         struct llist_head *list;
1132         unsigned int cpu;
1133
1134         cpu = rq->mq_ctx->cpu;
1135         list = &per_cpu(blk_cpu_done, cpu);
1136         if (llist_add(&rq->ipi_list, list)) {
1137                 INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
1138                 smp_call_function_single_async(cpu, &rq->csd);
1139         }
1140 }
1141
1142 static void blk_mq_raise_softirq(struct request *rq)
1143 {
1144         struct llist_head *list;
1145
1146         preempt_disable();
1147         list = this_cpu_ptr(&blk_cpu_done);
1148         if (llist_add(&rq->ipi_list, list))
1149                 raise_softirq(BLOCK_SOFTIRQ);
1150         preempt_enable();
1151 }
1152
1153 bool blk_mq_complete_request_remote(struct request *rq)
1154 {
1155         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
1156
1157         /*
1158          * For a request whose hctx has only one ctx mapping, or for
1159          * a polled request, always complete locally; it is pointless
1160          * to redirect the completion.
1161          */
1162         if (rq->mq_hctx->nr_ctx == 1 ||
1163                 rq->cmd_flags & REQ_POLLED)
1164                 return false;
1165
1166         if (blk_mq_complete_need_ipi(rq)) {
1167                 blk_mq_complete_send_ipi(rq);
1168                 return true;
1169         }
1170
1171         if (rq->q->nr_hw_queues == 1) {
1172                 blk_mq_raise_softirq(rq);
1173                 return true;
1174         }
1175         return false;
1176 }
1177 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
1178
1179 /**
1180  * blk_mq_complete_request - end I/O on a request
1181  * @rq:         the request being processed
1182  *
1183  * Description:
1184  *      Complete a request by scheduling the ->complete_rq operation.
1185  **/
1186 void blk_mq_complete_request(struct request *rq)
1187 {
1188         if (!blk_mq_complete_request_remote(rq))
1189                 rq->q->mq_ops->complete(rq);
1190 }
1191 EXPORT_SYMBOL(blk_mq_complete_request);
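/*
 * Illustrative sketch (not part of this file): an interrupt handler calls
 * blk_mq_complete_request() and leaves the actual ending of the request to
 * the driver's ->complete() callback, which may run on a different CPU as
 * decided by blk_mq_complete_request_remote() above:
 *
 *	static void mydrv_complete_rq(struct request *rq)
 *	{
 *		blk_mq_end_request(rq, mydrv_result_to_status(rq));
 *	}
 *
 * mydrv_complete_rq() and mydrv_result_to_status() are made-up names; the
 * former would be wired up as the blk_mq_ops ->complete hook.
 */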
1192
1193 /**
1194  * blk_mq_start_request - Start processing a request
1195  * @rq: Pointer to request to be started
1196  *
1197  * Function used by device drivers to notify the block layer that a request
1198  * is going to be processed now, so the block layer can do proper initializations
1199  * such as starting the timeout timer.
1200  */
1201 void blk_mq_start_request(struct request *rq)
1202 {
1203         struct request_queue *q = rq->q;
1204
1205         trace_block_rq_issue(rq);
1206
1207         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
1208                 rq->io_start_time_ns = ktime_get_ns();
1209                 rq->stats_sectors = blk_rq_sectors(rq);
1210                 rq->rq_flags |= RQF_STATS;
1211                 rq_qos_issue(q, rq);
1212         }
1213
1214         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
1215
1216         blk_add_timer(rq);
1217         WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
1218
1219 #ifdef CONFIG_BLK_DEV_INTEGRITY
1220         if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
1221                 q->integrity.profile->prepare_fn(rq);
1222 #endif
1223         if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1224                 WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
1225 }
1226 EXPORT_SYMBOL(blk_mq_start_request);
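/*
 * Illustrative sketch (not part of this file): blk_mq_start_request() is
 * called from ->queue_rq() right before the command is handed to the
 * hardware, so the timeout covers the time the device holds the request:
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (!mydrv_issue(rq))	(made-up helper)
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 */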
1227
1228 /*
1229  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1230  * queues. This is important for md arrays to benefit from merging
1231  * requests.
1232  */
1233 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
1234 {
1235         if (plug->multiple_queues)
1236                 return BLK_MAX_REQUEST_COUNT * 2;
1237         return BLK_MAX_REQUEST_COUNT;
1238 }
1239
1240 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1241 {
1242         struct request *last = rq_list_peek(&plug->mq_list);
1243
1244         if (!plug->rq_count) {
1245                 trace_block_plug(rq->q);
1246         } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
1247                    (!blk_queue_nomerges(rq->q) &&
1248                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1249                 blk_mq_flush_plug_list(plug, false);
1250                 trace_block_plug(rq->q);
1251         }
1252
1253         if (!plug->multiple_queues && last && last->q != rq->q)
1254                 plug->multiple_queues = true;
1255         if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
1256                 plug->has_elevator = true;
1257         rq->rq_next = NULL;
1258         rq_list_add(&plug->mq_list, rq);
1259         plug->rq_count++;
1260 }
1261
1262 /**
1263  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
1264  * @rq:         request to insert
1265  * @at_head:    insert request at head or tail of queue
1266  *
1267  * Description:
1268  *    Insert a fully prepared request at the back of the I/O scheduler queue
1269  *    for execution.  Don't wait for completion.
1270  *
1271  * Note:
1272  *    This function will invoke @done directly if the queue is dead.
1273  */
1274 void blk_execute_rq_nowait(struct request *rq, bool at_head)
1275 {
1276         WARN_ON(irqs_disabled());
1277         WARN_ON(!blk_rq_is_passthrough(rq));
1278
1279         blk_account_io_start(rq);
1280
1281         /*
1282          * As plugging can be enabled for passthrough requests on a zoned
1283          * device, directly accessing the plug instead of using blk_mq_plug()
1284          * should not have any consequences.
1285          */
1286         if (current->plug)
1287                 blk_add_rq_to_plug(current->plug, rq);
1288         else
1289                 blk_mq_sched_insert_request(rq, at_head, true, false);
1290 }
1291 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
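/*
 * Illustrative sketch (not part of this file): with the nowait variant the
 * caller provides completion handling through rq->end_io before inserting
 * the request; mydrv_pt_done is a made-up callback:
 *
 *	rq->end_io_data = ctx;
 *	rq->end_io = mydrv_pt_done;
 *	blk_execute_rq_nowait(rq, false);
 */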
1292
1293 struct blk_rq_wait {
1294         struct completion done;
1295         blk_status_t ret;
1296 };
1297
1298 static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
1299 {
1300         struct blk_rq_wait *wait = rq->end_io_data;
1301
1302         wait->ret = ret;
1303         complete(&wait->done);
1304 }
1305
1306 bool blk_rq_is_poll(struct request *rq)
1307 {
1308         if (!rq->mq_hctx)
1309                 return false;
1310         if (rq->mq_hctx->type != HCTX_TYPE_POLL)
1311                 return false;
1312         if (WARN_ON_ONCE(!rq->bio))
1313                 return false;
1314         return true;
1315 }
1316 EXPORT_SYMBOL_GPL(blk_rq_is_poll);
1317
1318 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
1319 {
1320         do {
1321                 bio_poll(rq->bio, NULL, 0);
1322                 cond_resched();
1323         } while (!completion_done(wait));
1324 }
1325
1326 /**
1327  * blk_execute_rq - insert a request into queue for execution
1328  * @rq:         request to insert
1329  * @at_head:    insert request at head or tail of queue
1330  *
1331  * Description:
1332  *    Insert a fully prepared request at the back of the I/O scheduler queue
1333  *    for execution and wait for completion.
1334  * Return: The blk_status_t result provided to blk_mq_end_request().
1335  */
1336 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
1337 {
1338         struct blk_rq_wait wait = {
1339                 .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
1340         };
1341
1342         WARN_ON(irqs_disabled());
1343         WARN_ON(!blk_rq_is_passthrough(rq));
1344
1345         rq->end_io_data = &wait;
1346         rq->end_io = blk_end_sync_rq;
1347
1348         blk_account_io_start(rq);
1349         blk_mq_sched_insert_request(rq, at_head, true, false);
1350
1351         if (blk_rq_is_poll(rq)) {
1352                 blk_rq_poll_completion(rq, &wait.done);
1353         } else {
1354                 /*
1355                  * Prevent hang_check timer from firing at us during very long
1356                  * I/O
1357                  */
1358                 unsigned long hang_check = sysctl_hung_task_timeout_secs;
1359
1360                 if (hang_check)
1361                         while (!wait_for_completion_io_timeout(&wait.done,
1362                                         hang_check * (HZ/2)))
1363                                 ;
1364                 else
1365                         wait_for_completion_io(&wait.done);
1366         }
1367
1368         return wait.ret;
1369 }
1370 EXPORT_SYMBOL(blk_execute_rq);
1371
1372 static void __blk_mq_requeue_request(struct request *rq)
1373 {
1374         struct request_queue *q = rq->q;
1375
1376         blk_mq_put_driver_tag(rq);
1377
1378         trace_block_rq_requeue(rq);
1379         rq_qos_requeue(q, rq);
1380
1381         if (blk_mq_request_started(rq)) {
1382                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1383                 rq->rq_flags &= ~RQF_TIMED_OUT;
1384         }
1385 }
1386
1387 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1388 {
1389         __blk_mq_requeue_request(rq);
1390
1391         /* this request will be re-inserted to io scheduler queue */
1392         blk_mq_sched_requeue_request(rq);
1393
1394         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
1395 }
1396 EXPORT_SYMBOL(blk_mq_requeue_request);
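/*
 * Illustrative sketch (not part of this file): a driver that started a
 * request but could not finish it (e.g. a transient device condition) can
 * hand it back for a later retry:
 *
 *	blk_mq_requeue_request(rq, true);
 *
 * Passing true kicks the requeue list immediately; passing false lets the
 * caller batch several requeues and call blk_mq_kick_requeue_list() once.
 */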
1397
1398 static void blk_mq_requeue_work(struct work_struct *work)
1399 {
1400         struct request_queue *q =
1401                 container_of(work, struct request_queue, requeue_work.work);
1402         LIST_HEAD(rq_list);
1403         struct request *rq, *next;
1404
1405         spin_lock_irq(&q->requeue_lock);
1406         list_splice_init(&q->requeue_list, &rq_list);
1407         spin_unlock_irq(&q->requeue_lock);
1408
1409         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
1410                 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
1411                         continue;
1412
1413                 rq->rq_flags &= ~RQF_SOFTBARRIER;
1414                 list_del_init(&rq->queuelist);
1415                 /*
1416                  * If RQF_DONTPREP is set, rq already contains driver specific
1417                  * data, so insert it into the hctx dispatch list to avoid any
1418                  * merge.
1419                  */
1420                 if (rq->rq_flags & RQF_DONTPREP)
1421                         blk_mq_request_bypass_insert(rq, false, false);
1422                 else
1423                         blk_mq_sched_insert_request(rq, true, false, false);
1424         }
1425
1426         while (!list_empty(&rq_list)) {
1427                 rq = list_entry(rq_list.next, struct request, queuelist);
1428                 list_del_init(&rq->queuelist);
1429                 blk_mq_sched_insert_request(rq, false, false, false);
1430         }
1431
1432         blk_mq_run_hw_queues(q, false);
1433 }
1434
1435 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
1436                                 bool kick_requeue_list)
1437 {
1438         struct request_queue *q = rq->q;
1439         unsigned long flags;
1440
1441         /*
1442          * We abuse this flag that is otherwise used by the I/O scheduler to
1443          * request head insertion from the workqueue.
1444          */
1445         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
1446
1447         spin_lock_irqsave(&q->requeue_lock, flags);
1448         if (at_head) {
1449                 rq->rq_flags |= RQF_SOFTBARRIER;
1450                 list_add(&rq->queuelist, &q->requeue_list);
1451         } else {
1452                 list_add_tail(&rq->queuelist, &q->requeue_list);
1453         }
1454         spin_unlock_irqrestore(&q->requeue_lock, flags);
1455
1456         if (kick_requeue_list)
1457                 blk_mq_kick_requeue_list(q);
1458 }
1459
1460 void blk_mq_kick_requeue_list(struct request_queue *q)
1461 {
1462         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
1463 }
1464 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
1465
1466 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
1467                                     unsigned long msecs)
1468 {
1469         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
1470                                     msecs_to_jiffies(msecs));
1471 }
1472 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
1473
1474 static bool blk_mq_rq_inflight(struct request *rq, void *priv)
1475 {
1476         /*
1477          * If we find a request that isn't idle, we know the queue is busy
1478          * as it's checked in the iter.
1479          * Return false to stop the iteration.
1480          */
1481         if (blk_mq_request_started(rq)) {
1482                 bool *busy = priv;
1483
1484                 *busy = true;
1485                 return false;
1486         }
1487
1488         return true;
1489 }
1490
1491 bool blk_mq_queue_inflight(struct request_queue *q)
1492 {
1493         bool busy = false;
1494
1495         blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
1496         return busy;
1497 }
1498 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
1499
1500 static void blk_mq_rq_timed_out(struct request *req)
1501 {
1502         req->rq_flags |= RQF_TIMED_OUT;
1503         if (req->q->mq_ops->timeout) {
1504                 enum blk_eh_timer_return ret;
1505
1506                 ret = req->q->mq_ops->timeout(req);
1507                 if (ret == BLK_EH_DONE)
1508                         return;
1509                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
1510         }
1511
1512         blk_add_timer(req);
1513 }
1514
1515 static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
1516 {
1517         unsigned long deadline;
1518
1519         if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
1520                 return false;
1521         if (rq->rq_flags & RQF_TIMED_OUT)
1522                 return false;
1523
1524         deadline = READ_ONCE(rq->deadline);
1525         if (time_after_eq(jiffies, deadline))
1526                 return true;
1527
1528         if (*next == 0)
1529                 *next = deadline;
1530         else if (time_after(*next, deadline))
1531                 *next = deadline;
1532         return false;
1533 }
1534
1535 void blk_mq_put_rq_ref(struct request *rq)
1536 {
1537         if (is_flush_rq(rq))
1538                 rq->end_io(rq, 0);
1539         else if (req_ref_put_and_test(rq))
1540                 __blk_mq_free_request(rq);
1541 }
1542
1543 static bool blk_mq_check_expired(struct request *rq, void *priv)
1544 {
1545         unsigned long *next = priv;
1546
1547         /*
1548          * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
1549          * be reallocated underneath the timeout handler's processing, so
1550          * the expire check is reliable. If the request is not expired, then
1551          * it was completed and reallocated as a new request after returning
1552          * from blk_mq_check_expired().
1553          */
1554         if (blk_mq_req_expired(rq, next))
1555                 blk_mq_rq_timed_out(rq);
1556         return true;
1557 }
1558
1559 static void blk_mq_timeout_work(struct work_struct *work)
1560 {
1561         struct request_queue *q =
1562                 container_of(work, struct request_queue, timeout_work);
1563         unsigned long next = 0;
1564         struct blk_mq_hw_ctx *hctx;
1565         unsigned long i;
1566
1567         /* A deadlock might occur if a request is stuck requiring a
1568          * timeout at the same time a queue freeze is waiting
1569          * completion, since the timeout code would not be able to
1570          * acquire the queue reference here.
1571          *
1572          * That's why we don't use blk_queue_enter here; instead, we use
1573          * percpu_ref_tryget directly, because we need to be able to
1574          * obtain a reference even in the short window between the queue
1575          * starting to freeze, by dropping the first reference in
1576          * blk_freeze_queue_start, and the moment the last request is
1577          * consumed, marked by the instant q_usage_counter reaches
1578          * zero.
1579          */
1580         if (!percpu_ref_tryget(&q->q_usage_counter))
1581                 return;
1582
1583         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
1584
1585         if (next != 0) {
1586                 mod_timer(&q->timeout, next);
1587         } else {
1588                 /*
1589                  * Request timeouts are handled as a forward rolling timer. If
1590                  * we end up here it means that no requests are pending and
1591                  * also that no request has been pending for a while. Mark
1592                  * each hctx as idle.
1593                  */
1594                 queue_for_each_hw_ctx(q, hctx, i) {
1595                         /* the hctx may be unmapped, so check it here */
1596                         if (blk_mq_hw_queue_mapped(hctx))
1597                                 blk_mq_tag_idle(hctx);
1598                 }
1599         }
1600         blk_queue_exit(q);
1601 }
1602
1603 struct flush_busy_ctx_data {
1604         struct blk_mq_hw_ctx *hctx;
1605         struct list_head *list;
1606 };
1607
1608 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
1609 {
1610         struct flush_busy_ctx_data *flush_data = data;
1611         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1612         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1613         enum hctx_type type = hctx->type;
1614
1615         spin_lock(&ctx->lock);
1616         list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1617         sbitmap_clear_bit(sb, bitnr);
1618         spin_unlock(&ctx->lock);
1619         return true;
1620 }
1621
1622 /*
1623  * Process software queues that have been marked busy, splicing them
1624  * to the for-dispatch list.
1625  */
1626 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1627 {
1628         struct flush_busy_ctx_data data = {
1629                 .hctx = hctx,
1630                 .list = list,
1631         };
1632
1633         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1634 }
1635 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1636
1637 struct dispatch_rq_data {
1638         struct blk_mq_hw_ctx *hctx;
1639         struct request *rq;
1640 };
1641
1642 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1643                 void *data)
1644 {
1645         struct dispatch_rq_data *dispatch_data = data;
1646         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1647         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1648         enum hctx_type type = hctx->type;
1649
1650         spin_lock(&ctx->lock);
1651         if (!list_empty(&ctx->rq_lists[type])) {
1652                 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1653                 list_del_init(&dispatch_data->rq->queuelist);
1654                 if (list_empty(&ctx->rq_lists[type]))
1655                         sbitmap_clear_bit(sb, bitnr);
1656         }
1657         spin_unlock(&ctx->lock);
1658
1659         return !dispatch_data->rq;
1660 }
1661
1662 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1663                                         struct blk_mq_ctx *start)
1664 {
1665         unsigned off = start ? start->index_hw[hctx->type] : 0;
1666         struct dispatch_rq_data data = {
1667                 .hctx = hctx,
1668                 .rq   = NULL,
1669         };
1670
1671         __sbitmap_for_each_set(&hctx->ctx_map, off,
1672                                dispatch_rq_from_ctx, &data);
1673
1674         return data.rq;
1675 }
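/*
 * Starting the ctx_map walk at @start's bit (rather than always at bit 0) is
 * what gives round-robin fairness across the software queues mapped to this
 * hctx when a caller passes in the ctx it last dispatched from.
 */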
1676
1677 static bool __blk_mq_alloc_driver_tag(struct request *rq)
1678 {
1679         struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1680         unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1681         int tag;
1682
1683         blk_mq_tag_busy(rq->mq_hctx);
1684
1685         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1686                 bt = &rq->mq_hctx->tags->breserved_tags;
1687                 tag_offset = 0;
1688         } else {
1689                 if (!hctx_may_queue(rq->mq_hctx, bt))
1690                         return false;
1691         }
1692
1693         tag = __sbitmap_queue_get(bt);
1694         if (tag == BLK_MQ_NO_TAG)
1695                 return false;
1696
1697         rq->tag = tag + tag_offset;
1698         return true;
1699 }
1700
1701 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
1702 {
1703         if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
1704                 return false;
1705
1706         if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1707                         !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1708                 rq->rq_flags |= RQF_MQ_INFLIGHT;
1709                 __blk_mq_inc_active_requests(hctx);
1710         }
1711         hctx->tags->rqs[rq->tag] = rq;
1712         return true;
1713 }
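/*
 * A request that came through the scheduler initially only holds an internal
 * tag (rq->internal_tag, indexing sched_tags); the helpers above assign the
 * actual driver tag (rq->tag, indexing hctx->tags) at dispatch time. For
 * queue-shared tag sets, RQF_MQ_INFLIGHT records that the request has already
 * been counted in the active-request accounting, so it is only counted once.
 */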
1714
1715 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1716                                 int flags, void *key)
1717 {
1718         struct blk_mq_hw_ctx *hctx;
1719
1720         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1721
1722         spin_lock(&hctx->dispatch_wait_lock);
1723         if (!list_empty(&wait->entry)) {
1724                 struct sbitmap_queue *sbq;
1725
1726                 list_del_init(&wait->entry);
1727                 sbq = &hctx->tags->bitmap_tags;
1728                 atomic_dec(&sbq->ws_active);
1729         }
1730         spin_unlock(&hctx->dispatch_wait_lock);
1731
1732         blk_mq_run_hw_queue(hctx, true);
1733         return 1;
1734 }
1735
1736 /*
1737  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1738  * the tag wakeups. For non-shared tags, we can simply mark ourselves as needing a
1739  * restart. For both cases, take care to check the condition again after
1740  * marking us as waiting.
1741  */
1742 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1743                                  struct request *rq)
1744 {
1745         struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
1746         struct wait_queue_head *wq;
1747         wait_queue_entry_t *wait;
1748         bool ret;
1749
1750         if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
1751                 blk_mq_sched_mark_restart_hctx(hctx);
1752
1753                 /*
1754                  * It's possible that a tag was freed in the window between the
1755                  * allocation failure and adding the hardware queue to the wait
1756                  * queue.
1757                  *
1758                  * Don't clear RESTART here, someone else could have set it.
1759                  * At most this will cost an extra queue run.
1760                  */
1761                 return blk_mq_get_driver_tag(rq);
1762         }
1763
1764         wait = &hctx->dispatch_wait;
1765         if (!list_empty_careful(&wait->entry))
1766                 return false;
1767
1768         wq = &bt_wait_ptr(sbq, hctx)->wait;
1769
1770         spin_lock_irq(&wq->lock);
1771         spin_lock(&hctx->dispatch_wait_lock);
1772         if (!list_empty(&wait->entry)) {
1773                 spin_unlock(&hctx->dispatch_wait_lock);
1774                 spin_unlock_irq(&wq->lock);
1775                 return false;
1776         }
1777
1778         atomic_inc(&sbq->ws_active);
1779         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1780         __add_wait_queue(wq, wait);
1781
1782         /*
1783          * It's possible that a tag was freed in the window between the
1784          * allocation failure and adding the hardware queue to the wait
1785          * queue.
1786          */
1787         ret = blk_mq_get_driver_tag(rq);
1788         if (!ret) {
1789                 spin_unlock(&hctx->dispatch_wait_lock);
1790                 spin_unlock_irq(&wq->lock);
1791                 return false;
1792         }
1793
1794         /*
1795          * We got a tag, remove ourselves from the wait queue to ensure
1796          * someone else gets the wakeup.
1797          */
1798         list_del_init(&wait->entry);
1799         atomic_dec(&sbq->ws_active);
1800         spin_unlock(&hctx->dispatch_wait_lock);
1801         spin_unlock_irq(&wq->lock);
1802
1803         return true;
1804 }
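/*
 * The re-check of the driver tag under both dispatch_wait_lock and the
 * waitqueue lock closes the race where a tag is freed, and its wakeup
 * delivered, in the window between the failed allocation and adding
 * hctx->dispatch_wait to the wait queue.
 */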
1805
1806 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1807 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1808 /*
1809  * Update dispatch busy with an Exponential Weighted Moving Average (EWMA):
1810  * - EWMA is a simple way to compute a running average value
1811  * - weights of 7/8 and 1/8 are applied so that it decays exponentially
1812  * - a factor of 4 is used to avoid the result collapsing to a too-small
1813  *   value (0); the exact factor doesn't matter because EWMA decays exponentially
1814  */
1815 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1816 {
1817         unsigned int ewma;
1818
1819         ewma = hctx->dispatch_busy;
1820
1821         if (!ewma && !busy)
1822                 return;
1823
1824         ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1825         if (busy)
1826                 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1827         ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1828
1829         hctx->dispatch_busy = ewma;
1830 }
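/*
 * Worked example with the constants above (weight 8, factor 4), in integer
 * arithmetic: starting from ewma == 0, one busy update gives
 * (0 * 7 + 16) / 8 = 2 and a second gives (2 * 7 + 16) / 8 = 3, so repeated
 * busy updates keep climbing; an idle update from 2 gives (2 * 7) / 8 = 1,
 * so the value decays back toward 0 once dispatching stops being busy.
 */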
1831
1832 #define BLK_MQ_RESOURCE_DELAY   3               /* ms units */
1833
1834 static void blk_mq_handle_dev_resource(struct request *rq,
1835                                        struct list_head *list)
1836 {
1837         struct request *next =
1838                 list_first_entry_or_null(list, struct request, queuelist);
1839
1840         /*
1841          * If an I/O scheduler has been configured and we got a driver tag for
1842          * the next request already, free it.
1843          */
1844         if (next)
1845                 blk_mq_put_driver_tag(next);
1846
1847         list_add(&rq->queuelist, list);
1848         __blk_mq_requeue_request(rq);
1849 }
1850
1851 static void blk_mq_handle_zone_resource(struct request *rq,
1852                                         struct list_head *zone_list)
1853 {
1854         /*
1855          * If we end up here it is because we cannot dispatch a request to a
1856          * specific zone due to LLD level zone-write locking or other zone
1857          * related resource not being available. In this case, set the request
1858          * aside in zone_list for retrying it later.
1859          */
1860         list_add(&rq->queuelist, zone_list);
1861         __blk_mq_requeue_request(rq);
1862 }
1863
1864 enum prep_dispatch {
1865         PREP_DISPATCH_OK,
1866         PREP_DISPATCH_NO_TAG,
1867         PREP_DISPATCH_NO_BUDGET,
1868 };
1869
1870 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1871                                                   bool need_budget)
1872 {
1873         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1874         int budget_token = -1;
1875
1876         if (need_budget) {
1877                 budget_token = blk_mq_get_dispatch_budget(rq->q);
1878                 if (budget_token < 0) {
1879                         blk_mq_put_driver_tag(rq);
1880                         return PREP_DISPATCH_NO_BUDGET;
1881                 }
1882                 blk_mq_set_rq_budget_token(rq, budget_token);
1883         }
1884
1885         if (!blk_mq_get_driver_tag(rq)) {
1886                 /*
1887                  * The initial allocation attempt failed, so we need to
1888                  * rerun the hardware queue when a tag is freed. The
1889                  * waitqueue takes care of that. If the queue is run
1890                  * before we add this entry back on the dispatch list,
1891                  * we'll re-run it below.
1892                  */
1893                 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1894                         /*
1895                          * All budgets not obtained in this function will be
1896                          * put back together when the partial dispatch is handled
1897                          */
1898                         if (need_budget)
1899                                 blk_mq_put_dispatch_budget(rq->q, budget_token);
1900                         return PREP_DISPATCH_NO_TAG;
1901                 }
1902         }
1903
1904         return PREP_DISPATCH_OK;
1905 }
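/*
 * Budget handling in the helper above: a budget is only taken here when the
 * caller did not pre-allocate one (need_budget). If no driver tag can be
 * obtained and the hctx is not parked on the tag waitqueue either, that
 * budget is dropped again; budgets pre-allocated by the caller are instead
 * released in bulk via blk_mq_release_budgets() on a partial dispatch.
 */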
1906
1907 /* Release all allocated budgets before calling blk_mq_dispatch_rq_list(). */
1908 static void blk_mq_release_budgets(struct request_queue *q,
1909                 struct list_head *list)
1910 {
1911         struct request *rq;
1912
1913         list_for_each_entry(rq, list, queuelist) {
1914                 int budget_token = blk_mq_get_rq_budget_token(rq);
1915
1916                 if (budget_token >= 0)
1917                         blk_mq_put_dispatch_budget(q, budget_token);
1918         }
1919 }
1920
1921 /*
1922  * Returns true if we did some work AND can potentially do more.
1923  */
1924 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
1925                              unsigned int nr_budgets)
1926 {
1927         enum prep_dispatch prep;
1928         struct request_queue *q = hctx->queue;
1929         struct request *rq, *nxt;
1930         int errors, queued;
1931         blk_status_t ret = BLK_STS_OK;
1932         LIST_HEAD(zone_list);
1933         bool needs_resource = false;
1934
1935         if (list_empty(list))
1936                 return false;
1937
1938         /*
1939          * Now process all the entries, sending them to the driver.
1940          */
1941         errors = queued = 0;
1942         do {
1943                 struct blk_mq_queue_data bd;
1944
1945                 rq = list_first_entry(list, struct request, queuelist);
1946
1947                 WARN_ON_ONCE(hctx != rq->mq_hctx);
1948                 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
1949                 if (prep != PREP_DISPATCH_OK)
1950                         break;
1951
1952                 list_del_init(&rq->queuelist);
1953
1954                 bd.rq = rq;
1955
1956                 /*
1957                  * Flag last if we have no more requests, or if we have more
1958                  * but can't assign a driver tag to it.
1959                  */
1960                 if (list_empty(list))
1961                         bd.last = true;
1962                 else {
1963                         nxt = list_first_entry(list, struct request, queuelist);
1964                         bd.last = !blk_mq_get_driver_tag(nxt);
1965                 }
1966
1967                 /*
1968                  * Once the request is queued to the LLD, there is no need to
1969                  * cover the budget any more.
1970                  */
1971                 if (nr_budgets)
1972                         nr_budgets--;
1973                 ret = q->mq_ops->queue_rq(hctx, &bd);
1974                 switch (ret) {
1975                 case BLK_STS_OK:
1976                         queued++;
1977                         break;
1978                 case BLK_STS_RESOURCE:
1979                         needs_resource = true;
1980                         fallthrough;
1981                 case BLK_STS_DEV_RESOURCE:
1982                         blk_mq_handle_dev_resource(rq, list);
1983                         goto out;
1984                 case BLK_STS_ZONE_RESOURCE:
1985                         /*
1986                          * Move the request to zone_list and keep going through
1987                          * the dispatch list to find more requests the drive can
1988                          * accept.
1989                          */
1990                         blk_mq_handle_zone_resource(rq, &zone_list);
1991                         needs_resource = true;
1992                         break;
1993                 default:
1994                         errors++;
1995                         blk_mq_end_request(rq, ret);
1996                 }
1997         } while (!list_empty(list));
1998 out:
1999         if (!list_empty(&zone_list))
2000                 list_splice_tail_init(&zone_list, list);
2001
2002         /* If we didn't flush the entire list, we could have told the driver
2003          * there was more coming, but that turned out to be a lie.
2004          */
2005         if ((!list_empty(list) || errors || needs_resource ||
2006              ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
2007                 q->mq_ops->commit_rqs(hctx);
2008         /*
2009          * Any items that need requeuing? Stuff them into hctx->dispatch,
2010          * that is where we will continue on next queue run.
2011          */
2012         if (!list_empty(list)) {
2013                 bool needs_restart;
2014                 /* For non-shared tags, the RESTART check will suffice */
2015                 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
2016                         (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
2017
2018                 if (nr_budgets)
2019                         blk_mq_release_budgets(q, list);
2020
2021                 spin_lock(&hctx->lock);
2022                 list_splice_tail_init(list, &hctx->dispatch);
2023                 spin_unlock(&hctx->lock);
2024
2025                 /*
2026                  * Order adding requests to hctx->dispatch and checking
2027                  * SCHED_RESTART flag. The pair of this smp_mb() is the one
2028                  * in blk_mq_sched_restart(). This prevents the restart code
2029                  * path from missing requests newly added to hctx->dispatch
2030                  * while SCHED_RESTART is observed here.
2031                  */
2032                 smp_mb();
2033
2034                 /*
2035                  * If SCHED_RESTART was set by the caller of this function and
2036                  * it is no longer set that means that it was cleared by another
2037                  * thread and hence that a queue rerun is needed.
2038                  *
2039                  * If 'no_tag' is set, that means that we failed getting
2040                  * a driver tag with an I/O scheduler attached. If our dispatch
2041                  * waitqueue is no longer active, ensure that we run the queue
2042                  * AFTER adding our entries back to the list.
2043                  *
2044                  * If no I/O scheduler has been configured it is possible that
2045                  * the hardware queue got stopped and restarted before requests
2046                  * were pushed back onto the dispatch list. Rerun the queue to
2047                  * avoid starvation. Notes:
2048                  * - blk_mq_run_hw_queue() checks whether or not a queue has
2049                  *   been stopped before rerunning a queue.
2050                  * - Some but not all block drivers stop a queue before
2051                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
2052                  *   and dm-rq.
2053                  *
2054                  * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
2055                  * bit is set, run queue after a delay to avoid IO stalls
2056                  * that could otherwise occur if the queue is idle.  We'll do
2057                  * similar if we couldn't get budget or couldn't lock a zone
2058                  * and SCHED_RESTART is set.
2059                  */
2060                 needs_restart = blk_mq_sched_needs_restart(hctx);
2061                 if (prep == PREP_DISPATCH_NO_BUDGET)
2062                         needs_resource = true;
2063                 if (!needs_restart ||
2064                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2065                         blk_mq_run_hw_queue(hctx, true);
2066                 else if (needs_resource)
2067                         blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2068
2069                 blk_mq_update_dispatch_busy(hctx, true);
2070                 return false;
2071         } else
2072                 blk_mq_update_dispatch_busy(hctx, false);
2073
2074         return (queued + errors) != 0;
2075 }
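/*
 * Return value of blk_mq_dispatch_rq_list(): false means the list could not
 * be fully dispatched and the leftovers were parked on hctx->dispatch (with a
 * queue rerun already arranged above); true means every request was handed to
 * the driver or failed outright, so the caller may be able to dispatch more.
 */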
2076
2077 /**
2078  * __blk_mq_run_hw_queue - Run a hardware queue.
2079  * @hctx: Pointer to the hardware queue to run.
2080  *
2081  * Send pending requests to the hardware.
2082  */
2083 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
2084 {
2085         /*
2086          * We can't run the queue inline with ints disabled. Ensure that
2087          * we catch bad users of this early.
2088          */
2089         WARN_ON_ONCE(in_interrupt());
2090
2091         blk_mq_run_dispatch_ops(hctx->queue,
2092                         blk_mq_sched_dispatch_requests(hctx));
2093 }
2094
2095 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2096 {
2097         int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2098
2099         if (cpu >= nr_cpu_ids)
2100                 cpu = cpumask_first(hctx->cpumask);
2101         return cpu;
2102 }
2103
2104 /*
2105  * It'd be great if the workqueue API had a way to pass
2106  * in a mask and had some smarts for more clever placement.
2107  * For now we just round-robin here, switching for every
2108  * BLK_MQ_CPU_WORK_BATCH queued items.
2109  */
2110 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2111 {
2112         bool tried = false;
2113         int next_cpu = hctx->next_cpu;
2114
2115         if (hctx->queue->nr_hw_queues == 1)
2116                 return WORK_CPU_UNBOUND;
2117
2118         if (--hctx->next_cpu_batch <= 0) {
2119 select_cpu:
2120                 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2121                                 cpu_online_mask);
2122                 if (next_cpu >= nr_cpu_ids)
2123                         next_cpu = blk_mq_first_mapped_cpu(hctx);
2124                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2125         }
2126
2127         /*
2128          * Do an unbound schedule if we can't find an online CPU for this hctx,
2129          * which should only happen while handling CPU DEAD.
2130          */
2131         if (!cpu_online(next_cpu)) {
2132                 if (!tried) {
2133                         tried = true;
2134                         goto select_cpu;
2135                 }
2136
2137                 /*
2138                  * Make sure to re-select the CPU next time, once CPUs
2139                  * in hctx->cpumask come back online.
2140                  */
2141                 hctx->next_cpu = next_cpu;
2142                 hctx->next_cpu_batch = 1;
2143                 return WORK_CPU_UNBOUND;
2144         }
2145
2146         hctx->next_cpu = next_cpu;
2147         return next_cpu;
2148 }
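/*
 * Example of the round-robin above, assuming BLK_MQ_CPU_WORK_BATCH is 8 and
 * the hctx is mapped to CPUs {0, 2, 4}: run_work is queued on the same CPU
 * for 8 consecutive runs, then next_cpu advances 0 -> 2 -> 4 -> 0, skipping
 * any CPU in the mask that is currently offline.
 */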
2149
2150 /**
2151  * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
2152  * @hctx: Pointer to the hardware queue to run.
2153  * @async: If we want to run the queue asynchronously.
2154  * @msecs: Milliseconds of delay to wait before running the queue.
2155  *
2156  * If !@async, try to run the queue now. Else, run the queue asynchronously and
2157  * with a delay of @msecs.
2158  */
2159 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
2160                                         unsigned long msecs)
2161 {
2162         if (unlikely(blk_mq_hctx_stopped(hctx)))
2163                 return;
2164
2165         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
2166                 if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2167                         __blk_mq_run_hw_queue(hctx);
2168                         return;
2169                 }
2170         }
2171
2172         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2173                                     msecs_to_jiffies(msecs));
2174 }
2175
2176 /**
2177  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2178  * @hctx: Pointer to the hardware queue to run.
2179  * @msecs: Milliseconds of delay to wait before running the queue.
2180  *
2181  * Run a hardware queue asynchronously with a delay of @msecs.
2182  */
2183 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2184 {
2185         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
2186 }
2187 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
2188
2189 /**
2190  * blk_mq_run_hw_queue - Start to run a hardware queue.
2191  * @hctx: Pointer to the hardware queue to run.
2192  * @async: If we want to run the queue asynchronously.
2193  *
2194  * Check if the request queue is not in a quiesced state and if there are
2195  * pending requests to be sent. If this is true, run the queue to send requests
2196  * to hardware.
2197  */
2198 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2199 {
2200         bool need_run;
2201
2202         /*
2203          * When the queue is quiesced, we may be switching the I/O scheduler,
2204          * updating nr_hw_queues, or doing other things, and we can't run the
2205          * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
2206          *
2207          * The queue will be rerun by blk_mq_unquiesce_queue() if it is
2208          * quiesced.
2209          */
2210         __blk_mq_run_dispatch_ops(hctx->queue, false,
2211                 need_run = !blk_queue_quiesced(hctx->queue) &&
2212                 blk_mq_hctx_has_pending(hctx));
2213
2214         if (need_run)
2215                 __blk_mq_delay_run_hw_queue(hctx, async, 0);
2216 }
2217 EXPORT_SYMBOL(blk_mq_run_hw_queue);
2218
2219 /*
2220  * Return the preferred queue to dispatch from (if any) for a non-mq aware IO
2221  * scheduler.
2222  */
2223 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2224 {
2225         struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2226         /*
2227          * If the IO scheduler does not respect hardware queues when
2228          * dispatching, we just don't bother with multiple HW queues and
2229          * dispatch from hctx for the current CPU since running multiple queues
2230          * just causes lock contention inside the scheduler and pointless cache
2231          * bouncing.
2232          */
2233         struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2234
2235         if (!blk_mq_hctx_stopped(hctx))
2236                 return hctx;
2237         return NULL;
2238 }
2239
2240 /**
2241  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2242  * @q: Pointer to the request queue to run.
2243  * @async: If we want to run the queue asynchronously.
2244  */
2245 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2246 {
2247         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2248         unsigned long i;
2249
2250         sq_hctx = NULL;
2251         if (blk_queue_sq_sched(q))
2252                 sq_hctx = blk_mq_get_sq_hctx(q);
2253         queue_for_each_hw_ctx(q, hctx, i) {
2254                 if (blk_mq_hctx_stopped(hctx))
2255                         continue;
2256                 /*
2257                  * Dispatch from this hctx either if there's no hctx preferred
2258                  * by IO scheduler or if it has requests that bypass the
2259                  * scheduler.
2260                  */
2261                 if (!sq_hctx || sq_hctx == hctx ||
2262                     !list_empty_careful(&hctx->dispatch))
2263                         blk_mq_run_hw_queue(hctx, async);
2264         }
2265 }
2266 EXPORT_SYMBOL(blk_mq_run_hw_queues);
2267
2268 /**
2269  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2270  * @q: Pointer to the request queue to run.
2271  * @msecs: Milliseconds of delay to wait before running the queues.
2272  */
2273 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2274 {
2275         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2276         unsigned long i;
2277
2278         sq_hctx = NULL;
2279         if (blk_queue_sq_sched(q))
2280                 sq_hctx = blk_mq_get_sq_hctx(q);
2281         queue_for_each_hw_ctx(q, hctx, i) {
2282                 if (blk_mq_hctx_stopped(hctx))
2283                         continue;
2284                 /*
2285                  * If there is already a run_work pending, leave the
2286                  * pending delay untouched. Otherwise, a hctx can stall
2287                  * if another hctx is re-delaying the other's work
2288                  * before the work executes.
2289                  */
2290                 if (delayed_work_pending(&hctx->run_work))
2291                         continue;
2292                 /*
2293                  * Dispatch from this hctx either if there's no hctx preferred
2294                  * by IO scheduler or if it has requests that bypass the
2295                  * scheduler.
2296                  */
2297                 if (!sq_hctx || sq_hctx == hctx ||
2298                     !list_empty_careful(&hctx->dispatch))
2299                         blk_mq_delay_run_hw_queue(hctx, msecs);
2300         }
2301 }
2302 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2303
2304 /*
2305  * This function is often used by a driver to pause .queue_rq() when there
2306  * aren't enough resources or some condition isn't satisfied, in which case
2307  * BLK_STS_RESOURCE is usually returned.
2308  *
2309  * We do not guarantee that dispatch can be drained or blocked
2310  * after blk_mq_stop_hw_queue() returns. Please use
2311  * blk_mq_quiesce_queue() for that requirement.
2312  */
2313 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2314 {
2315         cancel_delayed_work(&hctx->run_work);
2316
2317         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2318 }
2319 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
2320
2321 /*
2322  * This function is often used by a driver to pause .queue_rq() when there
2323  * aren't enough resources or some condition isn't satisfied, in which case
2324  * BLK_STS_RESOURCE is usually returned.
2325  *
2326  * We do not guarantee that dispatch can be drained or blocked
2327  * after blk_mq_stop_hw_queues() returns. Please use
2328  * blk_mq_quiesce_queue() for that requirement.
2329  */
2330 void blk_mq_stop_hw_queues(struct request_queue *q)
2331 {
2332         struct blk_mq_hw_ctx *hctx;
2333         unsigned long i;
2334
2335         queue_for_each_hw_ctx(q, hctx, i)
2336                 blk_mq_stop_hw_queue(hctx);
2337 }
2338 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2339
2340 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2341 {
2342         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2343
2344         blk_mq_run_hw_queue(hctx, false);
2345 }
2346 EXPORT_SYMBOL(blk_mq_start_hw_queue);
2347
2348 void blk_mq_start_hw_queues(struct request_queue *q)
2349 {
2350         struct blk_mq_hw_ctx *hctx;
2351         unsigned long i;
2352
2353         queue_for_each_hw_ctx(q, hctx, i)
2354                 blk_mq_start_hw_queue(hctx);
2355 }
2356 EXPORT_SYMBOL(blk_mq_start_hw_queues);
2357
2358 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2359 {
2360         if (!blk_mq_hctx_stopped(hctx))
2361                 return;
2362
2363         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2364         blk_mq_run_hw_queue(hctx, async);
2365 }
2366 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2367
2368 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2369 {
2370         struct blk_mq_hw_ctx *hctx;
2371         unsigned long i;
2372
2373         queue_for_each_hw_ctx(q, hctx, i)
2374                 blk_mq_start_stopped_hw_queue(hctx, async);
2375 }
2376 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
2377
2378 static void blk_mq_run_work_fn(struct work_struct *work)
2379 {
2380         struct blk_mq_hw_ctx *hctx;
2381
2382         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
2383
2384         /*
2385          * If we are stopped, don't run the queue.
2386          */
2387         if (blk_mq_hctx_stopped(hctx))
2388                 return;
2389
2390         __blk_mq_run_hw_queue(hctx);
2391 }
2392
2393 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
2394                                             struct request *rq,
2395                                             bool at_head)
2396 {
2397         struct blk_mq_ctx *ctx = rq->mq_ctx;
2398         enum hctx_type type = hctx->type;
2399
2400         lockdep_assert_held(&ctx->lock);
2401
2402         trace_block_rq_insert(rq);
2403
2404         if (at_head)
2405                 list_add(&rq->queuelist, &ctx->rq_lists[type]);
2406         else
2407                 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
2408 }
2409
2410 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
2411                              bool at_head)
2412 {
2413         struct blk_mq_ctx *ctx = rq->mq_ctx;
2414
2415         lockdep_assert_held(&ctx->lock);
2416
2417         __blk_mq_insert_req_list(hctx, rq, at_head);
2418         blk_mq_hctx_mark_pending(hctx, ctx);
2419 }
2420
2421 /**
2422  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
2423  * @rq: Pointer to request to be inserted.
2424  * @at_head: true if the request should be inserted at the head of the list.
2425  * @run_queue: If we should run the hardware queue after inserting the request.
2426  *
2427  * Should only be used carefully, when the caller knows we want to
2428  * bypass a potential IO scheduler on the target device.
2429  */
2430 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
2431                                   bool run_queue)
2432 {
2433         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2434
2435         spin_lock(&hctx->lock);
2436         if (at_head)
2437                 list_add(&rq->queuelist, &hctx->dispatch);
2438         else
2439                 list_add_tail(&rq->queuelist, &hctx->dispatch);
2440         spin_unlock(&hctx->lock);
2441
2442         if (run_queue)
2443                 blk_mq_run_hw_queue(hctx, false);
2444 }
2445
2446 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
2447                             struct list_head *list)
2448
2449 {
2450         struct request *rq;
2451         enum hctx_type type = hctx->type;
2452
2453         /*
2454          * Preemption doesn't flush the plug list, so it's possible that
2455          * ctx->cpu is offline now.
2456          */
2457         list_for_each_entry(rq, list, queuelist) {
2458                 BUG_ON(rq->mq_ctx != ctx);
2459                 trace_block_rq_insert(rq);
2460         }
2461
2462         spin_lock(&ctx->lock);
2463         list_splice_tail_init(list, &ctx->rq_lists[type]);
2464         blk_mq_hctx_mark_pending(hctx, ctx);
2465         spin_unlock(&ctx->lock);
2466 }
2467
2468 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
2469                               bool from_schedule)
2470 {
2471         if (hctx->queue->mq_ops->commit_rqs) {
2472                 trace_block_unplug(hctx->queue, *queued, !from_schedule);
2473                 hctx->queue->mq_ops->commit_rqs(hctx);
2474         }
2475         *queued = 0;
2476 }
2477
2478 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2479                 unsigned int nr_segs)
2480 {
2481         int err;
2482
2483         if (bio->bi_opf & REQ_RAHEAD)
2484                 rq->cmd_flags |= REQ_FAILFAST_MASK;
2485
2486         rq->__sector = bio->bi_iter.bi_sector;
2487         blk_rq_bio_prep(rq, bio, nr_segs);
2488
2489         /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
2490         err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2491         WARN_ON_ONCE(err);
2492
2493         blk_account_io_start(rq);
2494 }
2495
2496 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2497                                             struct request *rq, bool last)
2498 {
2499         struct request_queue *q = rq->q;
2500         struct blk_mq_queue_data bd = {
2501                 .rq = rq,
2502                 .last = last,
2503         };
2504         blk_status_t ret;
2505
2506         /*
2507          * For OK queue, we are done. For error, caller may kill it.
2508          * Any other error (busy), just add it to our list as we
2509          * previously would have done.
2510          */
2511         ret = q->mq_ops->queue_rq(hctx, &bd);
2512         switch (ret) {
2513         case BLK_STS_OK:
2514                 blk_mq_update_dispatch_busy(hctx, false);
2515                 break;
2516         case BLK_STS_RESOURCE:
2517         case BLK_STS_DEV_RESOURCE:
2518                 blk_mq_update_dispatch_busy(hctx, true);
2519                 __blk_mq_requeue_request(rq);
2520                 break;
2521         default:
2522                 blk_mq_update_dispatch_busy(hctx, false);
2523                 break;
2524         }
2525
2526         return ret;
2527 }
2528
2529 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2530                                                 struct request *rq,
2531                                                 bool bypass_insert, bool last)
2532 {
2533         struct request_queue *q = rq->q;
2534         bool run_queue = true;
2535         int budget_token;
2536
2537         /*
2538          * RCU or SRCU read lock is needed before checking quiesced flag.
2539          *
2540          * When the queue is stopped or quiesced, ignore 'bypass_insert' from
2541          * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller,
2542          * so the driver doesn't try to dispatch again.
2543          */
2544         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
2545                 run_queue = false;
2546                 bypass_insert = false;
2547                 goto insert;
2548         }
2549
2550         if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
2551                 goto insert;
2552
2553         budget_token = blk_mq_get_dispatch_budget(q);
2554         if (budget_token < 0)
2555                 goto insert;
2556
2557         blk_mq_set_rq_budget_token(rq, budget_token);
2558
2559         if (!blk_mq_get_driver_tag(rq)) {
2560                 blk_mq_put_dispatch_budget(q, budget_token);
2561                 goto insert;
2562         }
2563
2564         return __blk_mq_issue_directly(hctx, rq, last);
2565 insert:
2566         if (bypass_insert)
2567                 return BLK_STS_RESOURCE;
2568
2569         blk_mq_sched_insert_request(rq, false, run_queue, false);
2570
2571         return BLK_STS_OK;
2572 }
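/*
 * Decision flow of the helper above: a stopped or quiesced queue always falls
 * back to insertion (and returns BLK_STS_OK); a request bound to an elevator
 * is inserted unless the caller asked to bypass it; failing to get a dispatch
 * budget or a driver tag also falls back to insertion, except that a
 * bypassing caller gets BLK_STS_RESOURCE and must handle the request itself;
 * otherwise the request is issued directly via ->queue_rq().
 */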
2573
2574 /**
2575  * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2576  * @hctx: Pointer of the associated hardware queue.
2577  * @rq: Pointer to request to be sent.
2578  *
2579  * If the device has enough resources to accept a new request now, send the
2580  * request directly to the device driver. Else, insert it into the hctx->dispatch
2581  * queue, so we can try to send it again later. Requests inserted into this
2582  * queue have higher priority.
2583  */
2584 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2585                 struct request *rq)
2586 {
2587         blk_status_t ret =
2588                 __blk_mq_try_issue_directly(hctx, rq, false, true);
2589
2590         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
2591                 blk_mq_request_bypass_insert(rq, false, true);
2592         else if (ret != BLK_STS_OK)
2593                 blk_mq_end_request(rq, ret);
2594 }
2595
2596 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2597 {
2598         return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
2599 }
2600
2601 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
2602 {
2603         struct blk_mq_hw_ctx *hctx = NULL;
2604         struct request *rq;
2605         int queued = 0;
2606         int errors = 0;
2607
2608         while ((rq = rq_list_pop(&plug->mq_list))) {
2609                 bool last = rq_list_empty(plug->mq_list);
2610                 blk_status_t ret;
2611
2612                 if (hctx != rq->mq_hctx) {
2613                         if (hctx)
2614                                 blk_mq_commit_rqs(hctx, &queued, from_schedule);
2615                         hctx = rq->mq_hctx;
2616                 }
2617
2618                 ret = blk_mq_request_issue_directly(rq, last);
2619                 switch (ret) {
2620                 case BLK_STS_OK:
2621                         queued++;
2622                         break;
2623                 case BLK_STS_RESOURCE:
2624                 case BLK_STS_DEV_RESOURCE:
2625                         blk_mq_request_bypass_insert(rq, false, true);
2626                         blk_mq_commit_rqs(hctx, &queued, from_schedule);
2627                         return;
2628                 default:
2629                         blk_mq_end_request(rq, ret);
2630                         errors++;
2631                         break;
2632                 }
2633         }
2634
2635         /*
2636          * If we didn't flush the entire list, we could have told the driver
2637          * there was more coming, but that turned out to be a lie.
2638          */
2639         if (errors)
2640                 blk_mq_commit_rqs(hctx, &queued, from_schedule);
2641 }
2642
2643 static void __blk_mq_flush_plug_list(struct request_queue *q,
2644                                      struct blk_plug *plug)
2645 {
2646         if (blk_queue_quiesced(q))
2647                 return;
2648         q->mq_ops->queue_rqs(&plug->mq_list);
2649 }
2650
2651 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
2652 {
2653         struct blk_mq_hw_ctx *this_hctx = NULL;
2654         struct blk_mq_ctx *this_ctx = NULL;
2655         struct request *requeue_list = NULL;
2656         unsigned int depth = 0;
2657         LIST_HEAD(list);
2658
2659         do {
2660                 struct request *rq = rq_list_pop(&plug->mq_list);
2661
2662                 if (!this_hctx) {
2663                         this_hctx = rq->mq_hctx;
2664                         this_ctx = rq->mq_ctx;
2665                 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
2666                         rq_list_add(&requeue_list, rq);
2667                         continue;
2668                 }
2669                 list_add_tail(&rq->queuelist, &list);
2670                 depth++;
2671         } while (!rq_list_empty(plug->mq_list));
2672
2673         plug->mq_list = requeue_list;
2674         trace_block_unplug(this_hctx->queue, depth, !from_sched);
2675         blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
2676 }
2677
2678 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2679 {
2680         struct request *rq;
2681
2682         if (rq_list_empty(plug->mq_list))
2683                 return;
2684         plug->rq_count = 0;
2685
2686         if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2687                 struct request_queue *q;
2688
2689                 rq = rq_list_peek(&plug->mq_list);
2690                 q = rq->q;
2691
2692                 /*
2693                  * Peek first request and see if we have a ->queue_rqs() hook.
2694                  * If we do, we can dispatch the whole plug list in one go. We
2695                  * already know at this point that all requests belong to the
2696                  * same queue, caller must ensure that's the case.
2697                  *
2698                  * Since we pass off the full list to the driver at this point,
2699                  * we do not increment the active request count for the queue.
2700                  * Bypass shared tags for now because of that.
2701                  */
2702                 if (q->mq_ops->queue_rqs &&
2703                     !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
2704                         blk_mq_run_dispatch_ops(q,
2705                                 __blk_mq_flush_plug_list(q, plug));
2706                         if (rq_list_empty(plug->mq_list))
2707                                 return;
2708                 }
2709
2710                 blk_mq_run_dispatch_ops(q,
2711                                 blk_mq_plug_issue_direct(plug, false));
2712                 if (rq_list_empty(plug->mq_list))
2713                         return;
2714         }
2715
2716         do {
2717                 blk_mq_dispatch_plug_list(plug, from_schedule);
2718         } while (!rq_list_empty(plug->mq_list));
2719 }
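/*
 * Plug flushing above tries three strategies: when the plug holds requests
 * for a single queue with no elevator, hand the whole list to the driver's
 * ->queue_rqs() in one call (if implemented and the tag set is not
 * queue-shared), or else issue the requests directly one by one; anything
 * left over, and the multiple-queue or elevator case, is handled in
 * per-hctx/ctx batches through blk_mq_dispatch_plug_list().
 */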
2720
2721 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2722                 struct list_head *list)
2723 {
2724         int queued = 0;
2725         int errors = 0;
2726
2727         while (!list_empty(list)) {
2728                 blk_status_t ret;
2729                 struct request *rq = list_first_entry(list, struct request,
2730                                 queuelist);
2731
2732                 list_del_init(&rq->queuelist);
2733                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2734                 if (ret != BLK_STS_OK) {
2735                         errors++;
2736                         if (ret == BLK_STS_RESOURCE ||
2737                                         ret == BLK_STS_DEV_RESOURCE) {
2738                                 blk_mq_request_bypass_insert(rq, false,
2739                                                         list_empty(list));
2740                                 break;
2741                         }
2742                         blk_mq_end_request(rq, ret);
2743                 } else
2744                         queued++;
2745         }
2746
2747         /*
2748          * If we didn't flush the entire list, we could have told
2749          * the driver there was more coming, but that turned out to
2750          * be a lie.
2751          */
2752         if ((!list_empty(list) || errors) &&
2753              hctx->queue->mq_ops->commit_rqs && queued)
2754                 hctx->queue->mq_ops->commit_rqs(hctx);
2755 }
2756
2757 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2758                                      struct bio *bio, unsigned int nr_segs)
2759 {
2760         if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2761                 if (blk_attempt_plug_merge(q, bio, nr_segs))
2762                         return true;
2763                 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2764                         return true;
2765         }
2766         return false;
2767 }
2768
2769 static struct request *blk_mq_get_new_requests(struct request_queue *q,
2770                                                struct blk_plug *plug,
2771                                                struct bio *bio,
2772                                                unsigned int nsegs)
2773 {
2774         struct blk_mq_alloc_data data = {
2775                 .q              = q,
2776                 .nr_tags        = 1,
2777                 .cmd_flags      = bio->bi_opf,
2778         };
2779         struct request *rq;
2780
2781         if (unlikely(bio_queue_enter(bio)))
2782                 return NULL;
2783
2784         if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2785                 goto queue_exit;
2786
2787         rq_qos_throttle(q, bio);
2788
2789         if (plug) {
2790                 data.nr_tags = plug->nr_ios;
2791                 plug->nr_ios = 1;
2792                 data.cached_rq = &plug->cached_rq;
2793         }
2794
2795         rq = __blk_mq_alloc_requests(&data);
2796         if (rq)
2797                 return rq;
2798         rq_qos_cleanup(q, bio);
2799         if (bio->bi_opf & REQ_NOWAIT)
2800                 bio_wouldblock_error(bio);
2801 queue_exit:
2802         blk_queue_exit(q);
2803         return NULL;
2804 }
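/*
 * This is where plugged submissions get batched request allocation: with a
 * plug active, data.nr_tags is raised to plug->nr_ios so that
 * __blk_mq_alloc_requests() can allocate several requests in one go and park
 * the spares on plug->cached_rq, where blk_mq_get_cached_request() below
 * picks them up for subsequent bios without going back to the tag map.
 */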
2805
2806 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
2807                 struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
2808 {
2809         struct request *rq;
2810
2811         if (!plug)
2812                 return NULL;
2813         rq = rq_list_peek(&plug->cached_rq);
2814         if (!rq || rq->q != q)
2815                 return NULL;
2816
2817         if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
2818                 *bio = NULL;
2819                 return NULL;
2820         }
2821
2822         if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
2823                 return NULL;
2824         if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
2825                 return NULL;
2826
2827         /*
2828          * If any qos ->throttle() end up blocking, we will have flushed the
2829          * plug and hence killed the cached_rq list as well. Pop this entry
2830          * before we throttle.
2831          */
2832         plug->cached_rq = rq_list_next(rq);
2833         rq_qos_throttle(q, *bio);
2834
2835         rq->cmd_flags = (*bio)->bi_opf;
2836         INIT_LIST_HEAD(&rq->queuelist);
2837         return rq;
2838 }
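/*
 * A cached request is only reused when it is compatible with the new bio: it
 * must belong to the same request_queue, map to the same hctx type as the
 * bio's operation flags, and agree on whether the operation is a flush.
 * Otherwise the caller falls back to blk_mq_get_new_requests().
 */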
2839
2840 static void bio_set_ioprio(struct bio *bio)
2841 {
2842         /* Nobody set ioprio so far? Initialize it based on task's nice value */
2843         if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
2844                 bio->bi_ioprio = get_current_ioprio();
2845         blkcg_set_ioprio(bio);
2846 }
2847
2848 /**
2849  * blk_mq_submit_bio - Create and send a request to block device.
2850  * @bio: Bio pointer.
2851  *
2852  * Builds up a request structure from @q and @bio and sends it to the device. The
2853  * request may not be queued directly to hardware if:
2854  * * This request can be merged with another one
2855  * * We want to place request at plug queue for possible future merging
2856  * * There is an IO scheduler active at this queue
2857  *
2858  * It will not queue the request if there is an error with the bio or during
2859  * request creation.
2860  */
2861 void blk_mq_submit_bio(struct bio *bio)
2862 {
2863         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2864         struct blk_plug *plug = blk_mq_plug(bio);
2865         const int is_sync = op_is_sync(bio->bi_opf);
2866         struct request *rq;
2867         unsigned int nr_segs = 1;
2868         blk_status_t ret;
2869
2870         bio = blk_queue_bounce(bio, q);
2871         if (bio_may_exceed_limits(bio, &q->limits))
2872                 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
2873
2874         if (!bio_integrity_prep(bio))
2875                 return;
2876
2877         bio_set_ioprio(bio);
2878
2879         rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
2880         if (!rq) {
2881                 if (!bio)
2882                         return;
2883                 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
2884                 if (unlikely(!rq))
2885                         return;
2886         }
2887
2888         trace_block_getrq(bio);
2889
2890         rq_qos_track(q, rq, bio);
2891
2892         blk_mq_bio_to_request(rq, bio, nr_segs);
2893
2894         ret = blk_crypto_init_request(rq);
2895         if (ret != BLK_STS_OK) {
2896                 bio->bi_status = ret;
2897                 bio_endio(bio);
2898                 blk_mq_free_request(rq);
2899                 return;
2900         }
2901
2902         if (op_is_flush(bio->bi_opf)) {
2903                 blk_insert_flush(rq);
2904                 return;
2905         }
2906
2907         if (plug)
2908                 blk_add_rq_to_plug(plug, rq);
2909         else if ((rq->rq_flags & RQF_ELV) ||
2910                  (rq->mq_hctx->dispatch_busy &&
2911                   (q->nr_hw_queues == 1 || !is_sync)))
2912                 blk_mq_sched_insert_request(rq, false, true, true);
2913         else
2914                 blk_mq_run_dispatch_ops(rq->q,
2915                                 blk_mq_try_issue_directly(rq->mq_hctx, rq));
2916 }
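/*
 * Tail of the submission path above: flush requests take the dedicated flush
 * machinery, plugged requests are deferred onto the plug list, requests bound
 * to an elevator (or async/single-queue requests hitting a busy hctx) go
 * through the scheduler insert path, and everything else is attempted
 * directly via ->queue_rq().
 */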
2917
2918 #ifdef CONFIG_BLK_MQ_STACKING
2919 /**
2920  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2921  * @rq: the request being queued
2922  */
2923 blk_status_t blk_insert_cloned_request(struct request *rq)
2924 {
2925         struct request_queue *q = rq->q;
2926         unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
2927         blk_status_t ret;
2928
2929         if (blk_rq_sectors(rq) > max_sectors) {
2930                 /*
2931                  * SCSI device does not have a good way to return if
2932                  * Write Same/Zero is actually supported. If a device rejects
2933                  * a non-read/write command (discard, write same, etc.), the
2934                  * low-level device driver will set the relevant queue limit to
2935                  * 0 to prevent blk-lib from issuing more of the offending
2936                  * operations. Commands queued prior to the queue limit being
2937                  * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
2938                  * errors being propagated to upper layers.
2939                  */
2940                 if (max_sectors == 0)
2941                         return BLK_STS_NOTSUPP;
2942
2943                 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
2944                         __func__, blk_rq_sectors(rq), max_sectors);
2945                 return BLK_STS_IOERR;
2946         }
2947
2948         /*
2949          * The queue settings related to segment counting may differ from the
2950          * original queue.
2951          */
2952         rq->nr_phys_segments = blk_recalc_rq_segments(rq);
2953         if (rq->nr_phys_segments > queue_max_segments(q)) {
2954                 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
2955                         __func__, rq->nr_phys_segments, queue_max_segments(q));
2956                 return BLK_STS_IOERR;
2957         }
2958
2959         if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
2960                 return BLK_STS_IOERR;
2961
2962         if (blk_crypto_insert_cloned_request(rq))
2963                 return BLK_STS_IOERR;
2964
2965         blk_account_io_start(rq);
2966
2967         /*
2968          * Since we have a scheduler attached on the top device,
2969          * bypass a potential scheduler on the bottom device for
2970          * insert.
2971          */
2972         blk_mq_run_dispatch_ops(q,
2973                         ret = blk_mq_request_issue_directly(rq, true));
2974         if (ret)
2975                 blk_account_io_done(rq, ktime_get_ns());
2976         return ret;
2977 }
2978 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
2979
2980 /**
2981  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2982  * @rq: the clone request to be cleaned up
2983  *
2984  * Description:
2985  *     Free all bios in @rq for a cloned request.
2986  */
2987 void blk_rq_unprep_clone(struct request *rq)
2988 {
2989         struct bio *bio;
2990
2991         while ((bio = rq->bio) != NULL) {
2992                 rq->bio = bio->bi_next;
2993
2994                 bio_put(bio);
2995         }
2996 }
2997 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2998
2999 /**
3000  * blk_rq_prep_clone - Helper function to setup clone request
3001  * @rq: the request to be setup
3002  * @rq_src: original request to be cloned
3003  * @bs: bio_set that bios for clone are allocated from
3004  * @gfp_mask: memory allocation mask for bio
3005  * @bio_ctr: setup function to be called for each clone bio.
3006  *           Returns %0 for success, non %0 for failure.
3007  * @data: private data to be passed to @bio_ctr
3008  *
3009  * Description:
3010  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3011  *     Also, pages which the original bios are pointing to are not copied
3012  *     and the cloned bios just point to the same pages.
3013  *     So cloned bios must be completed before original bios, which means
3014  *     the caller must complete @rq before @rq_src.
3015  */
3016 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3017                       struct bio_set *bs, gfp_t gfp_mask,
3018                       int (*bio_ctr)(struct bio *, struct bio *, void *),
3019                       void *data)
3020 {
3021         struct bio *bio, *bio_src;
3022
3023         if (!bs)
3024                 bs = &fs_bio_set;
3025
3026         __rq_for_each_bio(bio_src, rq_src) {
3027                 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3028                                       bs);
3029                 if (!bio)
3030                         goto free_and_out;
3031
3032                 if (bio_ctr && bio_ctr(bio, bio_src, data))
3033                         goto free_and_out;
3034
3035                 if (rq->bio) {
3036                         rq->biotail->bi_next = bio;
3037                         rq->biotail = bio;
3038                 } else {
3039                         rq->bio = rq->biotail = bio;
3040                 }
3041                 bio = NULL;
3042         }
3043
3044         /* Copy attributes of the original request to the clone request. */
3045         rq->__sector = blk_rq_pos(rq_src);
3046         rq->__data_len = blk_rq_bytes(rq_src);
3047         if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3048                 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3049                 rq->special_vec = rq_src->special_vec;
3050         }
3051         rq->nr_phys_segments = rq_src->nr_phys_segments;
3052         rq->ioprio = rq_src->ioprio;
3053
3054         if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3055                 goto free_and_out;
3056
3057         return 0;
3058
3059 free_and_out:
3060         if (bio)
3061                 bio_put(bio);
3062         blk_rq_unprep_clone(rq);
3063
3064         return -ENOMEM;
3065 }
3066 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
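/*
 * Illustrative usage sketch, not taken from an in-tree driver: the names
 * "clone", "orig", "clone_bs", example_bio_ctr() and ctr_data below are
 * hypothetical.  A request-stacking driver would typically pair the two
 * helpers above roughly like this:
 *
 *      clone->bio = clone->biotail = NULL;
 *      if (blk_rq_prep_clone(clone, orig, clone_bs, GFP_NOIO,
 *                            example_bio_ctr, ctr_data) < 0)
 *              return BLK_STS_RESOURCE;
 *
 *      ret = blk_insert_cloned_request(clone);
 *      if (ret != BLK_STS_OK)
 *              blk_rq_unprep_clone(clone);
 *
 * Because the clone's bios reference the same pages as @orig's bios, the
 * clone must be completed before @orig is completed.
 */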
3067 #endif /* CONFIG_BLK_MQ_STACKING */
3068
3069 /*
3070  * Steal bios from a request and add them to a bio list.
3071  * The request must not have been partially completed before.
3072  */
3073 void blk_steal_bios(struct bio_list *list, struct request *rq)
3074 {
3075         if (rq->bio) {
3076                 if (list->tail)
3077                         list->tail->bi_next = rq->bio;
3078                 else
3079                         list->head = rq->bio;
3080                 list->tail = rq->biotail;
3081
3082                 rq->bio = NULL;
3083                 rq->biotail = NULL;
3084         }
3085
3086         rq->__data_len = 0;
3087 }
3088 EXPORT_SYMBOL_GPL(blk_steal_bios);
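/*
 * Illustrative usage sketch (hypothetical caller, not from this file): a
 * driver that wants to resubmit the data of a request through a different
 * path can detach its bios like this:
 *
 *      struct bio_list requeue_list;
 *
 *      bio_list_init(&requeue_list);
 *      blk_steal_bios(&requeue_list, rq);
 *      blk_mq_end_request(rq, BLK_STS_OK);
 *
 * The stolen bios can later be resubmitted, e.g. one by one via
 * submit_bio_noacct(), independently of the original request.
 */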
3089
3090 static size_t order_to_size(unsigned int order)
3091 {
3092         return (size_t)PAGE_SIZE << order;
3093 }
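/*
 * Worked example, assuming 4 KiB pages: order_to_size(0) == 4 KiB,
 * order_to_size(2) == 16 KiB and order_to_size(4) == 64 KiB.
 */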
3094
3095 /* called before freeing request pool in @tags */
3096 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3097                                     struct blk_mq_tags *tags)
3098 {
3099         struct page *page;
3100         unsigned long flags;
3101
3102         /* There is no need to clear the driver tags' own mapping */
3103         if (drv_tags == tags)
3104                 return;
3105
3106         list_for_each_entry(page, &tags->page_list, lru) {
3107                 unsigned long start = (unsigned long)page_address(page);
3108                 unsigned long end = start + order_to_size(page->private);
3109                 int i;
3110
3111                 for (i = 0; i < drv_tags->nr_tags; i++) {
3112                         struct request *rq = drv_tags->rqs[i];
3113                         unsigned long rq_addr = (unsigned long)rq;
3114
3115                         if (rq_addr >= start && rq_addr < end) {
3116                                 WARN_ON_ONCE(req_ref_read(rq) != 0);
3117                                 cmpxchg(&drv_tags->rqs[i], rq, NULL);
3118                         }
3119                 }
3120         }
3121
3122         /*
3123          * Wait until all pending iterations are done.
3124          *
3125          * The request references are cleared, and that is guaranteed to be
3126          * observed once the ->lock is released.
3127          */
3128         spin_lock_irqsave(&drv_tags->lock, flags);
3129         spin_unlock_irqrestore(&drv_tags->lock, flags);
3130 }
3131
3132 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3133                      unsigned int hctx_idx)
3134 {
3135         struct blk_mq_tags *drv_tags;
3136         struct page *page;
3137
3138         if (list_empty(&tags->page_list))
3139                 return;
3140
3141         if (blk_mq_is_shared_tags(set->flags))
3142                 drv_tags = set->shared_tags;
3143         else
3144                 drv_tags = set->tags[hctx_idx];
3145
3146         if (tags->static_rqs && set->ops->exit_request) {
3147                 int i;
3148
3149                 for (i = 0; i < tags->nr_tags; i++) {
3150                         struct request *rq = tags->static_rqs[i];
3151
3152                         if (!rq)
3153                                 continue;
3154                         set->ops->exit_request(set, rq, hctx_idx);
3155                         tags->static_rqs[i] = NULL;
3156                 }
3157         }
3158
3159         blk_mq_clear_rq_mapping(drv_tags, tags);
3160
3161         while (!list_empty(&tags->page_list)) {
3162                 page = list_first_entry(&tags->page_list, struct page, lru);
3163                 list_del_init(&page->lru);
3164                 /*
3165                  * Remove kmemleak object previously allocated in
3166                  * blk_mq_alloc_rqs().
3167                  */
3168                 kmemleak_free(page_address(page));
3169                 __free_pages(page, page->private);
3170         }
3171 }
3172
3173 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3174 {
3175         kfree(tags->rqs);
3176         tags->rqs = NULL;
3177         kfree(tags->static_rqs);
3178         tags->static_rqs = NULL;
3179
3180         blk_mq_free_tags(tags);
3181 }
3182
3183 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
3184                 unsigned int hctx_idx)
3185 {
3186         int i;
3187
3188         for (i = 0; i < set->nr_maps; i++) {
3189                 unsigned int start = set->map[i].queue_offset;
3190                 unsigned int end = start + set->map[i].nr_queues;
3191
3192                 if (hctx_idx >= start && hctx_idx < end)
3193                         break;
3194         }
3195
3196         if (i >= set->nr_maps)
3197                 i = HCTX_TYPE_DEFAULT;
3198
3199         return i;
3200 }
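/*
 * Worked example with hypothetical numbers: for a tag set with nr_maps == 2
 * where map[HCTX_TYPE_DEFAULT] has queue_offset 0 / nr_queues 4 and
 * map[HCTX_TYPE_READ] has queue_offset 4 / nr_queues 2, hctx_idx 5 falls in
 * the [4, 6) range and yields HCTX_TYPE_READ, while an hctx_idx that matches
 * no map falls back to HCTX_TYPE_DEFAULT.
 */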
3201
3202 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
3203                 unsigned int hctx_idx)
3204 {
3205         enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
3206
3207         return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
3208 }
3209
3210 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3211                                                unsigned int hctx_idx,
3212                                                unsigned int nr_tags,
3213                                                unsigned int reserved_tags)
3214 {
3215         int node = blk_mq_get_hctx_node(set, hctx_idx);
3216         struct blk_mq_tags *tags;
3217
3218         if (node == NUMA_NO_NODE)
3219                 node = set->numa_node;
3220
3221         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3222                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3223         if (!tags)
3224                 return NULL;
3225
3226         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3227                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3228                                  node);
3229         if (!tags->rqs) {
3230                 blk_mq_free_tags(tags);
3231                 return NULL;
3232         }
3233
3234         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3235                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3236                                         node);
3237         if (!tags->static_rqs) {
3238                 kfree(tags->rqs);
3239                 blk_mq_free_tags(tags);
3240                 return NULL;
3241         }
3242
3243         return tags;
3244 }
3245
3246 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3247                                unsigned int hctx_idx, int node)
3248 {
3249         int ret;
3250
3251         if (set->ops->init_request) {
3252                 ret = set->ops->init_request(set, rq, hctx_idx, node);
3253                 if (ret)
3254                         return ret;
3255         }
3256
3257         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3258         return 0;
3259 }
3260
3261 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
3262                             struct blk_mq_tags *tags,
3263                             unsigned int hctx_idx, unsigned int depth)
3264 {
3265         unsigned int i, j, entries_per_page, max_order = 4;
3266         int node = blk_mq_get_hctx_node(set, hctx_idx);
3267         size_t rq_size, left;
3268
3269         if (node == NUMA_NO_NODE)
3270                 node = set->numa_node;
3271
3272         INIT_LIST_HEAD(&tags->page_list);
3273
3274         /*
3275          * rq_size is the size of the request plus driver payload, rounded
3276          * to the cacheline size
3277          */
3278         rq_size = round_up(sizeof(struct request) + set->cmd_size,
3279                                 cache_line_size());
3280         left = rq_size * depth;
3281
3282         for (i = 0; i < depth; ) {
3283                 int this_order = max_order;
3284                 struct page *page;
3285                 int to_do;
3286                 void *p;
3287
3288                 while (this_order && left < order_to_size(this_order - 1))
3289                         this_order--;
3290
3291                 do {
3292                         page = alloc_pages_node(node,
3293                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3294                                 this_order);
3295                         if (page)
3296                                 break;
3297                         if (!this_order--)
3298                                 break;
3299                         if (order_to_size(this_order) < rq_size)
3300                                 break;
3301                 } while (1);
3302
3303                 if (!page)
3304                         goto fail;
3305
3306                 page->private = this_order;
3307                 list_add_tail(&page->lru, &tags->page_list);
3308
3309                 p = page_address(page);
3310                 /*
3311                  * Allow kmemleak to scan these pages as they contain pointers
3312                  * to additional allocations, such as those made via ops->init_request().
3313                  */
3314                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3315                 entries_per_page = order_to_size(this_order) / rq_size;
3316                 to_do = min(entries_per_page, depth - i);
3317                 left -= to_do * rq_size;
3318                 for (j = 0; j < to_do; j++) {
3319                         struct request *rq = p;
3320
3321                         tags->static_rqs[i] = rq;
3322                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3323                                 tags->static_rqs[i] = NULL;
3324                                 goto fail;
3325                         }
3326
3327                         p += rq_size;
3328                         i++;
3329                 }
3330         }
3331         return 0;
3332
3333 fail:
3334         blk_mq_free_rqs(set, tags, hctx_idx);
3335         return -ENOMEM;
3336 }
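/*
 * Sizing sketch for the allocator above; the numbers are illustrative
 * assumptions, not real struct sizes.  If sizeof(struct request) plus
 * set->cmd_size rounds up to rq_size == 512 bytes on 64-byte cache lines,
 * one order-4 chunk (64 KiB with 4 KiB pages) holds 65536 / 512 == 128
 * requests, so a queue depth of 256 needs two such chunks, or more chunks
 * of a smaller order when high-order allocations fail.
 */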
3337
3338 struct rq_iter_data {
3339         struct blk_mq_hw_ctx *hctx;
3340         bool has_rq;
3341 };
3342
3343 static bool blk_mq_has_request(struct request *rq, void *data)
3344 {
3345         struct rq_iter_data *iter_data = data;
3346
3347         if (rq->mq_hctx != iter_data->hctx)
3348                 return true;
3349         iter_data->has_rq = true;
3350         return false;
3351 }
3352
3353 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3354 {
3355         struct blk_mq_tags *tags = hctx->sched_tags ?
3356                         hctx->sched_tags : hctx->tags;
3357         struct rq_iter_data data = {
3358                 .hctx   = hctx,
3359         };
3360
3361         blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3362         return data.has_rq;
3363 }
3364
3365 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
3366                 struct blk_mq_hw_ctx *hctx)
3367 {
3368         if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3369                 return false;
3370         if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3371                 return false;
3372         return true;
3373 }
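/*
 * Example with a hypothetical mask: if hctx->cpumask is {2, 5} and both CPUs
 * are online, the helper returns false for either CPU.  Once CPU 2 is
 * offline, calling it for CPU 5 (as CPU 5 in turn goes offline) returns true,
 * since CPU 5 is then the only online CPU left serving this hctx.
 */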
3374
3375 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3376 {
3377         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3378                         struct blk_mq_hw_ctx, cpuhp_online);
3379
3380         if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3381             !blk_mq_last_cpu_in_hctx(cpu, hctx))
3382                 return 0;
3383
3384         /*
3385          * Prevent new requests from being allocated on the current hctx.
3386          *
3387          * The smp_mb__after_atomic() pairs with the implied barrier in
3388          * test_and_set_bit_lock() in sbitmap_get().  It ensures the inactive
3389          * flag is seen once we return from the tag allocator.
3390          */
3391         set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3392         smp_mb__after_atomic();
3393
3394         /*
3395          * Try to grab a reference to the queue and wait for any outstanding
3396          * requests.  If we could not grab a reference the queue has been
3397          * frozen and there are no requests.
3398          */
3399         if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3400                 while (blk_mq_hctx_has_requests(hctx))
3401                         msleep(5);
3402                 percpu_ref_put(&hctx->queue->q_usage_counter);
3403         }
3404
3405         return 0;
3406 }
3407
3408 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3409 {
3410         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3411                         struct blk_mq_hw_ctx, cpuhp_online);
3412
3413         if (cpumask_test_cpu(cpu, hctx->cpumask))
3414                 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3415         return 0;
3416 }
3417
3418 /*
3419  * 'cpu' is going away. Splice any existing rq_list entries from this
3420  * software queue to the hw queue dispatch list, and ensure that the
3421  * hw queue gets run.
3422  */
3423 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3424 {
3425         struct blk_mq_hw_ctx *hctx;
3426         struct blk_mq_ctx *ctx;
3427         LIST_HEAD(tmp);
3428         enum hctx_type type;
3429
3430         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3431         if (!cpumask_test_cpu(cpu, hctx->cpumask))
3432                 return 0;
3433
3434         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3435         type = hctx->type;
3436
3437         spin_lock(&ctx->lock);
3438         if (!list_empty(&ctx->rq_lists[type])) {
3439                 list_splice_init(&ctx->rq_lists[type], &tmp);
3440                 blk_mq_hctx_clear_pending(hctx, ctx);
3441         }
3442         spin_unlock(&ctx->lock);
3443
3444         if (list_empty(&tmp))
3445                 return 0;
3446
3447         spin_lock(&hctx->lock);
3448         list_splice_tail_init(&tmp, &hctx->dispatch);
3449         spin_unlock(&hctx->lock);
3450
3451         blk_mq_run_hw_queue(hctx, true);
3452         return 0;
3453 }
3454
3455 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3456 {
3457         if (!(hctx->flags & BLK_MQ_F_STACKING))
3458                 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3459                                                     &hctx->cpuhp_online);
3460         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3461                                             &hctx->cpuhp_dead);
3462 }
3463
3464 /*
3465  * Before freeing the hw queue, clear the flush request reference in
3466  * tags->rqs[] to avoid a potential use-after-free.
3467  */
3468 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3469                 unsigned int queue_depth, struct request *flush_rq)
3470 {
3471         int i;
3472         unsigned long flags;
3473
3474         /* The hw queue may not be mapped yet */
3475         if (!tags)
3476                 return;
3477
3478         WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3479
3480         for (i = 0; i < queue_depth; i++)
3481                 cmpxchg(&tags->rqs[i], flush_rq, NULL);
3482
3483         /*
3484          * Wait until all pending iterations are done.
3485          *
3486          * The request references are cleared, and that is guaranteed to be
3487          * observed once the ->lock is released.
3488          */
3489         spin_lock_irqsave(&tags->lock, flags);
3490         spin_unlock_irqrestore(&tags->lock, flags);
3491 }
3492
3493 /* hctx->ctxs will be freed in queue's release handler */
3494 static void blk_mq_exit_hctx(struct request_queue *q,
3495                 struct blk_mq_tag_set *set,
3496                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3497 {
3498         struct request *flush_rq = hctx->fq->flush_rq;
3499
3500         if (blk_mq_hw_queue_mapped(hctx))
3501                 blk_mq_tag_idle(hctx);
3502
3503         if (blk_queue_init_done(q))
3504                 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3505                                 set->queue_depth, flush_rq);
3506         if (set->ops->exit_request)
3507                 set->ops->exit_request(set, flush_rq, hctx_idx);
3508
3509         if (set->ops->exit_hctx)
3510                 set->ops->exit_hctx(hctx, hctx_idx);
3511
3512         blk_mq_remove_cpuhp(hctx);
3513
3514         xa_erase(&q->hctx_table, hctx_idx);
3515
3516         spin_lock(&q->unused_hctx_lock);
3517         list_add(&hctx->hctx_list, &q->unused_hctx_list);
3518         spin_unlock(&q->unused_hctx_lock);
3519 }
3520
3521 static void blk_mq_exit_hw_queues(struct request_queue *q,
3522                 struct blk_mq_tag_set *set, int nr_queue)
3523 {
3524         struct blk_mq_hw_ctx *hctx;
3525         unsigned long i;
3526
3527         queue_for_each_hw_ctx(q, hctx, i) {
3528                 if (i == nr_queue)
3529                         break;
3530                 blk_mq_exit_hctx(q, set, hctx, i);
3531         }
3532 }
3533
3534 static int blk_mq_init_hctx(struct request_queue *q,
3535                 struct blk_mq_tag_set *set,
3536                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3537 {
3538         hctx->queue_num = hctx_idx;
3539
3540         if (!(hctx->flags & BLK_MQ_F_STACKING))
3541                 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3542                                 &hctx->cpuhp_online);
3543         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
3544
3545         hctx->tags = set->tags[hctx_idx];
3546
3547         if (set->ops->init_hctx &&
3548             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3549                 goto unregister_cpu_notifier;
3550
3551         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3552                                 hctx->numa_node))
3553                 goto exit_hctx;
3554
3555         if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3556                 goto exit_flush_rq;
3557
3558         return 0;
3559
3560  exit_flush_rq:
3561         if (set->ops->exit_request)
3562                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3563  exit_hctx:
3564         if (set->ops->exit_hctx)
3565                 set->ops->exit_hctx(hctx, hctx_idx);
3566  unregister_cpu_notifier:
3567         blk_mq_remove_cpuhp(hctx);
3568         return -1;
3569 }
3570
3571 static struct blk_mq_hw_ctx *
3572 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
3573                 int node)
3574 {
3575         struct blk_mq_hw_ctx *hctx;
3576         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
3577
3578         hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3579         if (!hctx)
3580                 goto fail_alloc_hctx;
3581
3582         if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3583                 goto free_hctx;
3584
3585         atomic_set(&hctx->nr_active, 0);
3586         if (node == NUMA_NO_NODE)
3587                 node = set->numa_node;
3588         hctx->numa_node = node;
3589
3590         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3591         spin_lock_init(&hctx->lock);
3592         INIT_LIST_HEAD(&hctx->dispatch);
3593         hctx->queue = q;
3594         hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3595
3596         INIT_LIST_HEAD(&hctx->hctx_list);
3597
3598         /*
3599          * Allocate space for all possible CPUs to avoid allocating at
3600          * runtime.
3601          */
3602         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3603                         gfp, node);
3604         if (!hctx->ctxs)
3605                 goto free_cpumask;
3606
3607         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3608                                 gfp, node, false, false))
3609                 goto free_ctxs;
3610         hctx->nr_ctx = 0;
3611
3612         spin_lock_init(&hctx->dispatch_wait_lock);
3613         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3614         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3615
3616         hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3617         if (!hctx->fq)
3618                 goto free_bitmap;
3619
3620         blk_mq_hctx_kobj_init(hctx);
3621
3622         return hctx;
3623
3624  free_bitmap:
3625         sbitmap_free(&hctx->ctx_map);
3626  free_ctxs:
3627         kfree(hctx->ctxs);
3628  free_cpumask:
3629         free_cpumask_var(hctx->cpumask);
3630  free_hctx:
3631         kfree(hctx);
3632  fail_alloc_hctx:
3633         return NULL;
3634 }
3635
3636 static void blk_mq_init_cpu_queues(struct request_queue *q,
3637                                    unsigned int nr_hw_queues)
3638 {
3639         struct blk_mq_tag_set *set = q->tag_set;
3640         unsigned int i, j;
3641
3642         for_each_possible_cpu(i) {
3643                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3644                 struct blk_mq_hw_ctx *hctx;
3645                 int k;
3646
3647                 __ctx->cpu = i;
3648                 spin_lock_init(&__ctx->lock);
3649                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3650                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3651
3652                 __ctx->queue = q;
3653
3654                 /*
3655                  * Set the local node, IFF we have more than one hw queue. If
3656                  * not, we remain on the home node of the device.
3657                  */
3658                 for (j = 0; j < set->nr_maps; j++) {
3659                         hctx = blk_mq_map_queue_type(q, j, i);
3660                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3661                                 hctx->numa_node = cpu_to_node(i);
3662                 }
3663         }
3664 }
3665
3666 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3667                                              unsigned int hctx_idx,
3668                                              unsigned int depth)
3669 {
3670         struct blk_mq_tags *tags;
3671         int ret;
3672
3673         tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3674         if (!tags)
3675                 return NULL;
3676
3677         ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3678         if (ret) {
3679                 blk_mq_free_rq_map(tags);
3680                 return NULL;
3681         }
3682
3683         return tags;
3684 }
3685
3686 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3687                                        int hctx_idx)
3688 {
3689         if (blk_mq_is_shared_tags(set->flags)) {
3690                 set->tags[hctx_idx] = set->shared_tags;
3691
3692                 return true;
3693         }
3694
3695         set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3696                                                        set->queue_depth);
3697
3698         return set->tags[hctx_idx];
3699 }
3700
3701 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3702                              struct blk_mq_tags *tags,
3703                              unsigned int hctx_idx)
3704 {
3705         if (tags) {
3706                 blk_mq_free_rqs(set, tags, hctx_idx);
3707                 blk_mq_free_rq_map(tags);
3708         }
3709 }
3710
3711 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3712                                       unsigned int hctx_idx)
3713 {
3714         if (!blk_mq_is_shared_tags(set->flags))
3715                 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3716
3717         set->tags[hctx_idx] = NULL;
3718 }
3719
3720 static void blk_mq_map_swqueue(struct request_queue *q)
3721 {
3722         unsigned int j, hctx_idx;
3723         unsigned long i;
3724         struct blk_mq_hw_ctx *hctx;
3725         struct blk_mq_ctx *ctx;
3726         struct blk_mq_tag_set *set = q->tag_set;
3727
3728         queue_for_each_hw_ctx(q, hctx, i) {
3729                 cpumask_clear(hctx->cpumask);
3730                 hctx->nr_ctx = 0;
3731                 hctx->dispatch_from = NULL;
3732         }
3733
3734         /*
3735          * Map software to hardware queues.
3736          *
3737          * If the cpu isn't present, the cpu is mapped to the first hctx.
3738          */
3739         for_each_possible_cpu(i) {
3740
3741                 ctx = per_cpu_ptr(q->queue_ctx, i);
3742                 for (j = 0; j < set->nr_maps; j++) {
3743                         if (!set->map[j].nr_queues) {
3744                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
3745                                                 HCTX_TYPE_DEFAULT, i);
3746                                 continue;
3747                         }
3748                         hctx_idx = set->map[j].mq_map[i];
3749                         /* an unmapped hw queue can be remapped after the CPU topology changes */
3750                         if (!set->tags[hctx_idx] &&
3751                             !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3752                                 /*
3753                                  * If tags initialization fails for some hctx,
3754                                  * that hctx won't be brought online.  In this
3755                                  * case, remap the current ctx to hctx[0], which
3756                                  * is guaranteed to always have tags allocated.
3757                                  */
3758                                 set->map[j].mq_map[i] = 0;
3759                         }
3760
3761                         hctx = blk_mq_map_queue_type(q, j, i);
3762                         ctx->hctxs[j] = hctx;
3763                         /*
3764                          * If the CPU is already set in the mask, then we've
3765                          * mapped this one already. This can happen if
3766                          * devices share queues across queue maps.
3767                          */
3768                         if (cpumask_test_cpu(i, hctx->cpumask))
3769                                 continue;
3770
3771                         cpumask_set_cpu(i, hctx->cpumask);
3772                         hctx->type = j;
3773                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
3774                         hctx->ctxs[hctx->nr_ctx++] = ctx;
3775
3776                         /*
3777                          * If the nr_ctx type overflows, we have exceeded the
3778                          * amount of sw queues we can support.
3779                          */
3780                         BUG_ON(!hctx->nr_ctx);
3781                 }
3782
3783                 for (; j < HCTX_MAX_TYPES; j++)
3784                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
3785                                         HCTX_TYPE_DEFAULT, i);
3786         }
3787
3788         queue_for_each_hw_ctx(q, hctx, i) {
3789                 /*
3790                  * If no software queues are mapped to this hardware queue,
3791                  * disable it and free the request entries.
3792                  */
3793                 if (!hctx->nr_ctx) {
3794                         /* Never unmap queue 0.  We need it as a
3795                          * fallback in case a new remap fails
3796                          * allocation.
3797                          */
3798                         if (i)
3799                                 __blk_mq_free_map_and_rqs(set, i);
3800
3801                         hctx->tags = NULL;
3802                         continue;
3803                 }
3804
3805                 hctx->tags = set->tags[i];
3806                 WARN_ON(!hctx->tags);
3807
3808                 /*
3809                  * Set the map size to the number of mapped software queues.
3810                  * This is more accurate and more efficient than looping
3811                  * over all possibly mapped software queues.
3812                  */
3813                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3814
3815                 /*
3816                  * Initialize batch roundrobin counts
3817                  */
3818                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3819                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3820         }
3821 }
3822
3823 /*
3824  * Caller needs to ensure that we're either frozen/quiesced, or that
3825  * the queue isn't live yet.
3826  */
3827 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
3828 {
3829         struct blk_mq_hw_ctx *hctx;
3830         unsigned long i;
3831
3832         queue_for_each_hw_ctx(q, hctx, i) {
3833                 if (shared) {
3834                         hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3835                 } else {
3836                         blk_mq_tag_idle(hctx);
3837                         hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3838                 }
3839         }
3840 }
3841
3842 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3843                                          bool shared)
3844 {
3845         struct request_queue *q;
3846
3847         lockdep_assert_held(&set->tag_list_lock);
3848
3849         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3850                 blk_mq_freeze_queue(q);
3851                 queue_set_hctx_shared(q, shared);
3852                 blk_mq_unfreeze_queue(q);
3853         }
3854 }
3855
3856 static void blk_mq_del_queue_tag_set(struct request_queue *q)
3857 {
3858         struct blk_mq_tag_set *set = q->tag_set;
3859
3860         mutex_lock(&set->tag_list_lock);
3861         list_del(&q->tag_set_list);
3862         if (list_is_singular(&set->tag_list)) {
3863                 /* just transitioned to unshared */
3864                 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3865                 /* update existing queue */
3866                 blk_mq_update_tag_set_shared(set, false);
3867         }
3868         mutex_unlock(&set->tag_list_lock);
3869         INIT_LIST_HEAD(&q->tag_set_list);
3870 }
3871
3872 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
3873                                      struct request_queue *q)
3874 {
3875         mutex_lock(&set->tag_list_lock);
3876
3877         /*
3878          * Check to see if we're transitioning to shared (from 1 to 2 queues).
3879          */
3880         if (!list_empty(&set->tag_list) &&
3881             !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
3882                 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3883                 /* update existing queue */
3884                 blk_mq_update_tag_set_shared(set, true);
3885         }
3886         if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
3887                 queue_set_hctx_shared(q, true);
3888         list_add_tail(&q->tag_set_list, &set->tag_list);
3889
3890         mutex_unlock(&set->tag_list_lock);
3891 }
3892
3893 /* All allocations will be freed in release handler of q->mq_kobj */
3894 static int blk_mq_alloc_ctxs(struct request_queue *q)
3895 {
3896         struct blk_mq_ctxs *ctxs;
3897         int cpu;
3898
3899         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
3900         if (!ctxs)
3901                 return -ENOMEM;
3902
3903         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
3904         if (!ctxs->queue_ctx)
3905                 goto fail;
3906
3907         for_each_possible_cpu(cpu) {
3908                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
3909                 ctx->ctxs = ctxs;
3910         }
3911
3912         q->mq_kobj = &ctxs->kobj;
3913         q->queue_ctx = ctxs->queue_ctx;
3914
3915         return 0;
3916  fail:
3917         kfree(ctxs);
3918         return -ENOMEM;
3919 }
3920
3921 /*
3922  * This is the actual release handler for mq, but we run it from the
3923  * request queue's release handler to avoid use-after-free issues;
3924  * q->mq_kobj shouldn't have been introduced in the first place, but
3925  * we can't group the ctx/hctx kobjects without it.
3926  */
3927 void blk_mq_release(struct request_queue *q)
3928 {
3929         struct blk_mq_hw_ctx *hctx, *next;
3930         unsigned long i;
3931
3932         queue_for_each_hw_ctx(q, hctx, i)
3933                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
3934
3935         /* all hctx are in .unused_hctx_list now */
3936         list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
3937                 list_del_init(&hctx->hctx_list);
3938                 kobject_put(&hctx->kobj);
3939         }
3940
3941         xa_destroy(&q->hctx_table);
3942
3943         /*
3944          * Release .mq_kobj and the sw queues' kobjects now because
3945          * both share their lifetime with the request queue.
3946          */
3947         blk_mq_sysfs_deinit(q);
3948 }
3949
3950 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
3951                 void *queuedata)
3952 {
3953         struct request_queue *q;
3954         int ret;
3955
3956         q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
3957         if (!q)
3958                 return ERR_PTR(-ENOMEM);
3959         q->queuedata = queuedata;
3960         ret = blk_mq_init_allocated_queue(set, q);
3961         if (ret) {
3962                 blk_put_queue(q);
3963                 return ERR_PTR(ret);
3964         }
3965         return q;
3966 }
3967
3968 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
3969 {
3970         return blk_mq_init_queue_data(set, NULL);
3971 }
3972 EXPORT_SYMBOL(blk_mq_init_queue);
3973
3974 /**
3975  * blk_mq_destroy_queue - shutdown a request queue
3976  * @q: request queue to shutdown
3977  *
3978  * This shuts down a request queue allocated by blk_mq_init_queue() and drops
3979  * the initial reference.  All future requests will fail with -ENODEV.
3980  *
3981  * Context: can sleep
3982  */
3983 void blk_mq_destroy_queue(struct request_queue *q)
3984 {
3985         WARN_ON_ONCE(!queue_is_mq(q));
3986         WARN_ON_ONCE(blk_queue_registered(q));
3987
3988         might_sleep();
3989
3990         blk_queue_flag_set(QUEUE_FLAG_DYING, q);
3991         blk_queue_start_drain(q);
3992         blk_freeze_queue(q);
3993
3994         blk_sync_queue(q);
3995         blk_mq_cancel_work_sync(q);
3996         blk_mq_exit_queue(q);
3997
3998         /* @q is and will stay empty, shutdown and put */
3999         blk_put_queue(q);
4000 }
4001 EXPORT_SYMBOL(blk_mq_destroy_queue);
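/*
 * Illustrative pairing (hypothetical caller): a driver that allocated a
 * standalone queue, e.g. for passthrough commands, tears it down like this
 * once the queue is no longer needed:
 *
 *      q = blk_mq_init_queue(set);
 *      if (IS_ERR(q))
 *              return PTR_ERR(q);
 *      ...
 *      blk_mq_destroy_queue(q);
 *
 * blk_mq_destroy_queue() drops only the initial reference; any extra
 * references taken with blk_get_queue() must still be put separately.
 */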
4002
4003 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
4004                 struct lock_class_key *lkclass)
4005 {
4006         struct request_queue *q;
4007         struct gendisk *disk;
4008
4009         q = blk_mq_init_queue_data(set, queuedata);
4010         if (IS_ERR(q))
4011                 return ERR_CAST(q);
4012
4013         disk = __alloc_disk_node(q, set->numa_node, lkclass);
4014         if (!disk) {
4015                 blk_mq_destroy_queue(q);
4016                 return ERR_PTR(-ENOMEM);
4017         }
4018         set_bit(GD_OWNS_QUEUE, &disk->state);
4019         return disk;
4020 }
4021 EXPORT_SYMBOL(__blk_mq_alloc_disk);
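/*
 * Illustrative driver-side sketch; the "mydev" names below are hypothetical.
 * The usual consumer of this helper is the blk_mq_alloc_disk() macro, which
 * supplies the lock class key.  A minimal probe path looks roughly like:
 *
 *      disk = blk_mq_alloc_disk(&mydev->tag_set, mydev);
 *      if (IS_ERR(disk))
 *              return PTR_ERR(disk);
 *      disk->fops = &mydev_block_ops;
 *      disk->private_data = mydev;
 *      sprintf(disk->disk_name, "mydev0");
 *      set_capacity(disk, nr_sectors);
 *      err = add_disk(disk);
 *
 * On teardown the driver calls del_gendisk() and put_disk(); since
 * GD_OWNS_QUEUE is set above, the final put also releases the queue, so no
 * separate queue teardown is needed.
 */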
4022
4023 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4024                 struct lock_class_key *lkclass)
4025 {
4026         if (!blk_get_queue(q))
4027                 return NULL;
4028         return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4029 }
4030 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
4031
4032 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
4033                 struct blk_mq_tag_set *set, struct request_queue *q,
4034                 int hctx_idx, int node)
4035 {
4036         struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4037
4038         /* reuse dead hctx first */
4039         spin_lock(&q->unused_hctx_lock);
4040         list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4041                 if (tmp->numa_node == node) {
4042                         hctx = tmp;
4043                         break;
4044                 }
4045         }
4046         if (hctx)
4047                 list_del_init(&hctx->hctx_list);
4048         spin_unlock(&q->unused_hctx_lock);
4049
4050         if (!hctx)
4051                 hctx = blk_mq_alloc_hctx(q, set, node);
4052         if (!hctx)
4053                 goto fail;
4054
4055         if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4056                 goto free_hctx;
4057
4058         return hctx;
4059
4060  free_hctx:
4061         kobject_put(&hctx->kobj);
4062  fail:
4063         return NULL;
4064 }
4065
4066 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4067                                                 struct request_queue *q)
4068 {
4069         struct blk_mq_hw_ctx *hctx;
4070         unsigned long i, j;
4071
4072         /* protect against switching io scheduler  */
4073         mutex_lock(&q->sysfs_lock);
4074         for (i = 0; i < set->nr_hw_queues; i++) {
4075                 int old_node;
4076                 int node = blk_mq_get_hctx_node(set, i);
4077                 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4078
4079                 if (old_hctx) {
4080                         old_node = old_hctx->numa_node;
4081                         blk_mq_exit_hctx(q, set, old_hctx, i);
4082                 }
4083
4084                 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4085                         if (!old_hctx)
4086                                 break;
4087                         pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
4088                                         node, old_node);
4089                         hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4090                         WARN_ON_ONCE(!hctx);
4091                 }
4092         }
4093         /*
4094          * If increasing nr_hw_queues failed, free the newly allocated
4095          * hctxs and keep the previous q->nr_hw_queues.
4096          */
4097         if (i != set->nr_hw_queues) {
4098                 j = q->nr_hw_queues;
4099         } else {
4100                 j = i;
4101                 q->nr_hw_queues = set->nr_hw_queues;
4102         }
4103
4104         xa_for_each_start(&q->hctx_table, j, hctx, j)
4105                 blk_mq_exit_hctx(q, set, hctx, j);
4106         mutex_unlock(&q->sysfs_lock);
4107 }
4108
4109 static void blk_mq_update_poll_flag(struct request_queue *q)
4110 {
4111         struct blk_mq_tag_set *set = q->tag_set;
4112
4113         if (set->nr_maps > HCTX_TYPE_POLL &&
4114             set->map[HCTX_TYPE_POLL].nr_queues)
4115                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4116         else
4117                 blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
4118 }
4119
4120 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4121                 struct request_queue *q)
4122 {
4123         WARN_ON_ONCE(blk_queue_has_srcu(q) !=
4124                         !!(set->flags & BLK_MQ_F_BLOCKING));
4125
4126         /* mark the queue as mq asap */
4127         q->mq_ops = set->ops;
4128
4129         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
4130                                              blk_mq_poll_stats_bkt,
4131                                              BLK_MQ_POLL_STATS_BKTS, q);
4132         if (!q->poll_cb)
4133                 goto err_exit;
4134
4135         if (blk_mq_alloc_ctxs(q))
4136                 goto err_poll;
4137
4138         /* init q->mq_kobj and sw queues' kobjects */
4139         blk_mq_sysfs_init(q);
4140
4141         INIT_LIST_HEAD(&q->unused_hctx_list);
4142         spin_lock_init(&q->unused_hctx_lock);
4143
4144         xa_init(&q->hctx_table);
4145
4146         blk_mq_realloc_hw_ctxs(set, q);
4147         if (!q->nr_hw_queues)
4148                 goto err_hctxs;
4149
4150         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4151         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4152
4153         q->tag_set = set;
4154
4155         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4156         blk_mq_update_poll_flag(q);
4157
4158         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4159         INIT_LIST_HEAD(&q->requeue_list);
4160         spin_lock_init(&q->requeue_lock);
4161
4162         q->nr_requests = set->queue_depth;
4163
4164         /*
4165          * Default to classic polling
4166          */
4167         q->poll_nsec = BLK_MQ_POLL_CLASSIC;
4168
4169         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4170         blk_mq_add_queue_tag_set(set, q);
4171         blk_mq_map_swqueue(q);
4172         return 0;
4173
4174 err_hctxs:
4175         xa_destroy(&q->hctx_table);
4176         q->nr_hw_queues = 0;
4177         blk_mq_sysfs_deinit(q);
4178 err_poll:
4179         blk_stat_free_callback(q->poll_cb);
4180         q->poll_cb = NULL;
4181 err_exit:
4182         q->mq_ops = NULL;
4183         return -ENOMEM;
4184 }
4185 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4186
4187 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4188 void blk_mq_exit_queue(struct request_queue *q)
4189 {
4190         struct blk_mq_tag_set *set = q->tag_set;
4191
4192         /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4193         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4194         /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4195         blk_mq_del_queue_tag_set(q);
4196 }
4197
4198 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4199 {
4200         int i;
4201
4202         if (blk_mq_is_shared_tags(set->flags)) {
4203                 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4204                                                 BLK_MQ_NO_HCTX_IDX,
4205                                                 set->queue_depth);
4206                 if (!set->shared_tags)
4207                         return -ENOMEM;
4208         }
4209
4210         for (i = 0; i < set->nr_hw_queues; i++) {
4211                 if (!__blk_mq_alloc_map_and_rqs(set, i))
4212                         goto out_unwind;
4213                 cond_resched();
4214         }
4215
4216         return 0;
4217
4218 out_unwind:
4219         while (--i >= 0)
4220                 __blk_mq_free_map_and_rqs(set, i);
4221
4222         if (blk_mq_is_shared_tags(set->flags)) {
4223                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4224                                         BLK_MQ_NO_HCTX_IDX);
4225         }
4226
4227         return -ENOMEM;
4228 }
4229
4230 /*
4231  * Allocate the request maps associated with this tag_set. Note that this
4232  * may reduce the depth asked for, if memory is tight. set->queue_depth
4233  * will be updated to reflect the allocated depth.
4234  */
4235 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4236 {
4237         unsigned int depth;
4238         int err;
4239
4240         depth = set->queue_depth;
4241         do {
4242                 err = __blk_mq_alloc_rq_maps(set);
4243                 if (!err)
4244                         break;
4245
4246                 set->queue_depth >>= 1;
4247                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4248                         err = -ENOMEM;
4249                         break;
4250                 }
4251         } while (set->queue_depth);
4252
4253         if (!set->queue_depth || err) {
4254                 pr_err("blk-mq: failed to allocate request map\n");
4255                 return -ENOMEM;
4256         }
4257
4258         if (depth != set->queue_depth)
4259                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4260                                                 depth, set->queue_depth);
4261
4262         return 0;
4263 }
4264
4265 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4266 {
4267         /*
4268          * blk_mq_map_queues() and multiple .map_queues() implementations
4269          * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4270          * number of hardware queues.
4271          */
4272         if (set->nr_maps == 1)
4273                 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4274
4275         if (set->ops->map_queues && !is_kdump_kernel()) {
4276                 int i;
4277
4278                 /*
4279                  * transport .map_queues is usually done in the following
4280                  * way:
4281                  *
4282                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4283                  *      mask = get_cpu_mask(queue)
4284                  *      for_each_cpu(cpu, mask)
4285                  *              set->map[x].mq_map[cpu] = queue;
4286                  * }
4287                  *
4288                  * When we need to remap, the table has to be cleared to
4289                  * kill stale mappings, since one CPU may not be mapped
4290                  * to any hw queue.
4291                  */
4292                 for (i = 0; i < set->nr_maps; i++)
4293                         blk_mq_clear_mq_map(&set->map[i]);
4294
4295                 set->ops->map_queues(set);
4296         } else {
4297                 BUG_ON(set->nr_maps > 1);
4298                 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4299         }
4300 }
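/*
 * Illustrative .map_queues sketch for a hypothetical driver, assuming the
 * void callback signature used here.  After the driver has filled in
 * nr_queues and queue_offset for each map, it can leave the actual spreading
 * to the generic helper:
 *
 *      static void mydev_map_queues(struct blk_mq_tag_set *set)
 *      {
 *              blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *              blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
 *      }
 *
 * PCI drivers with per-vector queues typically use blk_mq_pci_map_queues()
 * instead, so that each hw queue follows its MSI-X vector's CPU affinity.
 */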
4301
4302 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4303                                   int cur_nr_hw_queues, int new_nr_hw_queues)
4304 {
4305         struct blk_mq_tags **new_tags;
4306
4307         if (cur_nr_hw_queues >= new_nr_hw_queues)
4308                 return 0;
4309
4310         new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4311                                 GFP_KERNEL, set->numa_node);
4312         if (!new_tags)
4313                 return -ENOMEM;
4314
4315         if (set->tags)
4316                 memcpy(new_tags, set->tags, cur_nr_hw_queues *
4317                        sizeof(*set->tags));
4318         kfree(set->tags);
4319         set->tags = new_tags;
4320         set->nr_hw_queues = new_nr_hw_queues;
4321
4322         return 0;
4323 }
4324
4325 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
4326                                 int new_nr_hw_queues)
4327 {
4328         return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
4329 }
4330
4331 /*
4332  * Alloc a tag set to be associated with one or more request queues.
4333  * May fail with -EINVAL for various error conditions. May adjust the
4334  * requested depth down, if it's too large. In that case, the adjusted
4335  * value will be stored in set->queue_depth.
4336  */
4337 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4338 {
4339         int i, ret;
4340
4341         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4342
4343         if (!set->nr_hw_queues)
4344                 return -EINVAL;
4345         if (!set->queue_depth)
4346                 return -EINVAL;
4347         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4348                 return -EINVAL;
4349
4350         if (!set->ops->queue_rq)
4351                 return -EINVAL;
4352
4353         if (!set->ops->get_budget ^ !set->ops->put_budget)
4354                 return -EINVAL;
4355
4356         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4357                 pr_info("blk-mq: reduced tag depth to %u\n",
4358                         BLK_MQ_MAX_DEPTH);
4359                 set->queue_depth = BLK_MQ_MAX_DEPTH;
4360         }
4361
4362         if (!set->nr_maps)
4363                 set->nr_maps = 1;
4364         else if (set->nr_maps > HCTX_MAX_TYPES)
4365                 return -EINVAL;
4366
4367         /*
4368          * If a crashdump is active, then we are potentially in a very
4369          * memory constrained environment. Limit us to 1 queue and
4370          * 64 tags to prevent using too much memory.
4371          */
4372         if (is_kdump_kernel()) {
4373                 set->nr_hw_queues = 1;
4374                 set->nr_maps = 1;
4375                 set->queue_depth = min(64U, set->queue_depth);
4376         }
4377         /*
4378          * There is no use for more h/w queues than CPUs if we just have
4379          * a single map.
4380          */
4381         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4382                 set->nr_hw_queues = nr_cpu_ids;
4383
4384         if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
4385                 return -ENOMEM;
4386
4387         ret = -ENOMEM;
4388         for (i = 0; i < set->nr_maps; i++) {
4389                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4390                                                   sizeof(set->map[i].mq_map[0]),
4391                                                   GFP_KERNEL, set->numa_node);
4392                 if (!set->map[i].mq_map)
4393                         goto out_free_mq_map;
4394                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4395         }
4396
4397         blk_mq_update_queue_map(set);
4398
4399         ret = blk_mq_alloc_set_map_and_rqs(set);
4400         if (ret)
4401                 goto out_free_mq_map;
4402
4403         mutex_init(&set->tag_list_lock);
4404         INIT_LIST_HEAD(&set->tag_list);
4405
4406         return 0;
4407
4408 out_free_mq_map:
4409         for (i = 0; i < set->nr_maps; i++) {
4410                 kfree(set->map[i].mq_map);
4411                 set->map[i].mq_map = NULL;
4412         }
4413         kfree(set->tags);
4414         set->tags = NULL;
4415         return ret;
4416 }
4417 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
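/*
 * Illustrative tag-set setup for a hypothetical "mydev" driver: the set is
 * filled in by hand before calling the allocator above, e.g.:
 *
 *      memset(&mydev->tag_set, 0, sizeof(mydev->tag_set));
 *      mydev->tag_set.ops = &mydev_mq_ops;
 *      mydev->tag_set.nr_hw_queues = num_online_cpus();
 *      mydev->tag_set.queue_depth = 128;
 *      mydev->tag_set.numa_node = NUMA_NO_NODE;
 *      mydev->tag_set.cmd_size = sizeof(struct mydev_cmd);
 *      mydev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *      mydev->tag_set.driver_data = mydev;
 *
 *      err = blk_mq_alloc_tag_set(&mydev->tag_set);
 *      if (err)
 *              return err;
 *
 * The matching blk_mq_free_tag_set() may only be called after every queue
 * using the set has been torn down.
 */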
4418
4419 /* allocate and initialize a tagset for a simple single-queue device */
4420 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4421                 const struct blk_mq_ops *ops, unsigned int queue_depth,
4422                 unsigned int set_flags)
4423 {
4424         memset(set, 0, sizeof(*set));
4425         set->ops = ops;
4426         set->nr_hw_queues = 1;
4427         set->nr_maps = 1;
4428         set->queue_depth = queue_depth;
4429         set->numa_node = NUMA_NO_NODE;
4430         set->flags = set_flags;
4431         return blk_mq_alloc_tag_set(set);
4432 }
4433 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
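/*
 * Example for a hypothetical single-queue driver: simple devices can skip
 * the manual setup shown above and use this convenience helper directly,
 *
 *      err = blk_mq_alloc_sq_tag_set(&mydev->tag_set, &mydev_mq_ops, 16,
 *                                    BLK_MQ_F_SHOULD_MERGE);
 *      if (err)
 *              return err;
 *
 * and then allocate the disk with blk_mq_alloc_disk() as usual.
 */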
4434
4435 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4436 {
4437         int i, j;
4438
4439         for (i = 0; i < set->nr_hw_queues; i++)
4440                 __blk_mq_free_map_and_rqs(set, i);
4441
4442         if (blk_mq_is_shared_tags(set->flags)) {
4443                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4444                                         BLK_MQ_NO_HCTX_IDX);
4445         }
4446
4447         for (j = 0; j < set->nr_maps; j++) {
4448                 kfree(set->map[j].mq_map);
4449                 set->map[j].mq_map = NULL;
4450         }
4451
4452         kfree(set->tags);
4453         set->tags = NULL;
4454 }
4455 EXPORT_SYMBOL(blk_mq_free_tag_set);
4456
4457 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4458 {
4459         struct blk_mq_tag_set *set = q->tag_set;
4460         struct blk_mq_hw_ctx *hctx;
4461         int ret;
4462         unsigned long i;
4463
4464         if (!set)
4465                 return -EINVAL;
4466
4467         if (q->nr_requests == nr)
4468                 return 0;
4469
4470         blk_mq_freeze_queue(q);
4471         blk_mq_quiesce_queue(q);
4472
4473         ret = 0;
4474         queue_for_each_hw_ctx(q, hctx, i) {
4475                 if (!hctx->tags)
4476                         continue;
4477                 /*
4478                  * If we're using an MQ scheduler, just update the scheduler
4479                  * queue depth. This is similar to what the old code would do.
4480                  */
4481                 if (hctx->sched_tags) {
4482                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4483                                                       nr, true);
4484                 } else {
4485                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4486                                                       false);
4487                 }
4488                 if (ret)
4489                         break;
4490                 if (q->elevator && q->elevator->type->ops.depth_updated)
4491                         q->elevator->type->ops.depth_updated(hctx);
4492         }
4493         if (!ret) {
4494                 q->nr_requests = nr;
4495                 if (blk_mq_is_shared_tags(set->flags)) {
4496                         if (q->elevator)
4497                                 blk_mq_tag_update_sched_shared_tags(q);
4498                         else
4499                                 blk_mq_tag_resize_shared_tags(set, nr);
4500                 }
4501         }
4502
4503         blk_mq_unquiesce_queue(q);
4504         blk_mq_unfreeze_queue(q);
4505
4506         return ret;
4507 }
4508
4509 /*
4510  * request_queue and elevator_type pair.
4511  * It is just used by __blk_mq_update_nr_hw_queues to cache
4512  * the elevator_type associated with a request_queue.
4513  */
4514 struct blk_mq_qe_pair {
4515         struct list_head node;
4516         struct request_queue *q;
4517         struct elevator_type *type;
4518 };
4519
4520 /*
4521  * Cache the elevator_type in the qe pair list and switch the
4522  * io scheduler to 'none'.
4523  */
4524 static bool blk_mq_elv_switch_none(struct list_head *head,
4525                 struct request_queue *q)
4526 {
4527         struct blk_mq_qe_pair *qe;
4528
4529         if (!q->elevator)
4530                 return true;
4531
4532         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4533         if (!qe)
4534                 return false;
4535
4536         /* q->elevator needs protection from ->sysfs_lock */
4537         /* q->elevator needs to be protected by ->sysfs_lock */
4538
4539         INIT_LIST_HEAD(&qe->node);
4540         qe->q = q;
4541         qe->type = q->elevator->type;
4542         list_add(&qe->node, head);
4543
4544         /*
4545          * After elevator_switch(), the previous elevator_queue will be
4546          * released by elevator_release(). The reference to the io scheduler
4547          * module taken by elevator_get() will also be put. So we need to take
4548          * a reference to the io scheduler module here to prevent it from
4549          * being removed.
4550          */
4551         __module_get(qe->type->elevator_owner);
4552         elevator_switch(q, NULL);
4553         mutex_unlock(&q->sysfs_lock);
4554
4555         return true;
4556 }
4557
4558 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4559                                                 struct request_queue *q)
4560 {
4561         struct blk_mq_qe_pair *qe;
4562
4563         list_for_each_entry(qe, head, node)
4564                 if (qe->q == q)
4565                         return qe;
4566
4567         return NULL;
4568 }
4569
4570 static void blk_mq_elv_switch_back(struct list_head *head,
4571                                   struct request_queue *q)
4572 {
4573         struct blk_mq_qe_pair *qe;
4574         struct elevator_type *t;
4575
4576         qe = blk_lookup_qe_pair(head, q);
4577         if (!qe)
4578                 return;
4579         t = qe->type;
4580         list_del(&qe->node);
4581         kfree(qe);
4582
4583         mutex_lock(&q->sysfs_lock);
4584         elevator_switch(q, t);
4585         mutex_unlock(&q->sysfs_lock);
4586 }
4587
4588 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4589                                                         int nr_hw_queues)
4590 {
4591         struct request_queue *q;
4592         LIST_HEAD(head);
4593         int prev_nr_hw_queues;
4594
4595         lockdep_assert_held(&set->tag_list_lock);
4596
4597         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4598                 nr_hw_queues = nr_cpu_ids;
4599         if (nr_hw_queues < 1)
4600                 return;
4601         if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4602                 return;
4603
4604         list_for_each_entry(q, &set->tag_list, tag_set_list)
4605                 blk_mq_freeze_queue(q);
4606         /*
4607          * Switch IO scheduler to 'none', cleaning up the data associated
4608          * with the previous scheduler. We will switch back once we are done
4609          * updating the new sw to hw queue mappings.
4610          */
4611         list_for_each_entry(q, &set->tag_list, tag_set_list)
4612                 if (!blk_mq_elv_switch_none(&head, q))
4613                         goto switch_back;
4614
4615         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4616                 blk_mq_debugfs_unregister_hctxs(q);
4617                 blk_mq_sysfs_unregister_hctxs(q);
4618         }
4619
4620         prev_nr_hw_queues = set->nr_hw_queues;
4621         if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues,
4622                                         nr_hw_queues) < 0)
4623                 goto reregister;
4624
4625         set->nr_hw_queues = nr_hw_queues;
4626 fallback:
4627         blk_mq_update_queue_map(set);
4628         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4629                 blk_mq_realloc_hw_ctxs(set, q);
4630                 blk_mq_update_poll_flag(q);
4631                 if (q->nr_hw_queues != set->nr_hw_queues) {
4632                         int i = prev_nr_hw_queues;
4633
4634                         pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
4635                                         nr_hw_queues, prev_nr_hw_queues);
4636                         for (; i < set->nr_hw_queues; i++)
4637                                 __blk_mq_free_map_and_rqs(set, i);
4638
4639                         set->nr_hw_queues = prev_nr_hw_queues;
4640                         blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4641                         goto fallback;
4642                 }
4643                 blk_mq_map_swqueue(q);
4644         }
4645
4646 reregister:
4647         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4648                 blk_mq_sysfs_register_hctxs(q);
4649                 blk_mq_debugfs_register_hctxs(q);
4650         }
4651
4652 switch_back:
4653         list_for_each_entry(q, &set->tag_list, tag_set_list)
4654                 blk_mq_elv_switch_back(&head, q);
4655
4656         list_for_each_entry(q, &set->tag_list, tag_set_list)
4657                 blk_mq_unfreeze_queue(q);
4658 }
4659
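/*
 * blk_mq_update_nr_hw_queues - adjust the number of hardware queues of a tag set
 *
 * Takes set->tag_list_lock and updates every request queue sharing the tag
 * set.  A driver would typically call this after the device's usable queue
 * count has changed, e.g. following a controller reset.  Illustrative sketch
 * only, using hypothetical driver fields (ctrl->tag_set and
 * ctrl->nr_io_queues are not from this file):
 *
 *	blk_mq_update_nr_hw_queues(&ctrl->tag_set, ctrl->nr_io_queues);
 */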
4660 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4661 {
4662         mutex_lock(&set->tag_list_lock);
4663         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4664         mutex_unlock(&set->tag_list_lock);
4665 }
4666 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
4667
4668 /* Enable polling stats and return whether they were already enabled. */
4669 static bool blk_poll_stats_enable(struct request_queue *q)
4670 {
4671         if (q->poll_stat)
4672                 return true;
4673
4674         return blk_stats_alloc_enable(q);
4675 }
4676
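/*
 * (Re)arm the poll stats callback for another 100ms collection window,
 * provided polling stats are enabled and the callback isn't already active.
 */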
4677 static void blk_mq_poll_stats_start(struct request_queue *q)
4678 {
4679         /*
4680          * We don't arm the callback if polling stats are not enabled or the
4681          * callback is already active.
4682          */
4683         if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
4684                 return;
4685
4686         blk_stat_activate_msecs(q->poll_cb, 100);
4687 }
4688
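/*
 * Callback run when a poll stats collection window expires: copy every
 * bucket that gathered samples into q->poll_stat, where blk_mq_poll_nsecs()
 * picks them up.
 */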
4689 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
4690 {
4691         struct request_queue *q = cb->data;
4692         int bucket;
4693
4694         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
4695                 if (cb->stat[bucket].nr_samples)
4696                         q->poll_stat[bucket] = cb->stat[bucket];
4697         }
4698 }
4699
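/*
 * Estimate how long a hybrid poll should sleep before busy waiting: half the
 * mean completion time recorded for requests in the same direction/size
 * bucket, or 0 if no stats are available yet.
 */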
4700 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
4701                                        struct request *rq)
4702 {
4703         unsigned long ret = 0;
4704         int bucket;
4705
4706         /*
4707          * If stats collection isn't on, don't sleep but turn it on for
4708          * future users
4709          */
4710         if (!blk_poll_stats_enable(q))
4711                 return 0;
4712
4713         /*
4714          * As an optimistic guess, use half of the mean service time
4715          * for this type of request. We can (and should) make this smarter.
4716          * For instance, if the completion latencies are tight, we can
4717          * get closer than just half the mean. This is especially
4718          * important on devices where the completion latencies are longer
4719          * than ~10 usec. We do use the stats for the relevant IO size
4720          * if available which does lead to better estimates.
4721          */
4722         bucket = blk_mq_poll_stats_bkt(rq);
4723         if (bucket < 0)
4724                 return ret;
4725
4726         if (q->poll_stat[bucket].nr_samples)
4727                 ret = (q->poll_stat[bucket].mean + 1) / 2;
4728
4729         return ret;
4730 }
4731
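/*
 * Hybrid polling: instead of spinning right away, sleep for the expected
 * time until completion (q->poll_nsec if set, otherwise the stats based
 * estimate) and let the caller resume busy polling afterwards.  Returns
 * true if we slept, false if the caller should go straight to classic
 * polling.
 */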
4732 static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
4733 {
4734         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
4735         struct request *rq = blk_qc_to_rq(hctx, qc);
4736         struct hrtimer_sleeper hs;
4737         enum hrtimer_mode mode;
4738         unsigned int nsecs;
4739         ktime_t kt;
4740
4741         /*
4742          * If a request has completed on queue that uses an I/O scheduler, we
4743          * If a request has completed on a queue that uses an I/O scheduler, we
4744          */
4745         if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
4746                 return false;
4747
4748         /*
4749          * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
4750          *
4751          *  0:  use half of prev avg
4752          * >0:  use this specific value
4753          */
4754         if (q->poll_nsec > 0)
4755                 nsecs = q->poll_nsec;
4756         else
4757                 nsecs = blk_mq_poll_nsecs(q, rq);
4758
4759         if (!nsecs)
4760                 return false;
4761
4762         rq->rq_flags |= RQF_MQ_POLL_SLEPT;
4763
4764         /*
4765          * Sleep for the pre-sleep target computed above: the user-supplied
4766          * poll_nsec, or half the mean completion time from the stats.
4767          */
4768         kt = nsecs;
4769
4770         mode = HRTIMER_MODE_REL;
4771         hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
4772         hrtimer_set_expires(&hs.timer, kt);
4773
4774         do {
4775                 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
4776                         break;
4777                 set_current_state(TASK_UNINTERRUPTIBLE);
4778                 hrtimer_sleeper_start_expires(&hs, mode);
4779                 if (hs.task)
4780                         io_schedule();
4781                 hrtimer_cancel(&hs.timer);
4782                 mode = HRTIMER_MODE_ABS;
4783         } while (hs.task && !signal_pending(current));
4784
4785         __set_current_state(TASK_RUNNING);
4786         destroy_hrtimer_on_stack(&hs.timer);
4787
4788         /*
4789          * If we sleep, have the caller restart the poll loop to reset the
4790          * state.  Like for the other success return cases, the caller is
4791          * responsible for checking if the IO completed.  If the IO isn't
4792          * complete, we'll get called again and will go straight to the busy
4793          * poll loop.
4794          */
4795         return true;
4796 }
4797
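/*
 * Classic polling: spin on the driver's ->poll() callback until it reports
 * completed requests, the task has been woken or signalled, the driver
 * returns an error, BLK_POLL_ONESHOT limits us to a single pass, or we need
 * to reschedule.
 */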
4798 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
4799                                struct io_comp_batch *iob, unsigned int flags)
4800 {
4801         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
4802         long state = get_current_state();
4803         int ret;
4804
4805         do {
4806                 ret = q->mq_ops->poll(hctx, iob);
4807                 if (ret > 0) {
4808                         __set_current_state(TASK_RUNNING);
4809                         return ret;
4810                 }
4811
4812                 if (signal_pending_state(state, current))
4813                         __set_current_state(TASK_RUNNING);
4814                 if (task_is_running(current))
4815                         return 1;
4816
4817                 if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4818                         break;
4819                 cpu_relax();
4820         } while (!need_resched());
4821
4822         __set_current_state(TASK_RUNNING);
4823         return 0;
4824 }
4825
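/*
 * Poll the hardware queue identified by @cookie for completions: try a
 * hybrid sleep first unless BLK_POLL_NOSLEEP is set or the queue is
 * configured for classic polling only, then fall back to classic busy
 * polling.  For blk-mq devices this is what bio_poll() ends up calling.
 */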
4826 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
4827                 unsigned int flags)
4828 {
4829         if (!(flags & BLK_POLL_NOSLEEP) &&
4830             q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
4831                 if (blk_mq_poll_hybrid(q, cookie))
4832                         return 1;
4833         }
4834         return blk_mq_poll_classic(q, cookie, iob, flags);
4835 }
4836
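/* Return the CPU of the software queue this request was allocated on. */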
4837 unsigned int blk_mq_rq_cpu(struct request *rq)
4838 {
4839         return rq->mq_ctx->cpu;
4840 }
4841 EXPORT_SYMBOL(blk_mq_rq_cpu);
4842
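/*
 * Cancel the queue's deferred work and wait for any running instances to
 * finish: the requeue work plus the delayed run_work of every hardware
 * queue.
 */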
4843 void blk_mq_cancel_work_sync(struct request_queue *q)
4844 {
4845         if (queue_is_mq(q)) {
4846                 struct blk_mq_hw_ctx *hctx;
4847                 unsigned long i;
4848
4849                 cancel_delayed_work_sync(&q->requeue_work);
4850
4851                 queue_for_each_hw_ctx(q, hctx, i)
4852                         cancel_delayed_work_sync(&hctx->run_work);
4853         }
4854 }
4855
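/*
 * Boot-time setup: initialize the per-CPU completion lists, register the
 * block softirq handler and install the CPU hotplug callbacks that hand
 * over pending completions and hctx work when a CPU goes away.
 */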
4856 static int __init blk_mq_init(void)
4857 {
4858         int i;
4859
4860         for_each_possible_cpu(i)
4861                 init_llist_head(&per_cpu(blk_cpu_done, i));
4862         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4863
4864         cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4865                                   "block/softirq:dead", NULL,
4866                                   blk_softirq_cpu_dead);
4867         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4868                                 blk_mq_hctx_notify_dead);
4869         cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4870                                 blk_mq_hctx_notify_online,
4871                                 blk_mq_hctx_notify_offline);
4872         return 0;
4873 }
4874 subsys_initcall(blk_mq_init);