1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
5 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
7 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
13 * This handles all read/write requests to block devices
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-pm.h>
20 #include <linux/blk-integrity.h>
21 #include <linux/highmem.h>
23 #include <linux/pagemap.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/string.h>
26 #include <linux/init.h>
27 #include <linux/completion.h>
28 #include <linux/slab.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/fault-inject.h>
33 #include <linux/list_sort.h>
34 #include <linux/delay.h>
35 #include <linux/ratelimit.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/t10-pi.h>
38 #include <linux/debugfs.h>
39 #include <linux/bpf.h>
40 #include <linux/psi.h>
41 #include <linux/part_stat.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/blk-crypto.h>
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/block.h>
49 #include "blk-mq-sched.h"
51 #include "blk-cgroup.h"
52 #include "blk-throttle.h"
54 struct dentry *blk_debugfs_root;
56 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
57 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
58 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
59 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
60 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
61 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
63 DEFINE_IDA(blk_queue_ida);
66 * For queue allocation
68 struct kmem_cache *blk_requestq_cachep;
69 struct kmem_cache *blk_requestq_srcu_cachep;
72 * Controlling structure to kblockd
74 static struct workqueue_struct *kblockd_workqueue;
77 * blk_queue_flag_set - atomically set a queue flag
78 * @flag: flag to be set
81 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
83 set_bit(flag, &q->queue_flags);
85 EXPORT_SYMBOL(blk_queue_flag_set);
88 * blk_queue_flag_clear - atomically clear a queue flag
89 * @flag: flag to be cleared
92 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
94 clear_bit(flag, &q->queue_flags);
96 EXPORT_SYMBOL(blk_queue_flag_clear);
99 * blk_queue_flag_test_and_set - atomically test and set a queue flag
100 * @flag: flag to be set
103 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
104 * the flag was already set.
106 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
108 return test_and_set_bit(flag, &q->queue_flags);
110 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
112 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
113 static const char *const blk_op_name[] = {
117 REQ_OP_NAME(DISCARD),
118 REQ_OP_NAME(SECURE_ERASE),
119 REQ_OP_NAME(ZONE_RESET),
120 REQ_OP_NAME(ZONE_RESET_ALL),
121 REQ_OP_NAME(ZONE_OPEN),
122 REQ_OP_NAME(ZONE_CLOSE),
123 REQ_OP_NAME(ZONE_FINISH),
124 REQ_OP_NAME(ZONE_APPEND),
125 REQ_OP_NAME(WRITE_ZEROES),
127 REQ_OP_NAME(DRV_OUT),
132 * blk_op_str - Return string XXX in the REQ_OP_XXX.
135 * Description: Centralize block layer function to convert REQ_OP_XXX into
136 * string format. Useful in the debugging and tracing bio or request. For
137 * invalid REQ_OP_XXX it returns string "UNKNOWN".
139 inline const char *blk_op_str(enum req_op op)
141 const char *op_str = "UNKNOWN";
143 if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
144 op_str = blk_op_name[op];
148 EXPORT_SYMBOL_GPL(blk_op_str);
150 static const struct {
154 [BLK_STS_OK] = { 0, "" },
155 [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
156 [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
157 [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
158 [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
159 [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
160 [BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
161 [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
162 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
163 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
164 [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
165 [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
166 [BLK_STS_OFFLINE] = { -ENODEV, "device offline" },
168 /* device mapper special case, should not leak out: */
169 [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
171 /* zone device specific errors */
172 [BLK_STS_ZONE_OPEN_RESOURCE] = { -ETOOMANYREFS, "open zones exceeded" },
173 [BLK_STS_ZONE_ACTIVE_RESOURCE] = { -EOVERFLOW, "active zones exceeded" },
175 /* everything else not covered above: */
176 [BLK_STS_IOERR] = { -EIO, "I/O" },
179 blk_status_t errno_to_blk_status(int errno)
183 for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
184 if (blk_errors[i].errno == errno)
185 return (__force blk_status_t)i;
188 return BLK_STS_IOERR;
190 EXPORT_SYMBOL_GPL(errno_to_blk_status);
192 int blk_status_to_errno(blk_status_t status)
194 int idx = (__force int)status;
196 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
198 return blk_errors[idx].errno;
200 EXPORT_SYMBOL_GPL(blk_status_to_errno);
202 const char *blk_status_to_str(blk_status_t status)
204 int idx = (__force int)status;
206 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
208 return blk_errors[idx].name;
212 * blk_sync_queue - cancel any pending callbacks on a queue
216 * The block layer may perform asynchronous callback activity
217 * on a queue, such as calling the unplug function after a timeout.
218 * A block device may call blk_sync_queue to ensure that any
219 * such activity is cancelled, thus allowing it to release resources
220 * that the callbacks might use. The caller must already have made sure
221 * that its ->submit_bio will not re-add plugging prior to calling
224 * This function does not cancel any asynchronous activity arising
225 * out of elevator or throttling code. That would require elevator_exit()
226 * and blkcg_exit_queue() to be called with queue lock initialized.
229 void blk_sync_queue(struct request_queue *q)
231 del_timer_sync(&q->timeout);
232 cancel_work_sync(&q->timeout_work);
234 EXPORT_SYMBOL(blk_sync_queue);
237 * blk_set_pm_only - increment pm_only counter
238 * @q: request queue pointer
240 void blk_set_pm_only(struct request_queue *q)
242 atomic_inc(&q->pm_only);
244 EXPORT_SYMBOL_GPL(blk_set_pm_only);
246 void blk_clear_pm_only(struct request_queue *q)
250 pm_only = atomic_dec_return(&q->pm_only);
251 WARN_ON_ONCE(pm_only < 0);
253 wake_up_all(&q->mq_freeze_wq);
255 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
258 * blk_put_queue - decrement the request_queue refcount
259 * @q: the request_queue structure to decrement the refcount for
261 * Decrements the refcount of the request_queue kobject. When this reaches 0
262 * we'll have blk_release_queue() called.
264 * Context: Any context, but the last reference must not be dropped from
267 void blk_put_queue(struct request_queue *q)
269 kobject_put(&q->kobj);
271 EXPORT_SYMBOL(blk_put_queue);
273 void blk_queue_start_drain(struct request_queue *q)
276 * When queue DYING flag is set, we need to block new req
277 * entering queue, so we call blk_freeze_queue_start() to
278 * prevent I/O from crossing blk_queue_enter().
280 blk_freeze_queue_start(q);
282 blk_mq_wake_waiters(q);
283 /* Make blk_queue_enter() reexamine the DYING flag. */
284 wake_up_all(&q->mq_freeze_wq);
288 * blk_queue_enter() - try to increase q->q_usage_counter
289 * @q: request queue pointer
290 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
292 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
294 const bool pm = flags & BLK_MQ_REQ_PM;
296 while (!blk_try_enter_queue(q, pm)) {
297 if (flags & BLK_MQ_REQ_NOWAIT)
301 * read pair of barrier in blk_freeze_queue_start(), we need to
302 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
303 * reading .mq_freeze_depth or queue dying flag, otherwise the
304 * following wait may never return if the two reads are
308 wait_event(q->mq_freeze_wq,
309 (!q->mq_freeze_depth &&
310 blk_pm_resume_queue(pm, q)) ||
312 if (blk_queue_dying(q))
319 int __bio_queue_enter(struct request_queue *q, struct bio *bio)
321 while (!blk_try_enter_queue(q, false)) {
322 struct gendisk *disk = bio->bi_bdev->bd_disk;
324 if (bio->bi_opf & REQ_NOWAIT) {
325 if (test_bit(GD_DEAD, &disk->state))
327 bio_wouldblock_error(bio);
332 * read pair of barrier in blk_freeze_queue_start(), we need to
333 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
334 * reading .mq_freeze_depth or queue dying flag, otherwise the
335 * following wait may never return if the two reads are
339 wait_event(q->mq_freeze_wq,
340 (!q->mq_freeze_depth &&
341 blk_pm_resume_queue(false, q)) ||
342 test_bit(GD_DEAD, &disk->state));
343 if (test_bit(GD_DEAD, &disk->state))
353 void blk_queue_exit(struct request_queue *q)
355 percpu_ref_put(&q->q_usage_counter);
358 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
360 struct request_queue *q =
361 container_of(ref, struct request_queue, q_usage_counter);
363 wake_up_all(&q->mq_freeze_wq);
366 static void blk_rq_timed_out_timer(struct timer_list *t)
368 struct request_queue *q = from_timer(q, t, timeout);
370 kblockd_schedule_work(&q->timeout_work);
373 static void blk_timeout_work(struct work_struct *work)
377 struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
379 struct request_queue *q;
382 q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
383 GFP_KERNEL | __GFP_ZERO, node_id);
388 blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
389 if (init_srcu_struct(q->srcu) != 0)
393 q->last_merge = NULL;
395 q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
399 ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
403 q->stats = blk_alloc_queue_stats();
409 atomic_set(&q->nr_active_requests_shared_tags, 0);
411 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
412 INIT_WORK(&q->timeout_work, blk_timeout_work);
413 INIT_LIST_HEAD(&q->icq_list);
415 kobject_init(&q->kobj, &blk_queue_ktype);
417 mutex_init(&q->debugfs_mutex);
418 mutex_init(&q->sysfs_lock);
419 mutex_init(&q->sysfs_dir_lock);
420 spin_lock_init(&q->queue_lock);
422 init_waitqueue_head(&q->mq_freeze_wq);
423 mutex_init(&q->mq_freeze_lock);
426 * Init percpu_ref in atomic mode so that it's faster to shutdown.
427 * See blk_register_queue() for details.
429 if (percpu_ref_init(&q->q_usage_counter,
430 blk_queue_usage_counter_release,
431 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
434 blk_queue_dma_alignment(q, 511);
435 blk_set_default_limits(&q->limits);
436 q->nr_requests = BLKDEV_DEFAULT_RQ;
441 blk_free_queue_stats(q->stats);
443 bioset_exit(&q->bio_split);
445 ida_free(&blk_queue_ida, q->id);
448 cleanup_srcu_struct(q->srcu);
450 kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
455 * blk_get_queue - increment the request_queue refcount
456 * @q: the request_queue structure to increment the refcount for
458 * Increment the refcount of the request_queue kobject.
460 * Context: Any context.
462 bool blk_get_queue(struct request_queue *q)
464 if (unlikely(blk_queue_dying(q)))
466 kobject_get(&q->kobj);
469 EXPORT_SYMBOL(blk_get_queue);
471 #ifdef CONFIG_FAIL_MAKE_REQUEST
473 static DECLARE_FAULT_ATTR(fail_make_request);
475 static int __init setup_fail_make_request(char *str)
477 return setup_fault_attr(&fail_make_request, str);
479 __setup("fail_make_request=", setup_fail_make_request);
481 bool should_fail_request(struct block_device *part, unsigned int bytes)
483 return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
486 static int __init fail_make_request_debugfs(void)
488 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
489 NULL, &fail_make_request);
491 return PTR_ERR_OR_ZERO(dir);
494 late_initcall(fail_make_request_debugfs);
495 #endif /* CONFIG_FAIL_MAKE_REQUEST */
497 static inline bool bio_check_ro(struct bio *bio)
499 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
500 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
502 pr_warn("Trying to write to read-only block-device %pg\n",
504 /* Older lvm-tools actually trigger this */
511 static noinline int should_fail_bio(struct bio *bio)
513 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
517 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
520 * Check whether this bio extends beyond the end of the device or partition.
521 * This may well happen - the kernel calls bread() without checking the size of
522 * the device, e.g., when mounting a file system.
524 static inline int bio_check_eod(struct bio *bio)
526 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
527 unsigned int nr_sectors = bio_sectors(bio);
529 if (nr_sectors && maxsector &&
530 (nr_sectors > maxsector ||
531 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
532 pr_info_ratelimited("%s: attempt to access beyond end of device\n"
533 "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
534 current->comm, bio->bi_bdev, bio->bi_opf,
535 bio->bi_iter.bi_sector, nr_sectors, maxsector);
542 * Remap block n of partition p to block n+start(p) of the disk.
544 static int blk_partition_remap(struct bio *bio)
546 struct block_device *p = bio->bi_bdev;
548 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
550 if (bio_sectors(bio)) {
551 bio->bi_iter.bi_sector += p->bd_start_sect;
552 trace_block_bio_remap(bio, p->bd_dev,
553 bio->bi_iter.bi_sector -
556 bio_set_flag(bio, BIO_REMAPPED);
561 * Check write append to a zoned block device.
563 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
566 int nr_sectors = bio_sectors(bio);
568 /* Only applicable to zoned block devices */
569 if (!bdev_is_zoned(bio->bi_bdev))
570 return BLK_STS_NOTSUPP;
572 /* The bio sector must point to the start of a sequential zone */
573 if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
574 !bio_zone_is_seq(bio))
575 return BLK_STS_IOERR;
578 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
579 * split and could result in non-contiguous sectors being written in
582 if (nr_sectors > q->limits.chunk_sectors)
583 return BLK_STS_IOERR;
585 /* Make sure the BIO is small enough and will not get split */
586 if (nr_sectors > q->limits.max_zone_append_sectors)
587 return BLK_STS_IOERR;
589 bio->bi_opf |= REQ_NOMERGE;
594 static void __submit_bio(struct bio *bio)
596 struct gendisk *disk = bio->bi_bdev->bd_disk;
598 if (unlikely(!blk_crypto_bio_prep(&bio)))
601 if (!disk->fops->submit_bio) {
602 blk_mq_submit_bio(bio);
603 } else if (likely(bio_queue_enter(bio) == 0)) {
604 disk->fops->submit_bio(bio);
605 blk_queue_exit(disk->queue);
610 * The loop in this function may be a bit non-obvious, and so deserves some
613 * - Before entering the loop, bio->bi_next is NULL (as all callers ensure
614 * that), so we have a list with a single bio.
615 * - We pretend that we have just taken it off a longer list, so we assign
616 * bio_list to a pointer to the bio_list_on_stack, thus initialising the
617 * bio_list of new bios to be added. ->submit_bio() may indeed add some more
618 * bios through a recursive call to submit_bio_noacct. If it did, we find a
619 * non-NULL value in bio_list and re-enter the loop from the top.
620 * - In this case we really did just take the bio of the top of the list (no
621 * pretending) and so remove it from bio_list, and call into ->submit_bio()
624 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
625 * bio_list_on_stack[1] contains bios that were submitted before the current
626 * ->submit_bio, but that haven't been processed yet.
628 static void __submit_bio_noacct(struct bio *bio)
630 struct bio_list bio_list_on_stack[2];
632 BUG_ON(bio->bi_next);
634 bio_list_init(&bio_list_on_stack[0]);
635 current->bio_list = bio_list_on_stack;
638 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
639 struct bio_list lower, same;
642 * Create a fresh bio_list for all subordinate requests.
644 bio_list_on_stack[1] = bio_list_on_stack[0];
645 bio_list_init(&bio_list_on_stack[0]);
650 * Sort new bios into those for a lower level and those for the
653 bio_list_init(&lower);
654 bio_list_init(&same);
655 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
656 if (q == bdev_get_queue(bio->bi_bdev))
657 bio_list_add(&same, bio);
659 bio_list_add(&lower, bio);
662 * Now assemble so we handle the lowest level first.
664 bio_list_merge(&bio_list_on_stack[0], &lower);
665 bio_list_merge(&bio_list_on_stack[0], &same);
666 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
667 } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
669 current->bio_list = NULL;
672 static void __submit_bio_noacct_mq(struct bio *bio)
674 struct bio_list bio_list[2] = { };
676 current->bio_list = bio_list;
680 } while ((bio = bio_list_pop(&bio_list[0])));
682 current->bio_list = NULL;
685 void submit_bio_noacct_nocheck(struct bio *bio)
688 * We only want one ->submit_bio to be active at a time, else stack
689 * usage with stacked devices could be a problem. Use current->bio_list
690 * to collect a list of requests submited by a ->submit_bio method while
691 * it is active, and then process them after it returned.
693 if (current->bio_list)
694 bio_list_add(¤t->bio_list[0], bio);
695 else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
696 __submit_bio_noacct_mq(bio);
698 __submit_bio_noacct(bio);
702 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
703 * @bio: The bio describing the location in memory and on the device.
705 * This is a version of submit_bio() that shall only be used for I/O that is
706 * resubmitted to lower level drivers by stacking block drivers. All file
707 * systems and other upper level users of the block layer should use
708 * submit_bio() instead.
710 void submit_bio_noacct(struct bio *bio)
712 struct block_device *bdev = bio->bi_bdev;
713 struct request_queue *q = bdev_get_queue(bdev);
714 blk_status_t status = BLK_STS_IOERR;
715 struct blk_plug *plug;
719 plug = blk_mq_plug(bio);
720 if (plug && plug->nowait)
721 bio->bi_opf |= REQ_NOWAIT;
724 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
725 * if queue does not support NOWAIT.
727 if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
730 if (should_fail_bio(bio))
732 if (unlikely(bio_check_ro(bio)))
734 if (!bio_flagged(bio, BIO_REMAPPED)) {
735 if (unlikely(bio_check_eod(bio)))
737 if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
742 * Filter flush bio's early so that bio based drivers without flush
743 * support don't have to worry about them.
745 if (op_is_flush(bio->bi_opf) &&
746 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
747 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
748 if (!bio_sectors(bio)) {
754 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
755 bio_clear_polled(bio);
757 switch (bio_op(bio)) {
759 if (!bdev_max_discard_sectors(bdev))
762 case REQ_OP_SECURE_ERASE:
763 if (!bdev_max_secure_erase_sectors(bdev))
766 case REQ_OP_ZONE_APPEND:
767 status = blk_check_zone_append(q, bio);
768 if (status != BLK_STS_OK)
771 case REQ_OP_ZONE_RESET:
772 case REQ_OP_ZONE_OPEN:
773 case REQ_OP_ZONE_CLOSE:
774 case REQ_OP_ZONE_FINISH:
775 if (!bdev_is_zoned(bio->bi_bdev))
778 case REQ_OP_ZONE_RESET_ALL:
779 if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
782 case REQ_OP_WRITE_ZEROES:
783 if (!q->limits.max_write_zeroes_sectors)
790 if (blk_throtl_bio(bio))
793 blk_cgroup_bio_start(bio);
794 blkcg_bio_issue_init(bio);
796 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
797 trace_block_bio_queue(bio);
798 /* Now that enqueuing has been traced, we need to trace
799 * completion as well.
801 bio_set_flag(bio, BIO_TRACE_COMPLETION);
803 submit_bio_noacct_nocheck(bio);
807 status = BLK_STS_NOTSUPP;
809 bio->bi_status = status;
812 EXPORT_SYMBOL(submit_bio_noacct);
815 * submit_bio - submit a bio to the block device layer for I/O
816 * @bio: The &struct bio which describes the I/O
818 * submit_bio() is used to submit I/O requests to block devices. It is passed a
819 * fully set up &struct bio that describes the I/O that needs to be done. The
820 * bio will be send to the device described by the bi_bdev field.
822 * The success/failure status of the request, along with notification of
823 * completion, is delivered asynchronously through the ->bi_end_io() callback
824 * in @bio. The bio must NOT be touched by thecaller until ->bi_end_io() has
827 void submit_bio(struct bio *bio)
829 if (blkcg_punt_bio_submit(bio))
832 if (bio_op(bio) == REQ_OP_READ) {
833 task_io_account_read(bio->bi_iter.bi_size);
834 count_vm_events(PGPGIN, bio_sectors(bio));
835 } else if (bio_op(bio) == REQ_OP_WRITE) {
836 count_vm_events(PGPGOUT, bio_sectors(bio));
840 * If we're reading data that is part of the userspace workingset, count
841 * submission time as memory stall. When the device is congested, or
842 * the submitting cgroup IO-throttled, submission can be a significant
843 * part of overall IO time.
845 if (unlikely(bio_op(bio) == REQ_OP_READ &&
846 bio_flagged(bio, BIO_WORKINGSET))) {
847 unsigned long pflags;
849 psi_memstall_enter(&pflags);
850 submit_bio_noacct(bio);
851 psi_memstall_leave(&pflags);
855 submit_bio_noacct(bio);
857 EXPORT_SYMBOL(submit_bio);
860 * bio_poll - poll for BIO completions
861 * @bio: bio to poll for
862 * @iob: batches of IO
863 * @flags: BLK_POLL_* flags that control the behavior
865 * Poll for completions on queue associated with the bio. Returns number of
866 * completed entries found.
868 * Note: the caller must either be the context that submitted @bio, or
869 * be in a RCU critical section to prevent freeing of @bio.
871 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
873 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
874 blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
877 if (cookie == BLK_QC_T_NONE ||
878 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
881 blk_flush_plug(current->plug, false);
883 if (bio_queue_enter(bio))
885 if (queue_is_mq(q)) {
886 ret = blk_mq_poll(q, cookie, iob, flags);
888 struct gendisk *disk = q->disk;
890 if (disk && disk->fops->poll_bio)
891 ret = disk->fops->poll_bio(bio, iob, flags);
896 EXPORT_SYMBOL_GPL(bio_poll);
899 * Helper to implement file_operations.iopoll. Requires the bio to be stored
900 * in iocb->private, and cleared before freeing the bio.
902 int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
909 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
910 * point to a freshly allocated bio at this point. If that happens
911 * we have a few cases to consider:
913 * 1) the bio is beeing initialized and bi_bdev is NULL. We can just
914 * simply nothing in this case
915 * 2) the bio points to a not poll enabled device. bio_poll will catch
917 * 3) the bio points to a poll capable device, including but not
918 * limited to the one that the original bio pointed to. In this
919 * case we will call into the actual poll method and poll for I/O,
920 * even if we don't need to, but it won't cause harm either.
922 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
923 * is still allocated. Because partitions hold a reference to the whole
924 * device bdev and thus disk, the disk is also still valid. Grabbing
925 * a reference to the queue in bio_poll() ensures the hctxs and requests
926 * are still valid as well.
929 bio = READ_ONCE(kiocb->private);
930 if (bio && bio->bi_bdev)
931 ret = bio_poll(bio, iob, flags);
936 EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
938 void update_io_ticks(struct block_device *part, unsigned long now, bool end)
942 stamp = READ_ONCE(part->bd_stamp);
943 if (unlikely(time_after(now, stamp))) {
944 if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
945 __part_stat_add(part, io_ticks, end ? now - stamp : 1);
947 if (part->bd_partno) {
948 part = bdev_whole(part);
953 unsigned long bdev_start_io_acct(struct block_device *bdev,
954 unsigned int sectors, enum req_op op,
955 unsigned long start_time)
957 const int sgrp = op_stat_group(op);
960 update_io_ticks(bdev, start_time, false);
961 part_stat_inc(bdev, ios[sgrp]);
962 part_stat_add(bdev, sectors[sgrp], sectors);
963 part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
968 EXPORT_SYMBOL(bdev_start_io_acct);
971 * bio_start_io_acct_time - start I/O accounting for bio based drivers
972 * @bio: bio to start account for
973 * @start_time: start time that should be passed back to bio_end_io_acct().
975 void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
977 bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
978 bio_op(bio), start_time);
980 EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
983 * bio_start_io_acct - start I/O accounting for bio based drivers
984 * @bio: bio to start account for
986 * Returns the start time that should be passed back to bio_end_io_acct().
988 unsigned long bio_start_io_acct(struct bio *bio)
990 return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
991 bio_op(bio), jiffies);
993 EXPORT_SYMBOL_GPL(bio_start_io_acct);
995 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
996 unsigned long start_time)
998 const int sgrp = op_stat_group(op);
999 unsigned long now = READ_ONCE(jiffies);
1000 unsigned long duration = now - start_time;
1003 update_io_ticks(bdev, now, true);
1004 part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
1005 part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
1008 EXPORT_SYMBOL(bdev_end_io_acct);
1010 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1011 struct block_device *orig_bdev)
1013 bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
1015 EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
1018 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1019 * @q : the queue of the device being checked
1022 * Check if underlying low-level drivers of a device are busy.
1023 * If the drivers want to export their busy state, they must set own
1024 * exporting function using blk_queue_lld_busy() first.
1026 * Basically, this function is used only by request stacking drivers
1027 * to stop dispatching requests to underlying devices when underlying
1028 * devices are busy. This behavior helps more I/O merging on the queue
1029 * of the request stacking driver and prevents I/O throughput regression
1030 * on burst I/O load.
1033 * 0 - Not busy (The request stacking driver should dispatch request)
1034 * 1 - Busy (The request stacking driver should stop dispatching request)
1036 int blk_lld_busy(struct request_queue *q)
1038 if (queue_is_mq(q) && q->mq_ops->busy)
1039 return q->mq_ops->busy(q);
1043 EXPORT_SYMBOL_GPL(blk_lld_busy);
1045 int kblockd_schedule_work(struct work_struct *work)
1047 return queue_work(kblockd_workqueue, work);
1049 EXPORT_SYMBOL(kblockd_schedule_work);
1051 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1052 unsigned long delay)
1054 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1056 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1058 void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
1060 struct task_struct *tsk = current;
1063 * If this is a nested plug, don't actually assign it.
1068 plug->mq_list = NULL;
1069 plug->cached_rq = NULL;
1070 plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
1072 plug->multiple_queues = false;
1073 plug->has_elevator = false;
1074 plug->nowait = false;
1075 INIT_LIST_HEAD(&plug->cb_list);
1078 * Store ordering should not be needed here, since a potential
1079 * preempt will imply a full memory barrier
1085 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1086 * @plug: The &struct blk_plug that needs to be initialized
1089 * blk_start_plug() indicates to the block layer an intent by the caller
1090 * to submit multiple I/O requests in a batch. The block layer may use
1091 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1092 * is called. However, the block layer may choose to submit requests
1093 * before a call to blk_finish_plug() if the number of queued I/Os
1094 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1095 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1096 * the task schedules (see below).
1098 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1099 * pending I/O should the task end up blocking between blk_start_plug() and
1100 * blk_finish_plug(). This is important from a performance perspective, but
1101 * also ensures that we don't deadlock. For instance, if the task is blocking
1102 * for a memory allocation, memory reclaim could end up wanting to free a
1103 * page belonging to that request that is currently residing in our private
1104 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
1105 * this kind of deadlock.
1107 void blk_start_plug(struct blk_plug *plug)
1109 blk_start_plug_nr_ios(plug, 1);
1111 EXPORT_SYMBOL(blk_start_plug);
1113 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1115 LIST_HEAD(callbacks);
1117 while (!list_empty(&plug->cb_list)) {
1118 list_splice_init(&plug->cb_list, &callbacks);
1120 while (!list_empty(&callbacks)) {
1121 struct blk_plug_cb *cb = list_first_entry(&callbacks,
1124 list_del(&cb->list);
1125 cb->callback(cb, from_schedule);
1130 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1133 struct blk_plug *plug = current->plug;
1134 struct blk_plug_cb *cb;
1139 list_for_each_entry(cb, &plug->cb_list, list)
1140 if (cb->callback == unplug && cb->data == data)
1143 /* Not currently on the callback list */
1144 BUG_ON(size < sizeof(*cb));
1145 cb = kzalloc(size, GFP_ATOMIC);
1148 cb->callback = unplug;
1149 list_add(&cb->list, &plug->cb_list);
1153 EXPORT_SYMBOL(blk_check_plugged);
1155 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
1157 if (!list_empty(&plug->cb_list))
1158 flush_plug_callbacks(plug, from_schedule);
1159 if (!rq_list_empty(plug->mq_list))
1160 blk_mq_flush_plug_list(plug, from_schedule);
1162 * Unconditionally flush out cached requests, even if the unplug
1163 * event came from schedule. Since we know hold references to the
1164 * queue for cached requests, we don't want a blocked task holding
1165 * up a queue freeze/quiesce event.
1167 if (unlikely(!rq_list_empty(plug->cached_rq)))
1168 blk_mq_free_plug_rqs(plug);
1172 * blk_finish_plug - mark the end of a batch of submitted I/O
1173 * @plug: The &struct blk_plug passed to blk_start_plug()
1176 * Indicate that a batch of I/O submissions is complete. This function
1177 * must be paired with an initial call to blk_start_plug(). The intent
1178 * is to allow the block layer to optimize I/O submission. See the
1179 * documentation for blk_start_plug() for more information.
1181 void blk_finish_plug(struct blk_plug *plug)
1183 if (plug == current->plug) {
1184 __blk_flush_plug(plug, false);
1185 current->plug = NULL;
1188 EXPORT_SYMBOL(blk_finish_plug);
1190 void blk_io_schedule(void)
1192 /* Prevent hang_check timer from firing at us during very long I/O */
1193 unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1196 io_schedule_timeout(timeout);
1200 EXPORT_SYMBOL_GPL(blk_io_schedule);
1202 int __init blk_dev_init(void)
1204 BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
1205 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1206 sizeof_field(struct request, cmd_flags));
1207 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1208 sizeof_field(struct bio, bi_opf));
1209 BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
1210 __alignof__(struct request_queue)) !=
1211 sizeof(struct request_queue));
1213 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
1214 kblockd_workqueue = alloc_workqueue("kblockd",
1215 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1216 if (!kblockd_workqueue)
1217 panic("Failed to create kblockd\n");
1219 blk_requestq_cachep = kmem_cache_create("request_queue",
1220 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1222 blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
1223 sizeof(struct request_queue) +
1224 sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
1226 blk_debugfs_root = debugfs_create_dir("block", NULL);