// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
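
/*
 * Worked example (illustrative only): the hash key is a request's *end*
 * position, so a back-merge candidate is found by looking up the start
 * sector of an incoming bio. A request at sector 100 spanning 8 sectors
 * hashes under key 108, and a bio whose first sector is 108 can be
 * appended to it.
 */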

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static inline bool elv_support_features(struct request_queue *q,
		const struct elevator_type *e)
{
	return (q->required_elevator_features & e->elevator_features) ==
		q->required_elevator_features;
}

/**
 * elevator_match - Test an elevator name and alias
 * @e: Scheduler to test
 * @name: Elevator name to test
 *
 * Return true if the elevator @e's name or alias matches @name.
 */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
	return !strcmp(e->elevator_name, name) ||
		(e->elevator_alias && !strcmp(e->elevator_alias, name));
}

static struct elevator_type *__elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list)
		if (elevator_match(e, name))
			return e;
	return NULL;
}

static struct elevator_type *elevator_find_get(struct request_queue *q,
		const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);
	e = __elevator_find(name);
	if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
		e = NULL;
	spin_unlock(&elv_list_lock);
	return e;
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	__elevator_get(e);
	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

void elevator_exit(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	ioc_clear_queue(q);
	blk_mq_sched_free_rqs(q);

	mutex_lock(&e->sysfs_lock);
	blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
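
/*
 * Usage sketch (hypothetical scheduler code, not part of this file): an
 * IO scheduler typically keeps a sector-sorted tree per data direction
 * and uses the helpers above to locate a front-merge candidate, i.e. a
 * request that begins exactly where the bio ends. "sort_list" below is
 * an assumed per-scheduler rb_root:
 *
 *	elv_rb_add(&sort_list[data_dir], rq);
 *	__rq = elv_rb_find(&sort_list[data_dir], bio_end_sector(bio));
 *	if (__rq)
 *		...attempt a front merge of bio into __rq...
 */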

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;

		if (blk_discard_mergable(__rq))
			return ELEVATOR_DISCARD_MERGE;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
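
/*
 * Worked example (illustrative): with a request covering sectors [100, 108)
 * cached in q->last_merge, a bio starting at sector 108 hits the one-hit
 * cache and blk_try_merge() reports ELEVATOR_BACK_MERGE. If the cache
 * misses, the same request is still found via elv_rqhash_find(q, 108),
 * since requests hash under their end sector. Only when both fast paths
 * fail does the scheduler's own ->request_merge() get a chance.
 */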

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
			      struct list_head *free)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
		list_add(&rq->queuelist, free);
		return true;
	}

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		list_add(&rq->queuelist, free);
		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}
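
/*
 * Worked example of the retry loop above (illustrative): with A = [0, 8)
 * and B = [8, 16) already on the hash, inserting rq = [16, 24) appends rq
 * to B, giving B = [8, 24). The loop retries with rq = B, finds A ending
 * at B's start sector and folds B into A as well, leaving A = [0, 24) and
 * both absorbed requests on the 'free' list.
 */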

void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.former_request)
		return e->type->ops.former_request(q, rq);

	return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q, bool uevent)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	lockdep_assert_held(&q->sysfs_lock);

	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
	}
}

int elv_register(struct elevator_type *e)
{
	/* insert_requests and dispatch_request are mandatory */
	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
		return -EINVAL;

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (__elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
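
/*
 * Registration sketch (hypothetical, for illustration only): a minimal
 * scheduler module supplies at least the two mandatory ops checked above.
 * The "foo_*" names are made up:
 *
 *	static struct elevator_type foo_sched = {
 *		.ops = {
 *			.insert_requests	= foo_insert_requests,
 *			.dispatch_request	= foo_dispatch_request,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&foo_sched);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		elv_unregister(&foo_sched);
 *	}
 */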

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

static inline bool elv_support_iosched(struct request_queue *q)
{
	if (!queue_is_mq(q) ||
	    (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
		return false;
	return true;
}

/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues or mq-deadline is not available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
	if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return NULL;

	if (q->nr_hw_queues != 1 &&
	    !blk_mq_is_shared_tags(q->tag_set->flags))
		return NULL;

	return elevator_find_get(q, "mq-deadline");
}

/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
	struct elevator_type *e, *found = NULL;

	spin_lock(&elv_list_lock);

	list_for_each_entry(e, &elv_list, list) {
		if (elv_support_features(q, e)) {
			found = e;
			break;
		}
	}

	if (found && !elevator_tryget(found))
		found = NULL;

	spin_unlock(&elv_list_lock);
	return found;
}

/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first elevator available matching the required
 * features. If no suitable elevator is found or if the chosen elevator
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err;

	if (!elv_support_iosched(q))
		return;

	WARN_ON_ONCE(blk_queue_registered(q));

	if (unlikely(q->elevator))
		return;

	if (!q->required_elevator_features)
		e = elevator_get_default(q);
	else
		e = elevator_get_by_features(q);
	if (!e)
		return;

	/*
	 * We are called before adding disk, when there isn't any FS I/O,
	 * so freezing queue plus canceling dispatch work is enough to
	 * drain any dispatch activities originated from passthrough
	 * requests, then no need to quiesce queue which may add long boot
	 * latency, especially when lots of disks are involved.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_cancel_work_sync(q);

	err = blk_mq_init_sched(q, e);

	blk_mq_unfreeze_queue(q);

	if (err) {
		pr_warn("\"%s\" elevator initialization failed, "
			"falling back to \"none\"\n", e->elevator_name);
	}

	elevator_put(e);
}

/*
 * Switch to new_e io scheduler.
 *
 * If switching fails, we are most likely running out of memory and not able
 * to restore the old io scheduler, so we leave the io scheduler as none.
 */
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	if (q->elevator) {
		elv_unregister_queue(q);
		elevator_exit(q);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out_unfreeze;

	ret = elv_register_queue(q, true);
	if (ret) {
		elevator_exit(q);
		goto out_unfreeze;
	}
	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

out_unfreeze:
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	if (ret) {
		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
			new_e->elevator_name);
	}

	return ret;
}

void elevator_disable(struct request_queue *q)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	elv_unregister_queue(q);
	elevator_exit(q);
	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
	q->elevator = NULL;
	q->nr_requests = q->tag_set->queue_depth;
	blk_add_trace_msg(q, "elv switch: none");

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int elevator_change(struct request_queue *q, const char *elevator_name)
{
	struct elevator_type *e;
	int ret;

	/* Make sure queue is not in the middle of being removed */
	if (!blk_queue_registered(q))
		return -ENOENT;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (!strncmp(elevator_name, "none", 4)) {
		if (q->elevator)
			elevator_disable(q);
		return 0;
	}

	if (q->elevator && elevator_match(q->elevator->type, elevator_name))
		return 0;

	e = elevator_find_get(q, elevator_name);
	if (!e) {
		request_module("%s-iosched", elevator_name);
		e = elevator_find_get(q, elevator_name);
		if (!e)
			return -EINVAL;
	}
	ret = elevator_switch(q, e);
	elevator_put(e);
	return ret;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	int ret;

	if (!elv_support_iosched(q))
		return count;

	strlcpy(elevator_name, buf, sizeof(elevator_name));
	ret = elevator_change(q, strstrip(elevator_name));
	if (!ret)
		return count;
	return ret;
}
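
/*
 * elv_iosched_store() backs writes to the per-device sysfs attribute, so
 * switching schedulers from userspace looks like (example device name):
 *
 *	echo kyber > /sys/block/sda/queue/scheduler
 *	echo none > /sys/block/sda/queue/scheduler
 */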

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *eq = q->elevator;
	struct elevator_type *cur = NULL, *e;
	int len = 0;

	if (!elv_support_iosched(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		cur = eq->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(e, &elv_list, list) {
		if (e == cur) {
			len += sprintf(name+len, "[%s] ", cur->elevator_name);
			continue;
		}
		if (elv_support_features(q, e))
			len += sprintf(name+len, "%s ", e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	if (q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(name+len, "\n");
	return len;
}
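
/*
 * elv_iosched_show() is the matching read side: the active scheduler is
 * bracketed and "none" is appended when a scheduler is set. The exact
 * output depends on which schedulers are registered, e.g.:
 *
 *	$ cat /sys/block/sda/queue/scheduler
 *	[mq-deadline] kyber bfq none
 */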

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
		"Please use sysfs to set IO scheduler for individual devices.\n");
	return 1;
}

__setup("elevator=", elevator_setup);