// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */
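/*
 * Worked example of the defaults above: a read inserted at time t must be
 * dispatched no later than t + 500 ms (HZ / 2 jiffies) and a write no later
 * than t + 5 s, while up to fifo_batch (16) requests may be dispatched in
 * sector-sorted order before these deadlines are checked again.
 */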
enum dd_data_dir { DD_READ = READ, DD_WRITE = WRITE };

enum { DD_DIR_COUNT = 2 };

enum dd_prio { DD_RT_PRIO = 0, DD_BE_PRIO = 1, DD_IDLE_PRIO = 2, DD_PRIO_MAX = 2 };

enum { DD_PRIO_COUNT = 3 };
/* I/O statistics per I/O priority. */
struct io_stats_per_prio {
	local_t inserted;
	local_t merged;
	local_t dispatched;
	local_t completed;
};

/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
	struct io_stats_per_prio stats[DD_PRIO_COUNT];
};
/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in FIFO order. Read, write or both are NULL. */
	struct request *next_rq[DD_DIR_COUNT];
};
struct deadline_data {
	/* run time data */
	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	struct io_stats __percpu *stats;

	/* settings that change how the i/o scheduler behaves */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;

	spinlock_t lock;
	spinlock_t zone_lock;
};
/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do {				\
	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	local_inc(&io_stats->stats[(prio)].event_type);			\
	put_cpu_ptr(io_stats);						\
} while (0)
/*
 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 * CPUs. No locking or barriers since it is fine if the returned sum is slightly
 * outdated.
 */
#define dd_sum(dd, event_type, prio) ({					\
	unsigned int cpu;						\
	u32 sum = 0;							\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	for_each_present_cpu(cpu)					\
		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
				  stats[(prio)].event_type);		\
	sum;								\
})
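/*
 * Example use of the two macros above: dd_queued() below computes
 * dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio), i.e. an
 * approximate count of requests that have been inserted but not yet completed
 * for one priority level. The result may be slightly stale because the
 * per-CPU counters are read without synchronization.
 */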
/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};
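/*
 * Example: a request submitted with I/O priority class IOPRIO_CLASS_RT is
 * queued in dd->per_prio[DD_RT_PRIO] and is therefore considered for dispatch
 * before best-effort (DD_BE_PRIO) and idle (DD_IDLE_PRIO) requests.
 */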
static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}
/* Returns the I/O priority class (IOPRIO_CLASS_*) assigned to a request. */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}
/* get the request before `rq' in sector-sorted order */
static inline struct request *
deadline_earlier_request(struct request *rq)
{
	struct rb_node *node = rb_prev(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);
	return NULL;
}
/* get the request after `rq' in sector-sorted order */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);
	return NULL;
}
static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}
static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}
/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}
static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}
/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	dd_count(dd, merged, prio);

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}
/* move an entry to dispatch queue */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/* take it off the sort and fifo list */
	deadline_remove_request(rq->q, per_prio, rq);
}
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir]).
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/* rq is expired if its deadline is in the past */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;
	return 0;
}
/* Check if rq has a sequential request preceding it. */
static bool deadline_is_seq_writes(struct deadline_data *dd, struct request *rq)
{
	struct request *prev = deadline_earlier_request(rq);

	if (!prev)
		return false;
	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
}
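/*
 * Example: if the preceding request covers sectors 2048..2055 (blk_rq_pos()
 * == 2048, blk_rq_sectors() == 8) and @rq starts at sector 2056, the two
 * requests are contiguous and @rq is treated as part of a sequential write
 * stream.
 */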
/* Skip all write requests that are sequential from @rq, even if we cross
 * the zone containing @rq. */
static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
						struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);
	sector_t skipped_sectors = 0;

	while (rq) {
		if (blk_rq_pos(rq) != pos + skipped_sectors)
			break;
		skipped_sectors += blk_rq_sectors(rq);
		rq = deadline_latter_request(rq);
	}
	return rq;
}
/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq) &&
		    (blk_queue_nonrot(rq->q) ||
		     !deadline_is_seq_writes(dd, rq)))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}
/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		if (blk_queue_nonrot(rq->q))
			rq = deadline_latter_request(rq);
		else
			rq = deadline_skip_seq_writes(dd, rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);
	return rq;
}
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	/* batches are currently reads XOR writes */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */
	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;
		goto dispatch_find_request;
	}

	/* there are either no reads or writes have been starved */
	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;
		data_dir = DD_WRITE;
		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/* we are not running a batch, find best request for selected data_dir */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	/* rq is the selected appropriate request. */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, dispatched, prio);
	/* If the request needs its target zone locked, do it. */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}
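/*
 * Dispatch order implemented above, in short: requeued requests on the
 * per-priority dispatch list go out first; otherwise the current batch
 * continues in sector order until fifo_batch requests have been issued;
 * otherwise reads are preferred unless writes have already been starved
 * writes_starved times; finally, an expired FIFO deadline restarts dispatch
 * from the oldest request in the chosen direction.
 */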
/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
		if (rq)
			break;
	}
	spin_unlock(&dd->lock);
	return rq;
}
/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}
/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
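/*
 * Example of the calculation above: with nr_requests = 64 (a common default),
 * async_depth becomes max(1, 3 * 64 / 4) = 48, so asynchronous requests and
 * writes can consume at most 48 of the 64 scheduler tags, leaving headroom
 * for synchronous reads, which dd_limit_depth() never throttles.
 */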
/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}
static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
	}

	free_percpu(dd->stats);
	kfree(dd);
}
/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
				     GFP_KERNEL | __GFP_ZERO);
	if (!dd->stats)
		goto free_dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	q->elevator = eq;
	return 0;

free_dd:
	kfree(dd);
put_eq:
	kobject_put(&eq->kobj);
	return ret;
}
/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}
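/*
 * Example of a front merge handled above: a bio ending at sector S can be
 * merged in front of a request that starts at sector S, which is why the
 * rbtree lookup key is bio_end_sector(bio).
 */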
/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
			 unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}
/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;
	LIST_HEAD(free);

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, inserted, prio);
	rq->elv.priv[0] = (void *)(uintptr_t)1;

	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
		blk_mq_free_requests(&free);
		return;
	}

	trace_block_rq_insert(rq);

	per_prio = &dd->per_prio[prio];
	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/* set expire time and add to fifo list */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}
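/*
 * Note on the two insertion paths above: at_head insertions (e.g. requeues)
 * bypass elevator ordering and go straight onto the per-priority dispatch
 * list, while normal insertions are added to both the sector-sorted rbtree
 * and the expiry FIFO for their data direction.
 */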
/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}
/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}
static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio p;

	for (p = 0; p <= DD_PRIO_MAX; p++)
		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
			return true;
	return false;
}
/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Hence only update statistics for
	 * requests for which dd_insert_requests() has been called. See also
	 * blk_mq_request_bypass_insert().
	 */
	if (rq->elv.priv[0])
		dd_count(dd, completed, prio);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);

		if (dd_has_write_work(rq->mq_hctx))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
	}
}
static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;
	return false;
}
/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}

#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))

SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)

STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES
#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};
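/*
 * The attributes above are exposed under the queue's iosched directory in
 * sysfs, typically /sys/block/<disk>/queue/iosched/. For example, writing
 * 100 to read_expire sets the read deadline to 100 ms (the value is converted
 * with msecs_to_jiffies() by the STORE_JIFFIES() wrapper above).
 */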
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq = per_prio->next_rq[data_dir];		\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}
/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
		   dd_queued(dd, DD_BE_PRIO),
		   dd_queued(dd, DD_IDLE_PRIO));
	return 0;
}
/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
		- dd_sum(dd, completed, prio);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
		   dd_owned_by_driver(dd, DD_BE_PRIO),
		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
	return 0;
}
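/*
 * A minimal sketch of how the two debugfs counters above relate, assuming no
 * requests are in flight: inserted == completed, so "queued" reports 0, and
 * dispatched + merged == completed, so "owned_by_driver" reports 0 as well.
 */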
#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,	\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
};

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR
#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
	 .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated = dd_depth_updated,
		.limit_depth = dd_limit_depth,
		.insert_requests = dd_insert_requests,
		.dispatch_request = dd_dispatch_request,
		.prepare_request = dd_prepare_request,
		.finish_request = dd_finish_request,
		.next_request = elv_rb_latter_request,
		.former_request = elv_rb_former_request,
		.bio_merge = dd_bio_merge,
		.request_merge = dd_request_merge,
		.requests_merged = dd_merged_requests,
		.request_merged = dd_request_merged,
		.has_work = dd_has_work,
		.init_sched = dd_init_sched,
		.exit_sched = dd_exit_sched,
		.init_hctx = dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");
static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");