// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
	KYBER_READ,
	KYBER_WRITE,
	KYBER_DISCARD,
	KYBER_OTHER,
	KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
	[KYBER_READ] = "READ",
	[KYBER_WRITE] = "WRITE",
	[KYBER_DISCARD] = "DISCARD",
	[KYBER_OTHER] = "OTHER",
};

enum {
	/*
	 * In order to prevent starvation of synchronous requests by a flood of
	 * asynchronous requests, we reserve 25% of requests for synchronous
	 * operations.
	 */
	KYBER_ASYNC_PERCENT = 75,
};

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
	[KYBER_READ] = 256,
	[KYBER_WRITE] = 128,
	[KYBER_DISCARD] = 64,
	[KYBER_OTHER] = 16,
};

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
	[KYBER_READ] = 2ULL * NSEC_PER_MSEC,
	[KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
	[KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
	[KYBER_READ] = 16,
	[KYBER_WRITE] = 8,
	[KYBER_DISCARD] = 1,
	[KYBER_OTHER] = 1,
};

/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
enum {
	/*
	 * The width of the latency histogram buckets is
	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
	 */
	KYBER_LATENCY_SHIFT = 2,

	/*
	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
	 * thus, "good".
	 */
	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,

	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};
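
/*
 * For example, with the default 2 ms read target, each bucket is 500 us wide:
 * buckets 0-3 cover samples at or below the target ("good") and buckets 4-7
 * cover samples beyond it ("bad").
 */
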
/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
	KYBER_TOTAL_LATENCY,
	KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
	[KYBER_TOTAL_LATENCY] = "total",
	[KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
struct kyber_cpu_latency {
	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};

/*
 * There is a one-to-one mapping between ctx & hctx and between kcq & khd;
 * we use request->mq_ctx->index_hw to index the kcq in khd.
 */
struct kyber_ctx_queue {
	/*
	 * Used to ensure operations on rq_list and kcq_map are atomic.
	 * Also protects the requests on rq_list during merging.
	 */
	spinlock_t lock;
	struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;

struct kyber_queue_data {
	struct request_queue *q;

	/*
	 * Each scheduling domain has a limited number of in-flight requests
	 * device-wide, limited by these tokens.
	 */
	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

	/*
	 * Async request percentage, converted to per-word depth for
	 * sbitmap_get_shallow().
	 */
	unsigned int async_depth;

	struct kyber_cpu_latency __percpu *cpu_latency;

	/* Timer for stats aggregation and adjusting domain tokens. */
	struct timer_list timer;

	unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

	unsigned long latency_timeout[KYBER_OTHER];

	int domain_p99[KYBER_OTHER];

	/* Target latencies in nanoseconds. */
	u64 latency_targets[KYBER_OTHER];
};

struct kyber_hctx_data {
	spinlock_t lock;
	struct list_head rqs[KYBER_NUM_DOMAINS];
	unsigned int cur_domain;
	unsigned int batching;
	struct kyber_ctx_queue *kcqs;
	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
	atomic_t wait_index[KYBER_NUM_DOMAINS];
};

static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);
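
/* Map a request operation to the scheduling domain it is accounted under. */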
static unsigned int kyber_sched_domain(unsigned int op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_READ:
		return KYBER_READ;
	case REQ_OP_WRITE:
		return KYBER_WRITE;
	case REQ_OP_DISCARD:
		return KYBER_DISCARD;
	default:
		return KYBER_OTHER;
	}
}
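
/*
 * Fold one CPU's histogram for a (domain, latency type) pair into the
 * device-wide totals, zeroing the per-cpu counts as we go.
 */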
static void flush_latency_buckets(struct kyber_queue_data *kqd,
				  struct kyber_cpu_latency *cpu_latency,
				  unsigned int sched_domain, unsigned int type)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
	unsigned int bucket;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int type,
				unsigned int percentile)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	unsigned int bucket, samples = 0, percentile_samples;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		samples += buckets[bucket];

	if (!samples)
		return -1;

	/*
	 * We do the calculation once we have 500 samples or one second passes
	 * since the first sample was recorded, whichever comes first.
	 */
	if (!kqd->latency_timeout[sched_domain])
		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
	if (samples < 500 &&
	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
		return -1;
	}
	kqd->latency_timeout[sched_domain] = 0;

	percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
		if (buckets[bucket] >= percentile_samples)
			break;
		percentile_samples -= buckets[bucket];
	}
	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

	trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
			    kyber_latency_type_names[type], percentile,
			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

	return bucket;
}
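
/*
 * Resize a domain's token pool, clamped to [1, kyber_depth[domain]], and
 * emit a trace event if the depth actually changed.
 */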
static void kyber_resize_domain(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int depth)
{
	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
		trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
				   depth);
	}
}
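
/*
 * Periodic stats timer: fold the per-cpu histograms into the device-wide
 * ones, look for congestion, and rescale each domain's token depth.
 */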
static void kyber_timer_fn(struct timer_list *t)
{
	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
	unsigned int sched_domain;
	int cpu;
	bool bad = false;

	/* Sum all of the per-cpu latency histograms. */
	for_each_online_cpu(cpu) {
		struct kyber_cpu_latency *cpu_latency;

		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
		for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_TOTAL_LATENCY);
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_IO_LATENCY);
		}
	}

	/*
	 * Check if any domains have a high I/O latency, which might indicate
	 * congestion in the device. Note that we use the p90; we don't want to
	 * be too sensitive to outliers here.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		int p90;

		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
					   90);
		if (p90 >= KYBER_GOOD_BUCKETS)
			bad = true;
	}

	/*
	 * Adjust the scheduling domain depths. If we determined that there was
	 * congestion, we throttle all domains with good latencies. Either way,
	 * we ease up on throttling domains with bad latencies.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		unsigned int orig_depth, depth;
		int p99;

		p99 = calculate_percentile(kqd, sched_domain,
					   KYBER_TOTAL_LATENCY, 99);
		/*
		 * This is kind of subtle: different domains will not
		 * necessarily have enough samples to calculate the latency
		 * percentiles during the same window, so we have to remember
		 * the p99 for the next time we observe congestion; once we do,
		 * we don't want to throttle again until we get more data, so we
		 * reset it to -1.
		 */
		if (bad) {
			if (p99 < 0)
				p99 = kqd->domain_p99[sched_domain];
			kqd->domain_p99[sched_domain] = -1;
		} else if (p99 >= 0) {
			kqd->domain_p99[sched_domain] = p99;
		}
		if (p99 < 0)
			continue;

		/*
		 * If this domain has bad latency, throttle less. Otherwise,
		 * throttle more iff we determined that there is congestion.
		 *
		 * The new depth is scaled linearly with the p99 latency vs the
		 * latency target. E.g., if the p99 is 3/4 of the target, then
		 * we throttle down to 3/4 of the current depth, and if the p99
		 * is 2x the target, then we double the depth.
		 */
		if (bad || p99 >= KYBER_GOOD_BUCKETS) {
			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
			depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
			kyber_resize_domain(kqd, sched_domain, depth);
		}
	}
}
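
/*
 * Allocate the per-queue scheduler state: the per-cpu latency histograms,
 * the aggregation timer, and one token sbitmap_queue per scheduling domain.
 */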
static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
	struct kyber_queue_data *kqd;
	int ret = -ENOMEM;
	int i;

	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
	if (!kqd)
		goto err;

	kqd->q = q;

	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
					    GFP_KERNEL | __GFP_ZERO);
	if (!kqd->cpu_latency)
		goto err_kqd;

	timer_setup(&kqd->timer, kyber_timer_fn, 0);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_buckets;
		}
	}

	for (i = 0; i < KYBER_OTHER; i++) {
		kqd->domain_p99[i] = -1;
		kqd->latency_targets[i] = kyber_latency_targets[i];
	}

	return kqd;

err_buckets:
	free_percpu(kqd->cpu_latency);
err_kqd:
	kfree(kqd);
err:
	return ERR_PTR(ret);
}

static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct kyber_queue_data *kqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	kqd = kyber_queue_data_alloc(q);
	if (IS_ERR(kqd)) {
		kobject_put(&eq->kobj);
		return PTR_ERR(kqd);
	}

	blk_stat_enable_accounting(q);

	eq->elevator_data = kqd;
	q->elevator = eq;

	return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
	struct kyber_queue_data *kqd = e->elevator_data;
	int i;

	del_timer_sync(&kqd->timer);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_queue_free(&kqd->domain_tokens[i]);
	free_percpu(kqd->cpu_latency);
	kfree(kqd);
}

static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
	unsigned int i;

	spin_lock_init(&kcq->lock);
	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		INIT_LIST_HEAD(&kcq->rq_list[i]);
}
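
/*
 * Recompute the async (shallow) depth whenever the scheduler tag depth
 * changes. The depth is per sbitmap word: e.g., with a word shift of 6
 * (64 bits per word), async requests may use at most 64 * 75 / 100 = 48
 * bits of each word.
 */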
static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags->sb.shift;

	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
}

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd;
	int i;

	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
	if (!khd)
		return -ENOMEM;

	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
				       sizeof(struct kyber_ctx_queue),
				       GFP_KERNEL, hctx->numa_node);
	if (!khd->kcqs)
		goto err_khd;

	for (i = 0; i < hctx->nr_ctx; i++)
		kyber_ctx_queue_init(&khd->kcqs[i]);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
				      ilog2(8), GFP_KERNEL, hctx->numa_node)) {
			while (--i >= 0)
				sbitmap_free(&khd->kcq_map[i]);
			goto err_kcqs;
		}
	}

	spin_lock_init(&khd->lock);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		khd->domain_wait[i].sbq = NULL;
		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
					  kyber_domain_wake);
		khd->domain_wait[i].wait.private = hctx;
		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
		atomic_set(&khd->wait_index[i], 0);
	}

	khd->cur_domain = 0;
	khd->batching = 0;

	hctx->sched_data = khd;
	kyber_depth_updated(hctx);

	return 0;

err_kcqs:
	kfree(khd->kcqs);
err_khd:
	kfree(khd);
	return -ENOMEM;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_free(&khd->kcq_map[i]);
	kfree(khd->kcqs);
	kfree(hctx->sched_data);
}
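
/*
 * The domain token held by an in-flight request is stashed in the request's
 * elevator private data; -1 means no token is held.
 */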
static int rq_get_domain_token(struct request *rq)
{
	return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
	rq->elv.priv[0] = (void *)(long)token;
}

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{
	unsigned int sched_domain;
	int nr;

	nr = rq_get_domain_token(rq);
	if (nr != -1) {
		sched_domain = kyber_sched_domain(rq->cmd_flags);
		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
				    rq->mq_ctx->cpu);
	}
}

static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	/*
	 * We use the scheduler tags as per-hardware queue queueing tokens.
	 * Async requests can be limited at this stage.
	 */
	if (!op_is_sync(op)) {
		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

		data->shallow_depth = kqd->async_depth;
	}
}

static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
			    unsigned int nr_segs)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
	struct list_head *rq_list = &kcq->rq_list[sched_domain];
	bool merged;

	spin_lock(&kcq->lock);
	merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
	spin_unlock(&kcq->lock);

	return merged;
}
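
/* Newly prepared requests start out without a domain token. */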
static void kyber_prepare_request(struct request *rq)
{
	rq_set_domain_token(rq, -1);
}

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct list_head *rq_list, bool at_head)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq, *next;

	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
		struct list_head *head = &kcq->rq_list[sched_domain];

		spin_lock(&kcq->lock);
		if (at_head)
			list_move(&rq->queuelist, head);
		else
			list_move_tail(&rq->queuelist, head);
		sbitmap_set_bit(&khd->kcq_map[sched_domain],
				rq->mq_ctx->index_hw[hctx->type]);
		trace_block_rq_insert(rq);
		spin_unlock(&kcq->lock);
	}
}

static void kyber_finish_request(struct request *rq)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

	rq_clear_domain_token(kqd, rq);
}
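
/*
 * Record one latency sample in the per-cpu histogram. The bucket index is
 * the latency divided by a quarter of the target, capped at the last bucket:
 * e.g., with a 2 ms target, a 1.2 ms sample lands in bucket 2
 * (<= 3/4 * target).
 */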
static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
			       unsigned int sched_domain, unsigned int type,
			       u64 target, u64 latency)
{
	unsigned int bucket;
	u64 divisor;

	if (latency > 0) {
		divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
			       KYBER_LATENCY_BUCKETS - 1);
	} else {
		bucket = 0;
	}

	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}

static void kyber_completed_request(struct request *rq, u64 now)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
	struct kyber_cpu_latency *cpu_latency;
	unsigned int sched_domain;
	u64 target;

	sched_domain = kyber_sched_domain(rq->cmd_flags);
	if (sched_domain == KYBER_OTHER)
		return;

	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
	target = kqd->latency_targets[sched_domain];
	add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
			   target, now - rq->start_time_ns);
	add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
			   now - rq->io_start_time_ns);
	put_cpu_ptr(kqd->cpu_latency);

	timer_reduce(&kqd->timer, jiffies + HZ / 10);
}

struct flush_kcq_data {
	struct kyber_hctx_data *khd;
	unsigned int sched_domain;
	struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_kcq_data *flush_data = data;
	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

	spin_lock(&kcq->lock);
	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
			      flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&kcq->lock);

	return true;
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
				  unsigned int sched_domain,
				  struct list_head *list)
{
	struct flush_kcq_data data = {
		.khd = khd,
		.sched_domain = sched_domain,
		.list = list,
	};

	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
			     flush_busy_kcq, &data);
}

static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
			     void *key)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

	sbitmap_del_wait_queue(wait);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}
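
/*
 * Try to grab a device-wide token for the current domain. On failure, hook
 * the hctx onto the token wait queue so it is re-run once a token is freed.
 */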
static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	/*
	 * If we failed to get a domain token, make sure the hardware queue is
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(domain_tokens, ws, wait);

		/*
		 * Try again in case a token was freed before we got on the
		 * wait queue.
		 */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/*
	 * If we got a token while we were on the wait queue, remove ourselves
	 * from the wait queue to ensure that all wake ups make forward
	 * progress. It's possible that the waker already deleted the entry
	 * between the !list_empty_careful() check and us grabbing the lock, but
	 * list_del_init() is okay with that.
	 */
	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		sbitmap_del_wait_queue(wait);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];

	/*
	 * If we already have a flushed request, then we just need to get a
	 * token for it. Otherwise, if there are pending requests in the kcqs,
	 * flush the kcqs, but only if we can get a token. If not, we should
	 * leave the requests in the kcqs so that they can be merged. Note that
	 * khd->lock serializes the flushes, so if we observed any bit set in
	 * the kcq_map, we will always get a request.
	 */
	rq = list_first_entry_or_null(rqs, struct request, queuelist);
	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
			rq = list_first_entry(rqs, struct request, queuelist);
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/*
	 * Either,
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}

static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]) ||
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}

	return false;
}
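
/*
 * sysfs: the read_lat_nsec and write_lat_nsec attributes expose the READ and
 * WRITE latency targets (in nanoseconds) and allow them to be tuned at
 * runtime.
 */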
#define KYBER_LAT_SHOW_STORE(domain, name)				\
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,	\
				       char *page)			\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
									\
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
}									\
									\
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,	\
					const char *page, size_t count)	\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
	unsigned long long nsec;					\
	int ret;							\
									\
	ret = kstrtoull(page, 10, &nsec);				\
	if (ret)							\
		return ret;						\
									\
	kqd->latency_targets[domain] = nsec;				\
									\
	return count;							\
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR
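
/*
 * debugfs: per-queue and per-hctx scheduler state, surfaced under the block
 * layer's debugfs tree (typically /sys/kernel/debug/block/<disk>/) when
 * CONFIG_BLK_DEBUG_FS is enabled.
 */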
#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)			\
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)	\
{									\
	struct request_queue *q = data;					\
	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
									\
	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
	return 0;							\
}									\
									\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)	\
	__acquires(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_lock(&khd->lock);						\
	return seq_list_start(&khd->rqs[domain], *pos);			\
}									\
									\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	return seq_list_next(v, &khd->rqs[domain], pos);		\
}									\
									\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)	\
	__releases(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_unlock(&khd->lock);					\
}									\
									\
static const struct seq_operations kyber_##name##_rqs_seq_ops = {	\
	.start	= kyber_##name##_rqs_start,				\
	.next	= kyber_##name##_rqs_next,				\
	.stop	= kyber_##name##_rqs_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
{									\
	struct blk_mq_hw_ctx *hctx = data;				\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;	\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)	\
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)					\
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},	\
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

static struct elevator_type kyber_sched = {
	.ops = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.bio_merge = kyber_bio_merge,
		.prepare_request = kyber_prepare_request,
		.insert_requests = kyber_insert_requests,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
		.depth_updated = kyber_depth_updated,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_features = ELEVATOR_F_MQ_AWARE,
	.elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");