// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * This works similar to wbt with a few exceptions
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual disk.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window. This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to. Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth. This means that we only care about our latency targets at the
 * peer level. Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                          root blkg
 *                   /                    \
 *          fast (target=5ms)        slow (target=10ms)
 *           /        \                /          \
 *          a          b         normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms. If it does then we will throttle the "slow"
 * group. In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies. We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling. As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight. This starts at (u64)-1 down
 * to 1. If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling. This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more then we induce a latency at userspace return. We accumulate the
 * total amount of time we need to be punished by doing
 *
 *   total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 *   throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
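/*
 * Illustrative sketch (not from the original source): assuming a group with a
 * hypothetical 5ms target (min_lat_nsec) whose root-issued IO completes in
 * 2ms, each such completion adds 3ms of "punishment", and the delay actually
 * applied at throttle time is capped at one second:
 *
 *   total_time += 5 * NSEC_PER_MSEC - 2 * NSEC_PER_MSEC;  // accumulate 3ms
 *   throttle_time = min(total_time, (u64)NSEC_PER_SEC);   // cap at 1s
 */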
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include "blk-rq-qos.h"
#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
        struct rq_qos rqos;
        struct timer_list timer;
        atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
        return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
        return atomic_read(&blkiolat->enabled) > 0;
}
struct child_latency_info {
        spinlock_t lock;
        /* Last time we adjusted the scale of everybody. */
        u64 last_scale_event;
        /* The latency that we missed. */
        u64 scale_lat;
        /* Total io's from all of our children for the last summation. */
        u64 nr_samples;
        /* The guy who actually changed the latency numbers. */
        struct iolatency_grp *scale_grp;
        /* Cookie to tell if we need to scale up or down. */
        atomic_t scale_cookie;
};

struct percentile_stats {
        u64 total;
        u64 missed;
};

struct latency_stat {
        union {
                struct percentile_stats ps;
                struct blk_rq_stat rqs;
        };
};
struct iolatency_grp {
        struct blkg_policy_data pd;
        struct latency_stat __percpu *stats;
        struct latency_stat cur_stat;
        struct blk_iolatency *blkiolat;
        struct rq_depth rq_depth;
        struct rq_wait rq_wait;
        atomic64_t window_start;
        atomic_t scale_cookie;
        u64 min_lat_nsec;
        u64 cur_win_nsec;
        /* total running average of our io latency. */
        u64 lat_avg;
        /* Our current number of IO's for the last summation. */
        u64 nr_samples;
        bool ssd;
        struct child_latency_info child_lat;
};
#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC

/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average. The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately. Note, windows only elapse with IO activity. Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
                                      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
        2045, // exp(1/600) - 600 samples
        2039, // exp(1/240) - 240 samples
        2031, // exp(1/120) - 120 samples
        2023, // exp(1/80) - 80 samples
        2014, // exp(1/60) - 60 samples
};
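/*
 * Worked example (illustrative only): with the default 100ms window,
 * cur_win_nsec / BLKIOLATENCY_EXP_BUCKET_SIZE (250ms) selects bucket 0, so
 * lat_avg decays with factor 2045, i.e. roughly a one-minute moving average
 * (600 samples * 100ms) when windows elapse back to back.
 */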
static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
        return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
        return pd_to_blkg(&iolat->pd);
}
static inline void latency_stat_init(struct iolatency_grp *iolat,
                                     struct latency_stat *stat)
{
        if (iolat->ssd) {
                stat->ps.total = 0;
                stat->ps.missed = 0;
        } else
                blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
                                    struct latency_stat *sum,
                                    struct latency_stat *stat)
{
        if (iolat->ssd) {
                sum->ps.total += stat->ps.total;
                sum->ps.missed += stat->ps.missed;
        } else
                blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
                                            u64 req_time)
{
        struct latency_stat *stat = get_cpu_ptr(iolat->stats);
        if (iolat->ssd) {
                if (req_time >= iolat->min_lat_nsec)
                        stat->ps.missed++;
                stat->ps.total++;
        } else
                blk_rq_stat_add(&stat->rqs, req_time);
        put_cpu_ptr(stat);
}
static inline bool latency_sum_ok(struct iolatency_grp *iolat,
                                  struct latency_stat *stat)
{
        if (iolat->ssd) {
                u64 thresh = div64_u64(stat->ps.total, 10);
                thresh = max(thresh, 1ULL);
                return stat->ps.missed < thresh;
        }
        return stat->rqs.mean <= iolat->min_lat_nsec;
}
static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
                                       struct latency_stat *stat)
{
        if (iolat->ssd)
                return stat->ps.total;
        return stat->rqs.nr_samples;
}
static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
                                              struct latency_stat *stat)
{
        int exp_idx;

        if (iolat->ssd)
                return;

        /*
         * calc_load() takes in a number stored in fixed point representation.
         * Because we are using this for IO time in ns, the values stored
         * are significantly larger than the FIXED_1 denominator (2048).
         * Therefore, rounding errors in the calculation are negligible and
         * can be ignored.
         */
        exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                        div64_u64(iolat->cur_win_nsec,
                                  BLKIOLATENCY_EXP_BUCKET_SIZE));
        iolat->lat_avg = calc_load(iolat->lat_avg,
                                   iolatency_exp_factors[exp_idx],
                                   stat->rqs.mean);
}
static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        atomic_dec(&rqw->inflight);
        wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
        struct iolatency_grp *iolat = private_data;

        return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                       struct iolatency_grp *iolat,
                                       bool issue_as_root,
                                       bool use_memdelay)
{
        struct rq_wait *rqw = &iolat->rq_wait;
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

        if (use_delay)
                blkcg_schedule_throttle(rqos->q, use_memdelay);

        /*
         * To avoid priority inversions we want to just take a slot if we are
         * issuing as root. If we're being killed off there's no point in
         * delaying things, we may have been killed by OOM so throttling may
         * make recovery take even longer, so just let the IO's through so the
         * task can go away.
         */
        if (issue_as_root || fatal_signal_pending(current)) {
                atomic_inc(&rqw->inflight);
                return;
        }

        rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}
#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
        return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
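/*
 * Illustrative only: with nr_requests == 128, scale_amount() yields
 * 128 >> 4 == 8 when scaling up and 128 >> 2 == 32 when scaling down, so
 * queue depth is handed back in smaller steps than it is taken away.
 */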
/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has their own local copy of the last scale cookie they saw, so if
 * the global scale cookie goes up or down they know which way they need to go
 * based on their last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
                                struct child_latency_info *lat_info,
                                bool up)
{
        unsigned long qd = blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = atomic_read(&lat_info->scale_cookie);
        unsigned long max_scale = qd << 1;
        unsigned long diff = 0;

        if (old < DEFAULT_SCALE_COOKIE)
                diff = DEFAULT_SCALE_COOKIE - old;

        if (up) {
                if (scale + old > DEFAULT_SCALE_COOKIE)
                        atomic_set(&lat_info->scale_cookie,
                                   DEFAULT_SCALE_COOKIE);
                else if (diff > qd)
                        atomic_inc(&lat_info->scale_cookie);
                else
                        atomic_add(scale, &lat_info->scale_cookie);
        } else {
                /*
                 * We don't want to dig a hole so deep that it takes us hours to
                 * dig out of it. Just enough that we don't throttle/unthrottle
                 * with jagged workloads but can still unthrottle once pressure
                 * has sufficiently dissipated.
                 */
                if (diff > qd) {
                        if (diff < max_scale)
                                atomic_dec(&lat_info->scale_cookie);
                } else {
                        atomic_sub(scale, &lat_info->scale_cookie);
                }
        }
}
/*
 * Change the queue depth of the iolatency_grp. We add/subtract 1/16th of the
 * queue depth at a time so we don't get wild swings and hopefully dial in to
 * fairer distribution of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
        unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = iolat->rq_depth.max_depth;

        if (old > qd)
                old = qd;

        if (up) {
                if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
                        return;

                if (old < qd) {
                        old += scale;
                        old = min(old, qd);
                        iolat->rq_depth.max_depth = old;
                        wake_up_all(&iolat->rq_wait.wait);
                }
        } else {
                old >>= 1;
                iolat->rq_depth.max_depth = max(old, 1UL);
        }
}
/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        unsigned int cur_cookie;
        unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
        u64 scale_lat;
        unsigned int old;
        int direction = 0;

        if (lat_to_blkg(iolat)->parent == NULL)
                return;

        parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;
        cur_cookie = atomic_read(&lat_info->scale_cookie);
        scale_lat = READ_ONCE(lat_info->scale_lat);

        if (cur_cookie < our_cookie)
                direction = -1;
        else if (cur_cookie > our_cookie)
                direction = 1;
        else
                return;

        old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

        /* Somebody beat us to the punch, just bail. */
        if (old != our_cookie)
                return;

        if (direction < 0 && iolat->min_lat_nsec) {
                u64 samples_thresh;

                if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                        return;

                /*
                 * Sometimes high priority groups are their own worst enemy, so
                 * instead of taking it out on some poor other group that did 5%
                 * or less of the IO's for the last summation just skip this
                 * scale down event.
                 */
                samples_thresh = lat_info->nr_samples * 5;
                samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
                if (iolat->nr_samples <= samples_thresh)
                        return;
        }

        /* We're as low as we can go. */
        if (iolat->rq_depth.max_depth == 1 && direction < 0) {
                blkcg_use_delay(lat_to_blkg(iolat));
                return;
        }

        /* We're back to the default cookie, unthrottle all the things. */
        if (cur_cookie == DEFAULT_SCALE_COOKIE) {
                blkcg_clear_delay(lat_to_blkg(iolat));
                iolat->rq_depth.max_depth = UINT_MAX;
                wake_up_all(&iolat->rq_wait.wait);
                return;
        }

        scale_change(iolat, direction > 0);
}
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);

        if (!blk_iolatency_enabled(blkiolat))
                return;

        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }

                check_scale_change(iolat);
                __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                           (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                blkg = blkg->parent;
        }
        if (!timer_pending(&blkiolat->timer))
                mod_timer(&blkiolat->timer, jiffies + HZ);
}
static void iolatency_record_time(struct iolatency_grp *iolat,
                                  struct bio_issue *issue, u64 now,
                                  bool issue_as_root)
{
        u64 start = bio_issue_time(issue);
        u64 req_time;

        /*
         * Have to do this so we are truncated to the correct time that our
         * issue is truncated to.
         */
        now = __bio_issue_time(now);

        if (now <= start)
                return;

        req_time = now - start;

        /*
         * We don't want to count issue_as_root bio's in the cgroups latency
         * statistics as it could skew the numbers downwards.
         */
        if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
                u64 sub = iolat->min_lat_nsec;
                if (req_time < sub)
                        blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
                return;
        }

        latency_stat_record_time(iolat, req_time);
}
#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        struct latency_stat stat;
        unsigned long flags;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
                latency_stat_init(iolat, s);
        }
        preempt_enable();

        parent = blkg_to_lat(blkg->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;

        iolat_update_total_lat_avg(iolat, &stat);

        /* Everything is ok and we don't need to adjust the scale. */
        if (latency_sum_ok(iolat, &stat) &&
            atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
                return;

        /* Somebody beat us to the punch, just bail. */
        spin_lock_irqsave(&lat_info->lock, flags);

        latency_stat_sum(iolat, &iolat->cur_stat, &stat);
        lat_info->nr_samples -= iolat->nr_samples;
        lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
        iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

        if ((lat_info->last_scale_event >= now ||
             now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
                goto out;

        if (latency_sum_ok(iolat, &iolat->cur_stat) &&
            latency_sum_ok(iolat, &stat)) {
                if (latency_stat_samples(iolat, &iolat->cur_stat) <
                    BLKIOLATENCY_MIN_GOOD_SAMPLES)
                        goto out;
                if (lat_info->scale_grp == iolat) {
                        lat_info->last_scale_event = now;
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                }
        } else if (lat_info->scale_lat == 0 ||
                   lat_info->scale_lat >= iolat->min_lat_nsec) {
                lat_info->last_scale_event = now;
                if (!lat_info->scale_grp ||
                    lat_info->scale_lat > iolat->min_lat_nsec) {
                        WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                        lat_info->scale_grp = iolat;
                }
                scale_cookie_change(iolat->blkiolat, lat_info, false);
        }
        latency_stat_init(iolat, &iolat->cur_stat);
out:
        spin_unlock_irqrestore(&lat_info->lock, flags);
}
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;
        struct rq_wait *rqw;
        struct iolatency_grp *iolat;
        u64 window_start;
        u64 now;
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        bool enabled = false;
        int inflight = 0;

        blkg = bio->bi_blkg;
        if (!blkg || !bio_flagged(bio, BIO_TRACKED))
                return;

        iolat = blkg_to_lat(bio->bi_blkg);
        if (!iolat)
                return;

        enabled = blk_iolatency_enabled(iolat->blkiolat);
        if (!enabled)
                return;

        now = ktime_to_ns(ktime_get());
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }
                rqw = &iolat->rq_wait;

                inflight = atomic_dec_return(&rqw->inflight);
                WARN_ON_ONCE(inflight < 0);
                /*
                 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
                 * submitted, so do not account for it.
                 */
                if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
                        iolatency_record_time(iolat, &bio->bi_issue, now,
                                              issue_as_root);
                        window_start = atomic64_read(&iolat->window_start);
                        if (now > window_start &&
                            (now - window_start) >= iolat->cur_win_nsec) {
                                if (atomic64_cmpxchg(&iolat->window_start,
                                             window_start, now) == window_start)
                                        iolatency_check_latencies(iolat, now);
                        }
                }
                wake_up(&rqw->wait);
                blkg = blkg->parent;
        }
}
static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

        del_timer_sync(&blkiolat->timer);
        blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
        kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
        .throttle = blkcg_iolatency_throttle,
        .done_bio = blkcg_iolatency_done_bio,
        .exit = blkcg_iolatency_exit,
};
static void blkiolatency_timer_fn(struct timer_list *t)
{
        struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        u64 now = ktime_to_ns(ktime_get());

        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css,
                                     blkiolat->rqos.q->root_blkg) {
                struct iolatency_grp *iolat;
                struct child_latency_info *lat_info;
                unsigned long flags;
                u64 cookie;

                /*
                 * We could be exiting, don't access the pd unless we have a
                 * ref on the blkg.
                 */
                if (!blkg_tryget(blkg))
                        continue;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                lat_info = &iolat->child_lat;
                cookie = atomic_read(&lat_info->scale_cookie);

                if (cookie >= DEFAULT_SCALE_COOKIE)
                        goto next;

                spin_lock_irqsave(&lat_info->lock, flags);
                if (lat_info->last_scale_event >= now)
                        goto next_lock;

                /*
                 * We scaled down but don't have a scale_grp, scale up and carry
                 * on.
                 */
                if (lat_info->scale_grp == NULL) {
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                        goto next_lock;
                }

                /*
                 * It's been 5 seconds since our last scale event, clear the
                 * scale grp in case the group that needed the scale down isn't
                 * doing any IO currently.
                 */
                if (now - lat_info->last_scale_event >=
                    ((u64)NSEC_PER_SEC * 5))
                        lat_info->scale_grp = NULL;
next_lock:
                spin_unlock_irqrestore(&lat_info->lock, flags);
next:
                blkg_put(blkg);
        }
        rcu_read_unlock();
}
int blk_iolatency_init(struct request_queue *q)
{
        struct blk_iolatency *blkiolat;
        struct rq_qos *rqos;
        int ret;

        blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
        if (!blkiolat)
                return -ENOMEM;

        rqos = &blkiolat->rqos;
        rqos->id = RQ_QOS_LATENCY;
        rqos->ops = &blkcg_iolatency_ops;
        rqos->q = q;
        rq_qos_add(q, rqos);

        ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
        if (ret) {
                rq_qos_del(q, rqos);
                kfree(blkiolat);
                return ret;
        }

        timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
        return 0;
}
/*
 * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
 * return 0.
 */
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
        u64 oldval = iolat->min_lat_nsec;

        iolat->min_lat_nsec = val;
        iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
        iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                    BLKIOLATENCY_MAX_WIN_SIZE);

        if (!oldval && val)
                return 1;
        if (oldval && !val) {
                blkcg_clear_delay(blkg);
                return -1;
        }
        return 0;
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
        if (blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
                struct child_latency_info *lat_info;
                if (!iolat)
                        return;

                lat_info = &iolat->child_lat;
                spin_lock(&lat_info->lock);
                atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
                lat_info->last_scale_event = 0;
                lat_info->scale_grp = NULL;
                lat_info->scale_lat = 0;
                spin_unlock(&lat_info->lock);
        }
}
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
                                   size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkcg_gq *blkg;
        struct blkg_conf_ctx ctx;
        struct iolatency_grp *iolat;
        char *p, *tok;
        u64 lat_val = 0;
        u64 oldval;
        int ret;
        int enable = 0;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
        if (ret)
                return ret;

        iolat = blkg_to_lat(ctx.blkg);
        p = ctx.body;

        ret = -EINVAL;
        while ((tok = strsep(&p, " "))) {
                char key[16];
                char val[21];   /* 18446744073709551616 */

                if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
                        goto out;

                if (!strcmp(key, "target")) {
                        u64 v;

                        if (!strcmp(val, "max"))
                                lat_val = 0;
                        else if (sscanf(val, "%llu", &v) == 1)
                                lat_val = v * NSEC_PER_USEC;
                        else
                                goto out;
                } else {
                        goto out;
                }
        }

        /* Walk up the tree to see if our new val is lower than it should be. */
        blkg = ctx.blkg;
        oldval = iolat->min_lat_nsec;

        enable = iolatency_set_min_lat_nsec(blkg, lat_val);
        if (enable) {
                if (!blk_get_queue(blkg->q)) {
                        ret = -ENODEV;
                        goto out;
                }
        }

        if (oldval != iolat->min_lat_nsec) {
                iolatency_clear_scaling(blkg);
        }
        ret = 0;
out:
        blkg_conf_finish(&ctx);
        if (ret == 0 && enable) {
                struct iolatency_grp *tmp = blkg_to_lat(blkg);
                struct blk_iolatency *blkiolat = tmp->blkiolat;

                blk_mq_freeze_queue(blkg->q);

                if (enable == 1)
                        atomic_inc(&blkiolat->enabled);
                else if (enable == -1)
                        atomic_dec(&blkiolat->enabled);
                else
                        WARN_ON_ONCE(1);

                blk_mq_unfreeze_queue(blkg->q);

                blk_put_queue(blkg->q);
        }
        return ret ?: nbytes;
}
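/*
 * Illustrative usage (device numbers are made up): the cgroup io.latency file
 * parsed above takes "MAJOR:MINOR target=<microseconds>", e.g.
 *
 *   echo "8:16 target=2000" > io.latency
 *
 * sets a 2ms latency target on device 8:16, while "target=max" clears it.
 */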
static u64 iolatency_prfill_limit(struct seq_file *sf,
                                  struct blkg_policy_data *pd, int off)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname || !iolat->min_lat_nsec)
                return 0;

        seq_printf(sf, "%s target=%llu\n",
                   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
        return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          iolatency_prfill_limit,
                          &blkcg_policy_iolatency, seq_cft(sf)->private, false);
        return 0;
}
static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
{
        struct latency_stat stat;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
        }
        preempt_enable();

        if (iolat->rq_depth.max_depth == UINT_MAX)
                seq_printf(s, " missed=%llu total=%llu depth=max",
                           (unsigned long long)stat.ps.missed,
                           (unsigned long long)stat.ps.total);
        else
                seq_printf(s, " missed=%llu total=%llu depth=%u",
                           (unsigned long long)stat.ps.missed,
                           (unsigned long long)stat.ps.total,
                           iolat->rq_depth.max_depth);
        return true;
}
static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat;
        unsigned long long cur_win;

        if (!blkcg_debug_stats)
                return false;

        if (iolat->ssd)
                return iolatency_ssd_stat(iolat, s);

        avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
        if (iolat->rq_depth.max_depth == UINT_MAX)
                seq_printf(s, " depth=max avg_lat=%llu win=%llu",
                           avg_lat, cur_win);
        else
                seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
                           iolat->rq_depth.max_depth, avg_lat, cur_win);
        return true;
}
static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
                                                   struct request_queue *q,
                                                   struct blkcg *blkcg)
{
        struct iolatency_grp *iolat;

        iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
        if (!iolat)
                return NULL;
        iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
                                          __alignof__(struct latency_stat), gfp);
        if (!iolat->stats) {
                kfree(iolat);
                return NULL;
        }
        return &iolat->pd;
}
static void iolatency_pd_init(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        u64 now = ktime_to_ns(ktime_get());
        int cpu;

        if (blk_queue_nonrot(blkg->q))
                iolat->ssd = true;
        else
                iolat->ssd = false;

        for_each_possible_cpu(cpu) {
                struct latency_stat *stat;
                stat = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_init(iolat, stat);
        }

        latency_stat_init(iolat, &iolat->cur_stat);
        rq_wait_init(&iolat->rq_wait);
        spin_lock_init(&iolat->child_lat.lock);
        iolat->rq_depth.queue_depth = blkg->q->nr_requests;
        iolat->rq_depth.max_depth = UINT_MAX;
        iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
        iolat->blkiolat = blkiolat;
        iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
        atomic64_set(&iolat->window_start, now);

        /*
         * We init things in list order, so the pd for the parent may not be
         * init'ed yet for whatever reason.
         */
        if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
                struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
                atomic_set(&iolat->scale_cookie,
                           atomic_read(&parent->child_lat.scale_cookie));
        } else {
                atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
        }

        atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}
static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct blk_iolatency *blkiolat = iolat->blkiolat;
        int ret;

        ret = iolatency_set_min_lat_nsec(blkg, 0);
        if (ret == 1)
                atomic_inc(&blkiolat->enabled);
        if (ret == -1)
                atomic_dec(&blkiolat->enabled);
        iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);

        free_percpu(iolat->stats);
        kfree(iolat);
}
static struct cftype iolatency_files[] = {
        {
                .name = "latency",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = iolatency_print_limit,
                .write = iolatency_set_limit,
        },
        {}
};

static struct blkcg_policy blkcg_policy_iolatency = {
        .dfl_cftypes = iolatency_files,
        .pd_alloc_fn = iolatency_pd_alloc,
        .pd_init_fn = iolatency_pd_init,
        .pd_offline_fn = iolatency_pd_offline,
        .pd_free_fn = iolatency_pd_free,
        .pd_stat_fn = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
        return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
        blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);