1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
9 * One challenge of controlling IO resources is the lack of a trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough approximations.
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource distribution.
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide variety of devices well enough. Default
42 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
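 *
 * As an illustration (made-up coefficients, not the built-in defaults):
 * if the model assigns a random-IO base cost of 1ms and a per-page cost
 * of 10us of device time, a 64KB (16 page) random read is charged
 * roughly
 *
 *	cost = 1ms + 16 * 10us = 1.16ms of device time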
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
54 * 2-1. Vtime Distribution
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
 *          root
 *        /       \
 *     A (w:100)  B (w:300)
 *      /      \
 *  A0 (w:100)  A1 (w:100)
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
71 * up to 1 (WEIGHT_ONE).
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO if doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
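 *
 * Put differently, the issue decision described above amounts to the
 * following check (simplified pseudo-code, not the exact implementation):
 *
 *	if (iocg_vtime + io_cost <= device_vnow)
 *		issue the bio and charge io_cost to iocg_vtime;
 *	else
 *		wait until device_vnow has advanced enough to cover io_cost;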
84 * 2-2. Vrate Adjustment
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should increase the issue rate.
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
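 *
 * In other words, the device vtime advances as below (this is what
 * ioc_now() computes when taking a snapshot):
 *
 *	vnow = period_at_vtime + (wallclock_now - period_at) * vrate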
102 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default busy signal.
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth lossage. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
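 *
 * For example (illustrative values; see the cgroup-v2 documentation for
 * the exact io.cost.qos syntax), a QoS of "rpct=95 rlat=25000" would
 * treat the device as saturated whenever the 95th percentile read
 * completion latency exceeds 25ms.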
125 * 2-3. Work Conservation
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay for IO control.
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
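 *
 * Continuing the example with concrete numbers: both A and B are
 * configured at weight 100 but A only uses 10% of the device, so A's
 * inuse weight is shrunk until its hierarchical share hovers a bit above
 * 10% while B's effective share grows toward the remaining ~90%, keeping
 * the device fully utilized.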
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, squaring who can
149 * donate and should take back how much requires hweight propagations
150 * anyway making it easier to implement and understand as a separate mechanism.
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The output looks like the following.
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - del_ms : Deferred issuer delay induction level and duration
172 * - usages : Usage history
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
186 #include "blk-cgroup.h"
188 #ifdef CONFIG_TRACEPOINTS
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
208 #else /* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
210 #endif /* CONFIG_TRACEPOINTS */
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
226 MARGIN_TARGET_PCT = 50,
228 INUSE_ADJ_STEP_PCT = 25,
230 /* Have some play in timer operations */
233 /* 1/64k is granular enough and can easily be handled w/ u32 */
234 WEIGHT_ONE = 1 << 16,
237 * As vtime is used to calculate the cost of each IO, it needs to
238 * be fairly high precision. For example, it should be able to
239 * represent the cost of a single page worth of discard with
240 * sufficient accuracy. At the same time, it should be able to
241 * represent reasonably long enough durations to be useful and
242 * convenient during operation.
244 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
245 * granularity and days of wrap-around time even at extreme vrates.
247 VTIME_PER_SEC_SHIFT = 37,
248 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
249 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
250 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
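/*
 * For example, VTIME_PER_NSEC is ~137, i.e. each nanosecond of device
 * time maps to over a hundred vtime units, while a u64 counter
 * incremented at VTIME_PER_SEC takes 2^27 seconds (several years) to
 * wrap at the nominal vrate.
 */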
252 /* bound vrate adjustments within two orders of magnitude */
253 VRATE_MIN_PPM = 10000, /* 1% */
254 VRATE_MAX_PPM = 100000000, /* 10000% */
256 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
257 VRATE_CLAMP_ADJ_PCT = 4,
259 /* switch iff the conditions are met for longer than this */
260 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
264 /* if IOs end up waiting for requests, issue less */
265 RQ_WAIT_BUSY_PCT = 5,
267 /* unbusy hysteresis */
271 * The effect of delay is indirect and non-linear and a huge amount of
272 * future debt can accumulate abruptly while unthrottled. Linearly scale
273 * up delay as debt is going up and then let it decay exponentially.
274 * This gives us quick ramp ups while delay is accumulating and long
275 * tails which can help reduce the frequency of debt explosions on
276 * unthrottle. The parameters are experimentally determined.
278 * The delay mechanism provides adequate protection and behavior in many
279 * cases. However, this is far from ideal and falls short on both
280 * fronts. The debtors are often throttled too harshly costing a
281 * significant level of fairness and possibly total work while the
282 * protection against their impacts on the system can be choppy and unreliable.
285 * The shortcoming primarily stems from the fact that, unlike for page
286 * cache, the kernel doesn't have a well-defined back-pressure propagation
287 * mechanism and policies for anonymous memory. Fully addressing this
288 * issue will likely require substantial improvements in the area.
290 MIN_DELAY_THR_PCT = 500,
291 MAX_DELAY_THR_PCT = 25000,
293 MAX_DELAY = 250 * USEC_PER_MSEC,
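/*
 * For example (arithmetic on the parameters above), debt sitting at
 * 12750% of a period's worth of vtime - halfway between
 * MIN_DELAY_THR_PCT and MAX_DELAY_THR_PCT - maps linearly to a delay
 * halfway between MIN_DELAY and MAX_DELAY (see iocg_kick_delay()).
 */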
295 /* halve debts if avg usage over 100ms is under 50% */
297 DFGV_PERIOD = 100 * USEC_PER_MSEC,
299 /* don't let cmds which take a very long time pin lagging for too long */
300 MAX_LAGGING_PERIODS = 10,
303 * Count IO size in 4k pages. The 12bit shift helps keep
304 * size-proportional components of cost calculation in closer
305 * numbers of digits to per-IO cost components.
308 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
309 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
311 /* if apart further than 16M, consider randio for linear model */
312 LCOEF_RANDIO_PAGES = 4096,
321 /* io.cost.qos controls including per-dev enable of the whole controller */
328 /* io.cost.qos params */
339 /* io.cost.model controls */
346 /* builtin linear cost model coefficients */
376 u32 qos[NR_QOS_PARAMS];
377 u64 i_lcoefs[NR_I_LCOEFS];
378 u64 lcoefs[NR_LCOEFS];
379 u32 too_fast_vrate_pct;
380 u32 too_slow_vrate_pct;
396 struct ioc_pcpu_stat {
397 struct ioc_missed missed[2];
399 local64_t rq_wait_ns;
409 struct ioc_params params;
410 struct ioc_margins margins;
417 struct timer_list timer;
418 struct list_head active_iocgs; /* active cgroups */
419 struct ioc_pcpu_stat __percpu *pcpu_stat;
421 enum ioc_running running;
422 atomic64_t vtime_rate;
426 seqcount_spinlock_t period_seqcount;
427 u64 period_at; /* wallclock starttime */
428 u64 period_at_vtime; /* vtime starttime */
430 atomic64_t cur_period; /* inc'd each period */
431 int busy_level; /* saturation history */
433 bool weights_updated;
434 atomic_t hweight_gen; /* for lazy hweights */
436 /* debt forgiveness */
439 u64 dfgv_usage_us_sum;
441 u64 autop_too_fast_at;
442 u64 autop_too_slow_at;
444 bool user_qos_params:1;
445 bool user_cost_model:1;
448 struct iocg_pcpu_stat {
449 local64_t abs_vusage;
459 /* per device-cgroup pair */
461 struct blkg_policy_data pd;
465 * An iocg can get its weight from two sources - an explicit
466 * per-device-cgroup configuration or the default weight of the
467 * cgroup. `cfg_weight` is the explicit per-device-cgroup
468 * configuration. `weight` is the effective weight considering both sources.
471 * When an idle cgroup becomes active its `active` goes from 0 to
472 * `weight`. `inuse` is the surplus adjusted active weight.
473 * `active` and `inuse` are used to calculate `hweight_active` and
476 * `last_inuse` remembers `inuse` while an iocg is idle to persist
477 * surplus adjustments.
479 * `inuse` may be adjusted dynamically during period. `saved_*` are used
480 * to determine and track adjustments.
490 sector_t cursor; /* to detect randio */
493 * `vtime` is this iocg's vtime cursor which progresses as IOs are
494 * issued. If lagging behind device vtime, the delta represents
495 * the currently available IO budget. If running ahead, the overage.
498 * `done_vtime` is the same but progressed on completion rather
499 * than issue. The delta behind `vtime` represents the cost of
500 * currently in-flight IOs.
503 atomic64_t done_vtime;
506 /* current delay in effect and when it started */
511 * The period this iocg was last active in. Used for deactivation
512 * and invalidating `vtime`.
514 atomic64_t active_period;
515 struct list_head active_list;
517 /* see __propagate_weights() and current_hweight() for details */
518 u64 child_active_sum;
520 u64 child_adjusted_sum;
524 u32 hweight_donating;
525 u32 hweight_after_donation;
527 struct list_head walk_list;
528 struct list_head surplus_list;
530 struct wait_queue_head waitq;
531 struct hrtimer waitq_timer;
533 /* timestamp at the latest activation */
537 struct iocg_pcpu_stat __percpu *pcpu_stat;
538 struct iocg_stat stat;
539 struct iocg_stat last_stat;
540 u64 last_stat_abs_vusage;
546 /* this iocg's depth in the hierarchy and ancestors including self */
548 struct ioc_gq *ancestors[];
553 struct blkcg_policy_data cpd;
554 unsigned int dfl_weight;
565 struct wait_queue_entry wait;
571 struct iocg_wake_ctx {
577 static const struct ioc_params autop[] = {
580 [QOS_RLAT] = 250000, /* 250ms */
582 [QOS_MIN] = VRATE_MIN_PPM,
583 [QOS_MAX] = VRATE_MAX_PPM,
586 [I_LCOEF_RBPS] = 174019176,
587 [I_LCOEF_RSEQIOPS] = 41708,
588 [I_LCOEF_RRANDIOPS] = 370,
589 [I_LCOEF_WBPS] = 178075866,
590 [I_LCOEF_WSEQIOPS] = 42705,
591 [I_LCOEF_WRANDIOPS] = 378,
596 [QOS_RLAT] = 25000, /* 25ms */
598 [QOS_MIN] = VRATE_MIN_PPM,
599 [QOS_MAX] = VRATE_MAX_PPM,
602 [I_LCOEF_RBPS] = 245855193,
603 [I_LCOEF_RSEQIOPS] = 61575,
604 [I_LCOEF_RRANDIOPS] = 6946,
605 [I_LCOEF_WBPS] = 141365009,
606 [I_LCOEF_WSEQIOPS] = 33716,
607 [I_LCOEF_WRANDIOPS] = 26796,
612 [QOS_RLAT] = 25000, /* 25ms */
614 [QOS_MIN] = VRATE_MIN_PPM,
615 [QOS_MAX] = VRATE_MAX_PPM,
618 [I_LCOEF_RBPS] = 488636629,
619 [I_LCOEF_RSEQIOPS] = 8932,
620 [I_LCOEF_RRANDIOPS] = 8518,
621 [I_LCOEF_WBPS] = 427891549,
622 [I_LCOEF_WSEQIOPS] = 28755,
623 [I_LCOEF_WRANDIOPS] = 21940,
625 .too_fast_vrate_pct = 500,
629 [QOS_RLAT] = 5000, /* 5ms */
631 [QOS_MIN] = VRATE_MIN_PPM,
632 [QOS_MAX] = VRATE_MAX_PPM,
635 [I_LCOEF_RBPS] = 3102524156LLU,
636 [I_LCOEF_RSEQIOPS] = 724816,
637 [I_LCOEF_RRANDIOPS] = 778122,
638 [I_LCOEF_WBPS] = 1742780862LLU,
639 [I_LCOEF_WSEQIOPS] = 425702,
640 [I_LCOEF_WRANDIOPS] = 443193,
642 .too_slow_vrate_pct = 10,
647 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
648 * vtime credit shortage and down on device saturation.
650 static u32 vrate_adj_pct[] =
652 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
653 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
654 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
656 static struct blkcg_policy blkcg_policy_iocost;
658 /* accessors and helpers */
659 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
661 return container_of(rqos, struct ioc, rqos);
664 static struct ioc *q_to_ioc(struct request_queue *q)
666 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
669 static const char __maybe_unused *ioc_name(struct ioc *ioc)
671 struct gendisk *disk = ioc->rqos.q->disk;
675 return disk->disk_name;
678 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
680 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
683 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
685 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
688 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
690 return pd_to_blkg(&iocg->pd);
693 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
695 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
696 struct ioc_cgrp, cpd);
700 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
701 * weight, the more expensive each IO. Must round up.
703 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
705 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
709 * The inverse of abs_cost_to_cost(). Must round up.
711 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
713 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
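/*
 * Example (illustrative): with @hw_inuse at half of the device
 * (WEIGHT_ONE / 2), abs_cost_to_cost() doubles the charge - an IO whose
 * absolute cost is 1000 * VTIME_PER_USEC (1ms of device time) consumes
 * 2000 * VTIME_PER_USEC of the cgroup's vtime - and cost_to_abs_cost()
 * inverts the scaling.
 */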
716 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
717 u64 abs_cost, u64 cost)
719 struct iocg_pcpu_stat *gcs;
721 bio->bi_iocost_cost = cost;
722 atomic64_add(cost, &iocg->vtime);
724 gcs = get_cpu_ptr(iocg->pcpu_stat);
725 local64_add(abs_cost, &gcs->abs_vusage);
729 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
732 spin_lock_irqsave(&iocg->ioc->lock, *flags);
733 spin_lock(&iocg->waitq.lock);
735 spin_lock_irqsave(&iocg->waitq.lock, *flags);
739 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
742 spin_unlock(&iocg->waitq.lock);
743 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
745 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
749 #define CREATE_TRACE_POINTS
750 #include <trace/events/iocost.h>
752 static void ioc_refresh_margins(struct ioc *ioc)
754 struct ioc_margins *margins = &ioc->margins;
755 u32 period_us = ioc->period_us;
756 u64 vrate = ioc->vtime_base_rate;
758 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
759 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
760 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
763 /* latency QoS params changed, update period_us and all the dependent params */
764 static void ioc_refresh_period_us(struct ioc *ioc)
766 u32 ppm, lat, multi, period_us;
768 lockdep_assert_held(&ioc->lock);
770 /* pick the higher latency target */
771 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
772 ppm = ioc->params.qos[QOS_RPPM];
773 lat = ioc->params.qos[QOS_RLAT];
775 ppm = ioc->params.qos[QOS_WPPM];
776 lat = ioc->params.qos[QOS_WLAT];
780 * We want the period to be long enough to contain a healthy number
781 * of IOs while short enough for granular control. Define it as a
782 * multiple of the latency target. Ideally, the multiplier should
783 * be scaled according to the percentile so that it would nominally
784 * contain a certain number of requests. Let's be simpler and
785 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
788 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
791 period_us = multi * lat;
792 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
794 /* calculate dependent params */
795 ioc->period_us = period_us;
796 ioc->timer_slack_ns = div64_u64(
797 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
799 ioc_refresh_margins(ioc);
802 static int ioc_autop_idx(struct ioc *ioc)
804 int idx = ioc->autop_idx;
805 const struct ioc_params *p = &autop[idx];
810 if (!blk_queue_nonrot(ioc->rqos.q))
813 /* handle SATA SSDs w/ broken NCQ */
814 if (blk_queue_depth(ioc->rqos.q) == 1)
815 return AUTOP_SSD_QD1;
817 /* use one of the normal ssd sets */
818 if (idx < AUTOP_SSD_DFL)
819 return AUTOP_SSD_DFL;
821 /* if user is overriding anything, maintain what was there */
822 if (ioc->user_qos_params || ioc->user_cost_model)
825 /* step up/down based on the vrate */
826 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
827 now_ns = ktime_get_ns();
829 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
830 if (!ioc->autop_too_fast_at)
831 ioc->autop_too_fast_at = now_ns;
832 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
835 ioc->autop_too_fast_at = 0;
838 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
839 if (!ioc->autop_too_slow_at)
840 ioc->autop_too_slow_at = now_ns;
841 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
844 ioc->autop_too_slow_at = 0;
851 * Take the following as input
853 * @bps maximum sequential throughput
854 * @seqiops maximum sequential 4k iops
855 * @randiops maximum random 4k iops
857 * and calculate the linear model cost coefficients.
859 * *@page per-page cost 1s / (@bps / 4096)
860 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
861 * *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
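 *
 * For example (made-up device numbers): @bps = 400MB/s, @seqiops = 100k
 * and @randiops = 10k give roughly *@page = 10us of device time per page
 * (1s / ~100k pages/s), *@seqio = max(10us - 10us, 0) = 0 and
 * *@randio = max(100us - 10us, 0) = 90us of device time per IO.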
863 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
864 u64 *page, u64 *seqio, u64 *randio)
868 *page = *seqio = *randio = 0;
871 u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
874 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
880 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
886 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
892 static void ioc_refresh_lcoefs(struct ioc *ioc)
894 u64 *u = ioc->params.i_lcoefs;
895 u64 *c = ioc->params.lcoefs;
897 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
898 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
899 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
900 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
903 static bool ioc_refresh_params(struct ioc *ioc, bool force)
905 const struct ioc_params *p;
908 lockdep_assert_held(&ioc->lock);
910 idx = ioc_autop_idx(ioc);
913 if (idx == ioc->autop_idx && !force)
916 if (idx != ioc->autop_idx)
917 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
919 ioc->autop_idx = idx;
920 ioc->autop_too_fast_at = 0;
921 ioc->autop_too_slow_at = 0;
923 if (!ioc->user_qos_params)
924 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
925 if (!ioc->user_cost_model)
926 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
928 ioc_refresh_period_us(ioc);
929 ioc_refresh_lcoefs(ioc);
931 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
932 VTIME_PER_USEC, MILLION);
933 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
934 VTIME_PER_USEC, MILLION);
940 * When an iocg accumulates too much vtime or gets deactivated, we throw away
941 * some vtime, which lowers the overall device utilization. As the exact amount
942 * which is being thrown away is known, we can compensate by accelerating the
943 * vrate accordingly so that the extra vtime generated in the current period
944 * matches what got lost.
946 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
948 s64 pleft = ioc->period_at + ioc->period_us - now->now;
949 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
950 s64 vcomp, vcomp_min, vcomp_max;
952 lockdep_assert_held(&ioc->lock);
954 /* we need some time left in this period */
959 * Calculate how much vrate should be adjusted to offset the error.
960 * Limit the amount of adjustment and deduct the adjusted amount from the error.
963 vcomp = -div64_s64(ioc->vtime_err, pleft);
964 vcomp_min = -(ioc->vtime_base_rate >> 1);
965 vcomp_max = ioc->vtime_base_rate;
966 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
968 ioc->vtime_err += vcomp * pleft;
970 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
972 /* bound how much error can accumulate */
973 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
976 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
977 int nr_lagging, int nr_shortages,
978 int prev_busy_level, u32 *missed_ppm)
980 u64 vrate = ioc->vtime_base_rate;
981 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
983 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
984 if (ioc->busy_level != prev_busy_level || nr_lagging)
985 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
986 missed_ppm, rq_wait_pct,
987 nr_lagging, nr_shortages);
993 * If vrate is out of bounds, apply clamp gradually as the
994 * bounds can change abruptly. Otherwise, apply busy_level based adjustment.
997 if (vrate < vrate_min) {
998 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
999 vrate = min(vrate, vrate_min);
1000 } else if (vrate > vrate_max) {
1001 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1002 vrate = max(vrate, vrate_max);
1004 int idx = min_t(int, abs(ioc->busy_level),
1005 ARRAY_SIZE(vrate_adj_pct) - 1);
1006 u32 adj_pct = vrate_adj_pct[idx];
1008 if (ioc->busy_level > 0)
1009 adj_pct = 100 - adj_pct;
1011 adj_pct = 100 + adj_pct;
1013 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1014 vrate_min, vrate_max);
1017 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1018 nr_lagging, nr_shortages);
1020 ioc->vtime_base_rate = vrate;
1021 ioc_refresh_margins(ioc);
1024 /* take a snapshot of the current [v]time and vrate */
1025 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1029 now->now_ns = ktime_get();
1030 now->now = ktime_to_us(now->now_ns);
1031 now->vrate = atomic64_read(&ioc->vtime_rate);
1034 * The current vtime is
1036 * vtime at period start + (wallclock time since the start) * vrate
1038 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1039 * needed, they're seqcount protected.
1042 seq = read_seqcount_begin(&ioc->period_seqcount);
1043 now->vnow = ioc->period_at_vtime +
1044 (now->now - ioc->period_at) * now->vrate;
1045 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1048 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1050 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1052 write_seqcount_begin(&ioc->period_seqcount);
1053 ioc->period_at = now->now;
1054 ioc->period_at_vtime = now->vnow;
1055 write_seqcount_end(&ioc->period_seqcount);
1057 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1058 add_timer(&ioc->timer);
1062 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1063 * weight sums and propagate upwards accordingly. If @save, the current margin
1064 * is saved to be used as reference for later inuse in-period adjustments.
1066 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1067 bool save, struct ioc_now *now)
1069 struct ioc *ioc = iocg->ioc;
1072 lockdep_assert_held(&ioc->lock);
1075 * For an active leaf node, its inuse shouldn't be zero or exceed
1076 * @active. An active internal node's inuse is solely determined by the
1077 * inuse to active ratio of its children regardless of @inuse.
1079 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1080 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1081 iocg->child_active_sum);
1083 inuse = clamp_t(u32, inuse, 1, active);
1086 iocg->last_inuse = iocg->inuse;
1088 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1090 if (active == iocg->active && inuse == iocg->inuse)
1093 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1094 struct ioc_gq *parent = iocg->ancestors[lvl];
1095 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1096 u32 parent_active = 0, parent_inuse = 0;
1098 /* update the level sums */
1099 parent->child_active_sum += (s32)(active - child->active);
1100 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1101 /* apply the updates */
1102 child->active = active;
1103 child->inuse = inuse;
1106 * The delta between inuse and active sums indicates that
1107 * much of weight is being given away. Parent's inuse
1108 * and active should reflect the ratio.
1110 if (parent->child_active_sum) {
1111 parent_active = parent->weight;
1112 parent_inuse = DIV64_U64_ROUND_UP(
1113 parent_active * parent->child_inuse_sum,
1114 parent->child_active_sum);
1117 /* do we need to keep walking up? */
1118 if (parent_active == parent->active &&
1119 parent_inuse == parent->inuse)
1122 active = parent_active;
1123 inuse = parent_inuse;
1126 ioc->weights_updated = true;
1129 static void commit_weights(struct ioc *ioc)
1131 lockdep_assert_held(&ioc->lock);
1133 if (ioc->weights_updated) {
1134 /* paired with rmb in current_hweight(), see there */
1136 atomic_inc(&ioc->hweight_gen);
1137 ioc->weights_updated = false;
1141 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1142 bool save, struct ioc_now *now)
1144 __propagate_weights(iocg, active, inuse, save, now);
1145 commit_weights(iocg->ioc);
1148 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1150 struct ioc *ioc = iocg->ioc;
1155 /* hot path - if uptodate, use cached */
1156 ioc_gen = atomic_read(&ioc->hweight_gen);
1157 if (ioc_gen == iocg->hweight_gen)
1161 * Paired with wmb in commit_weights(). If we saw the updated
1162 * hweight_gen, all the weight updates from __propagate_weights() are visible.
1165 * We can race with weight updates during calculation and get it
1166 * wrong. However, hweight_gen would have changed and a future
1167 * reader will recalculate and we're guaranteed to discard the
1168 * wrong result soon.
1172 hwa = hwi = WEIGHT_ONE;
1173 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1174 struct ioc_gq *parent = iocg->ancestors[lvl];
1175 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1176 u64 active_sum = READ_ONCE(parent->child_active_sum);
1177 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1178 u32 active = READ_ONCE(child->active);
1179 u32 inuse = READ_ONCE(child->inuse);
1181 /* we can race with deactivations and either may read as zero */
1182 if (!active_sum || !inuse_sum)
1185 active_sum = max_t(u64, active, active_sum);
1186 hwa = div64_u64((u64)hwa * active, active_sum);
1188 inuse_sum = max_t(u64, inuse, inuse_sum);
1189 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1192 iocg->hweight_active = max_t(u32, hwa, 1);
1193 iocg->hweight_inuse = max_t(u32, hwi, 1);
1194 iocg->hweight_gen = ioc_gen;
1197 *hw_activep = iocg->hweight_active;
1199 *hw_inusep = iocg->hweight_inuse;
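/*
 * Example (illustrative, using the hierarchy from the comment at the top
 * of the file): with A (active 100) and B (active 300) under the root and
 * A0, A1 (active 100 each) under A, walking A0's ancestors gives
 *
 *	hwa(A0) = WEIGHT_ONE * 100 / (100 + 300) * 100 / (100 + 100)
 *	        = WEIGHT_ONE / 8		(12.5%)
 */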
1203 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1204 * other weights stay unchanged.
1206 static u32 current_hweight_max(struct ioc_gq *iocg)
1208 u32 hwm = WEIGHT_ONE;
1209 u32 inuse = iocg->active;
1210 u64 child_inuse_sum;
1213 lockdep_assert_held(&iocg->ioc->lock);
1215 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1216 struct ioc_gq *parent = iocg->ancestors[lvl];
1217 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1219 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1220 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1221 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1222 parent->child_active_sum);
1225 return max_t(u32, hwm, 1);
1228 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1230 struct ioc *ioc = iocg->ioc;
1231 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1232 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1235 lockdep_assert_held(&ioc->lock);
1237 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1238 if (weight != iocg->weight && iocg->active)
1239 propagate_weights(iocg, weight, iocg->inuse, true, now);
1240 iocg->weight = weight;
1243 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1245 struct ioc *ioc = iocg->ioc;
1246 u64 last_period, cur_period;
1251 * If we seem to be already active, just update the stamp to tell the
1252 * timer that we're still active. We don't mind occasional races.
1254 if (!list_empty(&iocg->active_list)) {
1256 cur_period = atomic64_read(&ioc->cur_period);
1257 if (atomic64_read(&iocg->active_period) != cur_period)
1258 atomic64_set(&iocg->active_period, cur_period);
1262 /* racy check on internal node IOs, treat as root level IOs */
1263 if (iocg->child_active_sum)
1266 spin_lock_irq(&ioc->lock);
1271 cur_period = atomic64_read(&ioc->cur_period);
1272 last_period = atomic64_read(&iocg->active_period);
1273 atomic64_set(&iocg->active_period, cur_period);
1275 /* already activated or breaking leaf-only constraint? */
1276 if (!list_empty(&iocg->active_list))
1277 goto succeed_unlock;
1278 for (i = iocg->level - 1; i > 0; i--)
1279 if (!list_empty(&iocg->ancestors[i]->active_list))
1282 if (iocg->child_active_sum)
1286 * Always start with the target budget. On deactivation, we throw away
1287 * anything above it.
1289 vtarget = now->vnow - ioc->margins.target;
1290 vtime = atomic64_read(&iocg->vtime);
1292 atomic64_add(vtarget - vtime, &iocg->vtime);
1293 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1297 * Activate, propagate weight and start period timer if not
1298 * running. Reset hweight_gen to avoid accidental match from wrapping around.
1301 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1302 list_add(&iocg->active_list, &ioc->active_iocgs);
1304 propagate_weights(iocg, iocg->weight,
1305 iocg->last_inuse ?: iocg->weight, true, now);
1307 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1308 last_period, cur_period, vtime);
1310 iocg->activated_at = now->now;
1312 if (ioc->running == IOC_IDLE) {
1313 ioc->running = IOC_RUNNING;
1314 ioc->dfgv_period_at = now->now;
1315 ioc->dfgv_period_rem = 0;
1316 ioc_start_period(ioc, now);
1320 spin_unlock_irq(&ioc->lock);
1324 spin_unlock_irq(&ioc->lock);
1328 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1330 struct ioc *ioc = iocg->ioc;
1331 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1332 u64 tdelta, delay, new_delay;
1333 s64 vover, vover_pct;
1336 lockdep_assert_held(&iocg->waitq.lock);
1338 /* calculate the current delay in effect - 1/2 every second */
1339 tdelta = now->now - iocg->delay_at;
1341 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1345 /* calculate the new delay from the debt amount */
1346 current_hweight(iocg, &hwa, NULL);
1347 vover = atomic64_read(&iocg->vtime) +
1348 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1349 vover_pct = div64_s64(100 * vover,
1350 ioc->period_us * ioc->vtime_base_rate);
1352 if (vover_pct <= MIN_DELAY_THR_PCT)
1354 else if (vover_pct >= MAX_DELAY_THR_PCT)
1355 new_delay = MAX_DELAY;
1357 new_delay = MIN_DELAY +
1358 div_u64((MAX_DELAY - MIN_DELAY) *
1359 (vover_pct - MIN_DELAY_THR_PCT),
1360 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1362 /* pick the higher one and apply */
1363 if (new_delay > delay) {
1364 iocg->delay = new_delay;
1365 iocg->delay_at = now->now;
1369 if (delay >= MIN_DELAY) {
1370 if (!iocg->indelay_since)
1371 iocg->indelay_since = now->now;
1372 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1375 if (iocg->indelay_since) {
1376 iocg->stat.indelay_us += now->now - iocg->indelay_since;
1377 iocg->indelay_since = 0;
1380 blkcg_clear_delay(blkg);
1385 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1386 struct ioc_now *now)
1388 struct iocg_pcpu_stat *gcs;
1390 lockdep_assert_held(&iocg->ioc->lock);
1391 lockdep_assert_held(&iocg->waitq.lock);
1392 WARN_ON_ONCE(list_empty(&iocg->active_list));
1395 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1396 * inuse donating all of its share to others until its debt is paid off.
1398 if (!iocg->abs_vdebt && abs_cost) {
1399 iocg->indebt_since = now->now;
1400 propagate_weights(iocg, iocg->active, 0, false, now);
1403 iocg->abs_vdebt += abs_cost;
1405 gcs = get_cpu_ptr(iocg->pcpu_stat);
1406 local64_add(abs_cost, &gcs->abs_vusage);
1410 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1411 struct ioc_now *now)
1413 lockdep_assert_held(&iocg->ioc->lock);
1414 lockdep_assert_held(&iocg->waitq.lock);
1416 /* make sure that nobody messed with @iocg */
1417 WARN_ON_ONCE(list_empty(&iocg->active_list));
1418 WARN_ON_ONCE(iocg->inuse > 1);
1420 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1422 /* if debt is paid in full, restore inuse */
1423 if (!iocg->abs_vdebt) {
1424 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1425 iocg->indebt_since = 0;
1427 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1432 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1433 int flags, void *key)
1435 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1436 struct iocg_wake_ctx *ctx = key;
1437 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1439 ctx->vbudget -= cost;
1441 if (ctx->vbudget < 0)
1444 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1445 wait->committed = true;
1448 * autoremove_wake_function() removes the wait entry only when it
1449 * actually changed the task state. We want the wait always removed.
1450 * Remove explicitly and use default_wake_function(). Note that the
1451 * order of operations is important as finish_wait() tests whether
1452 * @wq_entry is removed without grabbing the lock.
1454 default_wake_function(wq_entry, mode, flags, key);
1455 list_del_init_careful(&wq_entry->entry);
1460 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1461 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1462 * addition to iocg->waitq.lock.
1464 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1465 struct ioc_now *now)
1467 struct ioc *ioc = iocg->ioc;
1468 struct iocg_wake_ctx ctx = { .iocg = iocg };
1469 u64 vshortage, expires, oexpires;
1473 lockdep_assert_held(&iocg->waitq.lock);
1475 current_hweight(iocg, &hwa, NULL);
1476 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1479 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1480 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1481 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1482 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1484 lockdep_assert_held(&ioc->lock);
1486 atomic64_add(vpay, &iocg->vtime);
1487 atomic64_add(vpay, &iocg->done_vtime);
1488 iocg_pay_debt(iocg, abs_vpay, now);
1492 if (iocg->abs_vdebt || iocg->delay)
1493 iocg_kick_delay(iocg, now);
1496 * Debt can still be outstanding if we haven't paid all yet or the
1497 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1498 * under debt. Make sure @vbudget reflects the outstanding amount and is not positive.
1501 if (iocg->abs_vdebt) {
1502 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1503 vbudget = min_t(s64, 0, vbudget - vdebt);
1507 * Wake up the ones which are due and see how much vtime we'll need for
1508 * the next one. As paying off debt restores hw_inuse, it must be read
1509 * after the above debt payment.
1511 ctx.vbudget = vbudget;
1512 current_hweight(iocg, NULL, &ctx.hw_inuse);
1514 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1516 if (!waitqueue_active(&iocg->waitq)) {
1517 if (iocg->wait_since) {
1518 iocg->stat.wait_us += now->now - iocg->wait_since;
1519 iocg->wait_since = 0;
1524 if (!iocg->wait_since)
1525 iocg->wait_since = now->now;
1527 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1530 /* determine next wakeup, add a timer margin to guarantee chunking */
1531 vshortage = -ctx.vbudget;
1532 expires = now->now_ns +
1533 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1535 expires += ioc->timer_slack_ns;
1537 /* if already active and close enough, don't bother */
1538 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1539 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1540 abs(oexpires - expires) <= ioc->timer_slack_ns)
1543 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1544 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1547 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1549 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1550 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1552 unsigned long flags;
1554 ioc_now(iocg->ioc, &now);
1556 iocg_lock(iocg, pay_debt, &flags);
1557 iocg_kick_waitq(iocg, pay_debt, &now);
1558 iocg_unlock(iocg, pay_debt, &flags);
1560 return HRTIMER_NORESTART;
1563 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1565 u32 nr_met[2] = { };
1566 u32 nr_missed[2] = { };
1570 for_each_online_cpu(cpu) {
1571 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1572 u64 this_rq_wait_ns;
1574 for (rw = READ; rw <= WRITE; rw++) {
1575 u32 this_met = local_read(&stat->missed[rw].nr_met);
1576 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1578 nr_met[rw] += this_met - stat->missed[rw].last_met;
1579 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1580 stat->missed[rw].last_met = this_met;
1581 stat->missed[rw].last_missed = this_missed;
1584 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1585 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1586 stat->last_rq_wait_ns = this_rq_wait_ns;
1589 for (rw = READ; rw <= WRITE; rw++) {
1590 if (nr_met[rw] + nr_missed[rw])
1592 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1593 nr_met[rw] + nr_missed[rw]);
1595 missed_ppm_ar[rw] = 0;
1598 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1599 ioc->period_us * NSEC_PER_USEC);
1602 /* was iocg idle this period? */
1603 static bool iocg_is_idle(struct ioc_gq *iocg)
1605 struct ioc *ioc = iocg->ioc;
1607 /* did something get issued this period? */
1608 if (atomic64_read(&iocg->active_period) ==
1609 atomic64_read(&ioc->cur_period))
1612 /* is something in flight? */
1613 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1620 * Call this function on the target leaf @iocg's to build pre-order traversal
1621 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1622 * ->walk_list and the caller is responsible for dissolving the list after use.
1624 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1625 struct list_head *inner_walk)
1629 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1631 /* find the first ancestor which hasn't been visited yet */
1632 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1633 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1637 /* walk down and visit the inner nodes to get pre-order traversal */
1638 while (++lvl <= iocg->level - 1) {
1639 struct ioc_gq *inner = iocg->ancestors[lvl];
1641 /* record traversal order */
1642 list_add_tail(&inner->walk_list, inner_walk);
1646 /* propagate the deltas to the parent */
1647 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1649 if (iocg->level > 0) {
1650 struct iocg_stat *parent_stat =
1651 &iocg->ancestors[iocg->level - 1]->stat;
1653 parent_stat->usage_us +=
1654 iocg->stat.usage_us - iocg->last_stat.usage_us;
1655 parent_stat->wait_us +=
1656 iocg->stat.wait_us - iocg->last_stat.wait_us;
1657 parent_stat->indebt_us +=
1658 iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1659 parent_stat->indelay_us +=
1660 iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1663 iocg->last_stat = iocg->stat;
1666 /* collect per-cpu counters and propagate the deltas to the parent */
1667 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1669 struct ioc *ioc = iocg->ioc;
1674 lockdep_assert_held(&iocg->ioc->lock);
1676 /* collect per-cpu counters */
1677 for_each_possible_cpu(cpu) {
1678 abs_vusage += local64_read(
1679 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1681 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1682 iocg->last_stat_abs_vusage = abs_vusage;
1684 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1685 iocg->stat.usage_us += iocg->usage_delta_us;
1687 iocg_flush_stat_upward(iocg);
1690 /* get stat counters ready for reading on all active iocgs */
1691 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1693 LIST_HEAD(inner_walk);
1694 struct ioc_gq *iocg, *tiocg;
1696 /* flush leaves and build inner node walk list */
1697 list_for_each_entry(iocg, target_iocgs, active_list) {
1698 iocg_flush_stat_leaf(iocg, now);
1699 iocg_build_inner_walk(iocg, &inner_walk);
1702 /* keep flushing upwards by walking the inner list backwards */
1703 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1704 iocg_flush_stat_upward(iocg);
1705 list_del_init(&iocg->walk_list);
1710 * Determine what @iocg's hweight_inuse should be after donating unused
1711 * capacity. @hwm is the upper bound and used to signal no donation. This
1712 * function also throws away @iocg's excess budget.
1714 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1715 u32 usage, struct ioc_now *now)
1717 struct ioc *ioc = iocg->ioc;
1718 u64 vtime = atomic64_read(&iocg->vtime);
1719 s64 excess, delta, target, new_hwi;
1721 /* debt handling owns inuse for debtors */
1722 if (iocg->abs_vdebt)
1725 /* see whether minimum margin requirement is met */
1726 if (waitqueue_active(&iocg->waitq) ||
1727 time_after64(vtime, now->vnow - ioc->margins.min))
1730 /* throw away excess above target */
1731 excess = now->vnow - vtime - ioc->margins.target;
1733 atomic64_add(excess, &iocg->vtime);
1734 atomic64_add(excess, &iocg->done_vtime);
1736 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1740 * Let's say the distance between iocg's and device's vtimes as a
1741 * fraction of period duration is delta. Assuming that the iocg will
1742 * consume the usage determined above, we want to determine new_hwi so
1743 * that delta equals MARGIN_TARGET at the end of the next period.
1745 * We need to execute usage worth of IOs while spending the sum of the
1746 * new budget (1 - MARGIN_TARGET) and the leftover from the last period (delta):
1749 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1751 * Therefore, the new_hwi is:
1753 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
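 *
 * For example (illustrative numbers), an iocg which used 30% of the
 * device over the period while its vtime lags the device vtime by 80% of
 * a period (delta = 0.8) gets, with MARGIN_TARGET at 50%,
 *
 *	new_hwi = 0.30 / (1 - 0.5 + 0.8) ~= 23%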
1755 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1756 now->vnow - ioc->period_at_vtime);
1757 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1758 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1760 return clamp_t(s64, new_hwi, 1, hwm);
1764 * For work-conservation, an iocg which isn't using all of its share should
1765 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1766 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1768 * #1 is mathematically simpler but has the drawback of requiring synchronous
1769 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1770 * change due to donation snapbacks as it has the possibility of grossly
1771 * overshooting what's allowed by the model and vrate.
1773 * #2 is inherently safe with local operations. The donating iocg can easily
1774 * snap back to higher weights when needed without worrying about impacts on
1775 * other nodes as the impacts will be inherently correct. This also makes idle
1776 * iocg activations safe. The only effect activations have is decreasing
1777 * hweight_inuse of others, the right solution to which is for those iocgs to
1778 * snap back to higher weights.
1780 * So, we go with #2. The challenge is calculating how each donating iocg's
1781 * inuse should be adjusted to achieve the target donation amounts. This is done
1782 * using Andy's method described in the following pdf.
1784 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1786 * Given the weights and target after-donation hweight_inuse values, Andy's
1787 * method determines how the proportional distribution should look like at each
1788 * sibling level to maintain the relative relationship between all non-donating
1789 * pairs. To roughly summarize, it divides the tree into donating and
1790 * non-donating parts, calculates global donation rate which is used to
1791 * determine the target hweight_inuse for each node, and then derives per-level proportions.
1794 * The following pdf shows that global distribution calculated this way can be
1795 * achieved by scaling inuse weights of donating leaves and propagating the
1796 * adjustments upwards proportionally.
1798 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1800 * Combining the above two, we can determine how each leaf iocg's inuse should
1801 * be adjusted to achieve the target donation.
1803 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1805 * The inline comments use symbols from the last pdf.
1807 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1808 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1809 * t is the sum of the absolute budgets of donating nodes in the subtree.
1810 * w is the weight of the node. w = w_f + w_t
1811 * w_f is the non-donating portion of w. w_f = w * f / b
1812 * w_t is the donating portion of w. w_t = w * t / b
1813 * s is the sum of all sibling weights. s = Sum(w) for siblings
1814 * s_f and s_t are the non-donating and donating portions of s.
1816 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1817 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1818 * after adjustments. Subscript r denotes the root node's values.
1820 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1822 LIST_HEAD(over_hwa);
1823 LIST_HEAD(inner_walk);
1824 struct ioc_gq *iocg, *tiocg, *root_iocg;
1825 u32 after_sum, over_sum, over_target, gamma;
1828 * It's pretty unlikely but possible for the total sum of
1829 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1830 * confuse the following calculations. If such condition is detected,
1831 * scale down everyone over its full share equally to keep the sum below WEIGHT_ONE.
1836 list_for_each_entry(iocg, surpluses, surplus_list) {
1839 current_hweight(iocg, &hwa, NULL);
1840 after_sum += iocg->hweight_after_donation;
1842 if (iocg->hweight_after_donation > hwa) {
1843 over_sum += iocg->hweight_after_donation;
1844 list_add(&iocg->walk_list, &over_hwa);
1848 if (after_sum >= WEIGHT_ONE) {
1850 * The delta should be deducted from the over_sum, calculate
1851 * target over_sum value.
1853 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1854 WARN_ON_ONCE(over_sum <= over_delta);
1855 over_target = over_sum - over_delta;
1860 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1862 iocg->hweight_after_donation =
1863 div_u64((u64)iocg->hweight_after_donation *
1864 over_target, over_sum);
1865 list_del_init(&iocg->walk_list);
1869 * Build pre-order inner node walk list and prepare for donation
1870 * adjustment calculations.
1872 list_for_each_entry(iocg, surpluses, surplus_list) {
1873 iocg_build_inner_walk(iocg, &inner_walk);
1876 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1877 WARN_ON_ONCE(root_iocg->level > 0);
1879 list_for_each_entry(iocg, &inner_walk, walk_list) {
1880 iocg->child_adjusted_sum = 0;
1881 iocg->hweight_donating = 0;
1882 iocg->hweight_after_donation = 0;
1886 * Propagate the donating budget (b_t) and after donation budget (b'_t) up the hierarchy.
1889 list_for_each_entry(iocg, surpluses, surplus_list) {
1890 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1892 parent->hweight_donating += iocg->hweight_donating;
1893 parent->hweight_after_donation += iocg->hweight_after_donation;
1896 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1897 if (iocg->level > 0) {
1898 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1900 parent->hweight_donating += iocg->hweight_donating;
1901 parent->hweight_after_donation += iocg->hweight_after_donation;
1906 * Calculate inner hwa's (b) and make sure the donation values are
1907 * within the accepted ranges as we're doing low res calculations with roundups.
1910 list_for_each_entry(iocg, &inner_walk, walk_list) {
1912 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1914 iocg->hweight_active = DIV64_U64_ROUND_UP(
1915 (u64)parent->hweight_active * iocg->active,
1916 parent->child_active_sum);
1920 iocg->hweight_donating = min(iocg->hweight_donating,
1921 iocg->hweight_active);
1922 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1923 iocg->hweight_donating - 1);
1924 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1925 iocg->hweight_donating <= 1 ||
1926 iocg->hweight_after_donation == 0)) {
1927 pr_warn("iocg: invalid donation weights in ");
1928 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1929 pr_cont(": active=%u donating=%u after=%u\n",
1930 iocg->hweight_active, iocg->hweight_donating,
1931 iocg->hweight_after_donation);
1936 * Calculate the global donation rate (gamma) - the rate to adjust
1937 * non-donating budgets by.
1939 * No need to use 64bit multiplication here as the first operand is
1940 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1942 * We know that there are beneficiary nodes and the sum of the donating
1943 * hweights can't be whole; however, due to the round-ups during hweight
1944 * calculations, root_iocg->hweight_donating might still end up equal to
1945 * or greater than whole. Limit the range when calculating the divider.
1947 * gamma = (1 - t_r') / (1 - t_r)
1949 gamma = DIV_ROUND_UP(
1950 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1951 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
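/*
 * For example (illustrative): if donating nodes currently hold 40% of
 * the device (t_r = 0.4) but will keep only 10% after donation
 * (t_r' = 0.1), gamma = (1 - 0.1) / (1 - 0.4) = 1.5 and every
 * non-donating budget is scaled up by 50%.
 */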
1954 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner nodes.
1957 list_for_each_entry(iocg, &inner_walk, walk_list) {
1958 struct ioc_gq *parent;
1959 u32 inuse, wpt, wptp;
1962 if (iocg->level == 0) {
1963 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1964 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1965 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1966 WEIGHT_ONE - iocg->hweight_after_donation);
1970 parent = iocg->ancestors[iocg->level - 1];
1972 /* b' = gamma * b_f + b_t' */
1973 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1974 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1975 WEIGHT_ONE) + iocg->hweight_after_donation;
1977 /* w' = s' * b' / b'_p */
1978 inuse = DIV64_U64_ROUND_UP(
1979 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1980 parent->hweight_inuse);
1982 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1983 st = DIV64_U64_ROUND_UP(
1984 iocg->child_active_sum * iocg->hweight_donating,
1985 iocg->hweight_active);
1986 sf = iocg->child_active_sum - st;
1987 wpt = DIV64_U64_ROUND_UP(
1988 (u64)iocg->active * iocg->hweight_donating,
1989 iocg->hweight_active);
1990 wptp = DIV64_U64_ROUND_UP(
1991 (u64)inuse * iocg->hweight_after_donation,
1992 iocg->hweight_inuse);
1994 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1998 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1999 * we can finally determine leaf adjustments.
2001 list_for_each_entry(iocg, surpluses, surplus_list) {
2002 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2006 * In-debt iocgs participated in the donation calculation with
2007 * the minimum target hweight_inuse. Configuring inuse
2008 * accordingly would work fine but debt handling expects
2009 * @iocg->inuse to stay at the minimum and we don't want to interfere.
2012 if (iocg->abs_vdebt) {
2013 WARN_ON_ONCE(iocg->inuse > 1);
2017 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2018 inuse = DIV64_U64_ROUND_UP(
2019 parent->child_adjusted_sum * iocg->hweight_after_donation,
2020 parent->hweight_inuse);
2022 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2024 iocg->hweight_inuse,
2025 iocg->hweight_after_donation);
2027 __propagate_weights(iocg, iocg->active, inuse, true, now);
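/*
 * Illustrative example (assumed numbers): with parent->child_adjusted_sum ==
 * 300, parent->hweight_inuse == 0.3 * WEIGHT_ONE and a leaf that should
 * settle at hweight_after_donation == 0.05 * WEIGHT_ONE, the transfer above
 * yields inuse = 300 * 0.05 / 0.3 = 50 - the leaf keeps its active weight
 * but its inuse is lowered so that its hierarchical inuse share matches the
 * donation target.
 */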
2030 /* walk list should be dissolved after use */
2031 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2032 list_del_init(&iocg->walk_list);
2036 * A low weight iocg can amass a large amount of debt, for example, when
2037 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2038 * memory paired with a slow IO device, the debt can span multiple seconds or
2039 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2040 * up blocked paying its debt while the IO device is idle.
2042 * The following protects against such cases. If the device has been
2043 * sufficiently idle for a while, the debts are halved and delays are recalculated.
2046 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2047 struct ioc_now *now)
2049 struct ioc_gq *iocg;
2050 u64 dur, usage_pct, nr_cycles;
2052 /* if no debtor, reset the cycle */
2054 ioc->dfgv_period_at = now->now;
2055 ioc->dfgv_period_rem = 0;
2056 ioc->dfgv_usage_us_sum = 0;
2061 * Debtors can pass through a lot of writes choking the device and we
2062 * don't want to be forgiving debts while the device is struggling from
2063 * write bursts. If we're missing latency targets, consider the device fully utilized.
2066 if (ioc->busy_level > 0)
2067 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2069 ioc->dfgv_usage_us_sum += usage_us_sum;
2070 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2074 * At least DFGV_PERIOD has passed since the last period. Calculate the
2075 * average usage and reset the period counters.
2077 dur = now->now - ioc->dfgv_period_at;
2078 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2080 ioc->dfgv_period_at = now->now;
2081 ioc->dfgv_usage_us_sum = 0;
2083 /* if was too busy, reset everything */
2084 if (usage_pct > DFGV_USAGE_PCT) {
2085 ioc->dfgv_period_rem = 0;
2090 * Usage is lower than threshold. Let's forgive some debts. Debt
2091 * forgiveness runs off of the usual ioc timer but its period usually
2092 * doesn't match ioc's. Compensate the difference by performing the
2093 * reduction as many times as would fit in the duration since the last
2094 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2095 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2096 * reductions is doubled.
2098 nr_cycles = dur + ioc->dfgv_period_rem;
2099 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
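/*
 * For illustration (assumed numbers): if dur + dfgv_period_rem amounts to
 * 2.6 * DFGV_PERIOD, do_div() above leaves nr_cycles == 2 and carries the
 * remaining 0.6 * DFGV_PERIOD over in ->dfgv_period_rem, so each debtor
 * below has its debt and delay shifted right twice, i.e. quartered, with a
 * floor of 1 to keep it in debt handling.
 */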
2101 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2102 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2104 if (!iocg->abs_vdebt && !iocg->delay)
2107 spin_lock(&iocg->waitq.lock);
2109 old_debt = iocg->abs_vdebt;
2110 old_delay = iocg->delay;
2112 if (iocg->abs_vdebt)
2113 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2115 iocg->delay = iocg->delay >> nr_cycles ?: 1;
2117 iocg_kick_waitq(iocg, true, now);
2119 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2120 old_debt, iocg->abs_vdebt,
2121 old_delay, iocg->delay);
2123 spin_unlock(&iocg->waitq.lock);
2128 * Check the active iocgs' state to avoid oversleeping and deactivate idle iocgs.
2131 * Since waiters determine the sleep durations based on the vrate
2132 * they saw at the time of sleep, if vrate has increased, some
2133 * waiters could be sleeping for too long. Wake up tardy waiters
2134 * which should have woken up in the last period and expire idle iocgs.
2137 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2140 struct ioc_gq *iocg, *tiocg;
2142 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2143 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2144 !iocg->delay && !iocg_is_idle(iocg))
2147 spin_lock(&iocg->waitq.lock);
2149 /* flush wait and indebt stat deltas */
2150 if (iocg->wait_since) {
2151 iocg->stat.wait_us += now->now - iocg->wait_since;
2152 iocg->wait_since = now->now;
2154 if (iocg->indebt_since) {
2155 iocg->stat.indebt_us +=
2156 now->now - iocg->indebt_since;
2157 iocg->indebt_since = now->now;
2159 if (iocg->indelay_since) {
2160 iocg->stat.indelay_us +=
2161 now->now - iocg->indelay_since;
2162 iocg->indelay_since = now->now;
2165 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2167 /* might be oversleeping vtime / hweight changes, kick */
2168 iocg_kick_waitq(iocg, true, now);
2169 if (iocg->abs_vdebt || iocg->delay)
2171 } else if (iocg_is_idle(iocg)) {
2172 /* no waiter and idle, deactivate */
2173 u64 vtime = atomic64_read(&iocg->vtime);
2177 * @iocg has been inactive for a full duration and will
2178 * have a high budget. Account anything above target as
2179 * error and throw away. On reactivation, it'll start
2180 * with the target budget.
2182 excess = now->vnow - vtime - ioc->margins.target;
2186 current_hweight(iocg, NULL, &old_hwi);
2187 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
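/*
 * Illustrative numbers (assumed): if the idle iocg is sitting 1M vtime above
 * the target margin (excess == 1M) and its last hweight_inuse was 25%
 * (old_hwi == WEIGHT_ONE / 4), then 1M * 0.25 == 250k of device vtime is
 * treated as error and deducted from ioc->vtime_err rather than carried as
 * budget across the deactivation.
 */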
2191 TRACE_IOCG_PATH(iocg_idle, iocg, now,
2192 atomic64_read(&iocg->active_period),
2193 atomic64_read(&ioc->cur_period), vtime);
2194 __propagate_weights(iocg, 0, 0, false, now);
2195 list_del_init(&iocg->active_list);
2198 spin_unlock(&iocg->waitq.lock);
2201 commit_weights(ioc);
2205 static void ioc_timer_fn(struct timer_list *timer)
2207 struct ioc *ioc = container_of(timer, struct ioc, timer);
2208 struct ioc_gq *iocg, *tiocg;
2210 LIST_HEAD(surpluses);
2211 int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2212 u64 usage_us_sum = 0;
2213 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2214 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2215 u32 missed_ppm[2], rq_wait_pct;
2217 int prev_busy_level;
2219 /* how were the latencies during the period? */
2220 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2222 /* take care of active iocgs */
2223 spin_lock_irq(&ioc->lock);
2227 period_vtime = now.vnow - ioc->period_at_vtime;
2228 if (WARN_ON_ONCE(!period_vtime)) {
2229 spin_unlock_irq(&ioc->lock);
2233 nr_debtors = ioc_check_iocgs(ioc, &now);
2236 * Wait and indebt stat are flushed above and the donation calculation
2237 * below needs updated usage stat. Let's bring stat up-to-date.
2239 iocg_flush_stat(&ioc->active_iocgs, &now);
2241 /* calc usage and see whether some weights need to be moved around */
2242 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2243 u64 vdone, vtime, usage_us;
2244 u32 hw_active, hw_inuse;
2247 * Collect unused and wind vtime closer to vnow to prevent
2248 * iocgs from accumulating a large amount of budget.
2250 vdone = atomic64_read(&iocg->done_vtime);
2251 vtime = atomic64_read(&iocg->vtime);
2252 current_hweight(iocg, &hw_active, &hw_inuse);
2255 * Latency QoS detection doesn't account for IOs which are
2256 * in-flight for longer than a period. Detect them by
2257 * comparing vdone against period start. If lagging behind
2258 * IOs from past periods, don't increase vrate.
2260 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2261 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2262 time_after64(vtime, vdone) &&
2263 time_after64(vtime, now.vnow -
2264 MAX_LAGGING_PERIODS * period_vtime) &&
2265 time_before64(vdone, now.vnow - period_vtime))
2269 * Determine absolute usage factoring in in-flight IOs to avoid
2270 * high-latency completions appearing as idle.
2272 usage_us = iocg->usage_delta_us;
2273 usage_us_sum += usage_us;
2275 /* see whether there's surplus vtime */
2276 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2277 if (hw_inuse < hw_active ||
2278 (!waitqueue_active(&iocg->waitq) &&
2279 time_before64(vtime, now.vnow - ioc->margins.low))) {
2280 u32 hwa, old_hwi, hwm, new_hwi, usage;
2283 if (vdone != vtime) {
2284 u64 inflight_us = DIV64_U64_ROUND_UP(
2285 cost_to_abs_cost(vtime - vdone, hw_inuse),
2286 ioc->vtime_base_rate);
2288 usage_us = max(usage_us, inflight_us);
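/*
 * Illustrative note (assumed rate): vtime - vdone is the vtime still in
 * flight; cost_to_abs_cost() scales it by hw_inuse back to absolute device
 * vtime and dividing by vtime_base_rate converts that to microseconds of
 * device time. E.g. at an assumed base rate of 1000 vtime units per usec,
 * 2M of absolute in-flight vtime counts as ~2000us of usage.
 */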
2291 /* convert to hweight based usage ratio */
2292 if (time_after64(iocg->activated_at, ioc->period_at))
2293 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2295 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2297 usage = clamp_t(u32,
2298 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE, usage_dur), 1, WEIGHT_ONE);
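/*
 * Illustrative numbers (assumed): 25000us of usage over a 50000us window
 * maps to usage == 0.5 * WEIGHT_ONE, clamped to [1, WEIGHT_ONE], i.e. the
 * iocg consumed about half of the device during this period.
 */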
2303 * Already donating or accumulated enough to start.
2304 * Determine the donation amount.
2306 current_hweight(iocg, &hwa, &old_hwi);
2307 hwm = current_hweight_max(iocg);
2308 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2311 * Donation calculation assumes hweight_after_donation
2312 * to be positive, a condition that a donor w/ hwa < 2
2313 * can't meet. Don't bother with donation if hwa is
2314 * below 2. It's not going to make a meaningful difference anyway.
2317 if (new_hwi < hwm && hwa >= 2) {
2318 iocg->hweight_donating = hwa;
2319 iocg->hweight_after_donation = new_hwi;
2320 list_add(&iocg->surplus_list, &surpluses);
2321 } else if (!iocg->abs_vdebt) {
2323 * @iocg doesn't have enough to donate. Reset
2324 * its inuse to active.
2326 * Don't reset debtors as their inuse's are
2327 * owned by debt handling. This shouldn't affect
2328 * donation calculation in any meaningful way
2329 * as @iocg doesn't have a meaningful amount of share anyway.
2332 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2333 iocg->inuse, iocg->active,
2334 iocg->hweight_inuse, new_hwi);
2336 __propagate_weights(iocg, iocg->active,
2337 iocg->active, true, &now);
2341 /* genuinely short on vtime */
2346 if (!list_empty(&surpluses) && nr_shortages)
2347 transfer_surpluses(&surpluses, &now);
2349 commit_weights(ioc);
2351 /* surplus list should be dissolved after use */
2352 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2353 list_del_init(&iocg->surplus_list);
2356 * If q is getting clogged or we're missing too much, we're issuing
2357 * too much IO and should lower vtime rate. If we're not missing
2358 * and experiencing shortages but not surpluses, we're too stingy
2359 * and should increase vtime rate.
2361 prev_busy_level = ioc->busy_level;
2362 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2363 missed_ppm[READ] > ppm_rthr ||
2364 missed_ppm[WRITE] > ppm_wthr) {
2365 /* clearly missing QoS targets, slow down vrate */
2366 ioc->busy_level = max(ioc->busy_level, 0);
2368 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2369 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2370 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2371 /* QoS targets are being met with >25% margin */
2374 * We're throttling while the device has spare
2375 * capacity. If vrate was being slowed down, stop.
2377 ioc->busy_level = min(ioc->busy_level, 0);
2380 * If there are IOs spanning multiple periods, wait
2381 * them out before pushing the device harder.
2387 * Nobody is being throttled and the users aren't
2388 * issuing enough IOs to saturate the device. We
2389 * simply don't know how close the device is to
2390 * saturation. Coast.
2392 ioc->busy_level = 0;
2395 /* inside the hysteresis margin, we're good */
2396 ioc->busy_level = 0;
2399 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2401 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2402 prev_busy_level, missed_ppm);
2404 ioc_refresh_params(ioc, false);
2406 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2409 * This period is done. Move onto the next one. If nothing's
2410 * going on with the device, stop the timer.
2412 atomic64_inc(&ioc->cur_period);
2414 if (ioc->running != IOC_STOP) {
2415 if (!list_empty(&ioc->active_iocgs)) {
2416 ioc_start_period(ioc, &now);
2418 ioc->busy_level = 0;
2420 ioc->running = IOC_IDLE;
2423 ioc_refresh_vrate(ioc, &now);
2426 spin_unlock_irq(&ioc->lock);
2429 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2430 u64 abs_cost, struct ioc_now *now)
2432 struct ioc *ioc = iocg->ioc;
2433 struct ioc_margins *margins = &ioc->margins;
2434 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2437 u64 cost, new_inuse;
2439 current_hweight(iocg, NULL, &hwi);
2441 cost = abs_cost_to_cost(abs_cost, hwi);
2442 margin = now->vnow - vtime - cost;
2444 /* debt handling owns inuse for debtors */
2445 if (iocg->abs_vdebt)
2449 * We only increase inuse during period and do so if the margin has
2450 * deteriorated since the previous adjustment.
2452 if (margin >= iocg->saved_margin || margin >= margins->low ||
2453 iocg->inuse == iocg->active)
2456 spin_lock_irq(&ioc->lock);
2458 /* we own inuse only when @iocg is in the normal active state */
2459 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2460 spin_unlock_irq(&ioc->lock);
2465 * Bump up inuse till @abs_cost fits in the existing budget.
2466 * adj_step must be determined after acquiring ioc->lock - we might
2467 * have raced and lost to another thread for activation and could
2468 * be reading 0 iocg->active before ioc->lock which will lead to an infinite loop.
2471 new_inuse = iocg->inuse;
2472 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2474 new_inuse = new_inuse + adj_step;
2475 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2476 current_hweight(iocg, NULL, &hwi);
2477 cost = abs_cost_to_cost(abs_cost, hwi);
2478 } while (time_after64(vtime + cost, now->vnow) &&
2479 iocg->inuse != iocg->active);
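/*
 * For illustration (assumed numbers, INUSE_ADJ_STEP_PCT taken as 25): with
 * active == 10000, each pass of the loop above raises inuse by 2500,
 * re-propagates the weights, and re-evaluates the cost at the new (larger)
 * hweight_inuse; the loop stops as soon as vtime + cost fits within vnow or
 * inuse has reached active.
 */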
2481 spin_unlock_irq(&ioc->lock);
2483 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2484 old_inuse, iocg->inuse, old_hwi, hwi);
2489 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2490 bool is_merge, u64 *costp)
2492 struct ioc *ioc = iocg->ioc;
2493 u64 coef_seqio, coef_randio, coef_page;
2494 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2498 switch (bio_op(bio)) {
2500 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2501 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2502 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2505 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2506 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2507 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2514 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2515 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2519 if (seek_pages > LCOEF_RANDIO_PAGES) {
2520 cost += coef_randio;
2525 cost += pages * coef_page;
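/*
 * Illustrative example (assumed coefficients): for a 64KiB read that seeks
 * further than LCOEF_RANDIO_PAGES, the linear model above charges
 *
 *   cost = coef_randio + 16 * coef_page
 *
 * (16 pages of 4KiB); a contiguous IO would instead pay the sequential base
 * cost plus the same per-page component.
 */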
2530 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2534 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2538 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2541 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2543 switch (req_op(rq)) {
2545 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2548 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2555 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2559 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2563 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2565 struct blkcg_gq *blkg = bio->bi_blkg;
2566 struct ioc *ioc = rqos_to_ioc(rqos);
2567 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2569 struct iocg_wait wait;
2570 u64 abs_cost, cost, vtime;
2571 bool use_debt, ioc_locked;
2572 unsigned long flags;
2574 /* bypass IOs if disabled, still initializing, or for root cgroup */
2575 if (!ioc->enabled || !iocg || !iocg->level)
2578 /* calculate the absolute vtime cost */
2579 abs_cost = calc_vtime_cost(bio, iocg, false);
2583 if (!iocg_activate(iocg, &now))
2586 iocg->cursor = bio_end_sector(bio);
2587 vtime = atomic64_read(&iocg->vtime);
2588 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2591 * If no one's waiting and within budget, issue right away. The
2592 * tests are racy but the races aren't systemic - we only miss once
2593 * in a while which is fine.
2595 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2596 time_before_eq64(vtime + cost, now.vnow)) {
2597 iocg_commit_bio(iocg, bio, abs_cost, cost);
2602 * We're over budget. This can be handled in two ways. IOs which may
2603 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2604 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2605 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2606 * whether debt handling is needed and acquire locks accordingly.
2608 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2609 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2611 iocg_lock(iocg, ioc_locked, &flags);
2614 * @iocg must stay activated for debt and waitq handling. Deactivation
2615 * is synchronized against both ioc->lock and waitq.lock and we won't
2616 * get deactivated as long as we're waiting or have debt, so we're good
2617 * if we're activated here. In the unlikely cases that we aren't, just issue the IO without any control.
2620 if (unlikely(list_empty(&iocg->active_list))) {
2621 iocg_unlock(iocg, ioc_locked, &flags);
2622 iocg_commit_bio(iocg, bio, abs_cost, cost);
2627 * We're over budget. If @bio has to be issued regardless, remember
2628 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2629 * off the debt before waking more IOs.
2631 * This way, the debt is continuously paid off each period with the
2632 * actual budget available to the cgroup. If we just wound vtime, we
2633 * would incorrectly use the current hw_inuse for the entire amount
2634 * which, for example, can lead to the cgroup staying blocked for a
2635 * long time even with substantially raised hw_inuse.
2637 * An iocg with vdebt should stay online so that the timer can keep
2638 * deducting its vdebt and [de]activate use_delay mechanism
2639 * accordingly. We don't want to race against the timer trying to
2640 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2641 * penalizing the cgroup and its descendants.
2644 iocg_incur_debt(iocg, abs_cost, &now);
2645 if (iocg_kick_delay(iocg, &now))
2646 blkcg_schedule_throttle(rqos->q->disk,
2647 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2648 iocg_unlock(iocg, ioc_locked, &flags);
2652 /* guarantee that iocgs w/ waiters have maximum inuse */
2653 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2655 iocg_unlock(iocg, false, &flags);
2659 propagate_weights(iocg, iocg->active, iocg->active, true,
2664 * Append self to the waitq and schedule the wakeup timer if we're
2665 * the first waiter. The timer duration is calculated based on the
2666 * current vrate. vtime and hweight changes can make it too short
2667 * or too long. Each wait entry records the absolute cost it's
2668 * waiting for to allow re-evaluation using a custom wait entry.
2670 * If too short, the timer simply reschedules itself. If too long,
2671 * the period timer will notice and trigger wakeups.
2673 * All waiters are on iocg->waitq and the wait states are
2674 * synchronized using waitq.lock.
2676 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2677 wait.wait.private = current;
2679 wait.abs_cost = abs_cost;
2680 wait.committed = false; /* will be set true by waker */
2682 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2683 iocg_kick_waitq(iocg, ioc_locked, &now);
2685 iocg_unlock(iocg, ioc_locked, &flags);
2688 set_current_state(TASK_UNINTERRUPTIBLE);
2694 /* waker already committed us, proceed */
2695 finish_wait(&iocg->waitq, &wait.wait);
2698 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2701 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2702 struct ioc *ioc = rqos_to_ioc(rqos);
2703 sector_t bio_end = bio_end_sector(bio);
2705 u64 vtime, abs_cost, cost;
2706 unsigned long flags;
2708 /* bypass if disabled, still initializing, or for root cgroup */
2709 if (!ioc->enabled || !iocg || !iocg->level)
2712 abs_cost = calc_vtime_cost(bio, iocg, true);
2718 vtime = atomic64_read(&iocg->vtime);
2719 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2721 /* update cursor if backmerging into the request at the cursor */
2722 if (blk_rq_pos(rq) < bio_end &&
2723 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2724 iocg->cursor = bio_end;
2727 * Charge if there's enough vtime budget and the existing request has cost assigned.
2730 if (rq->bio && rq->bio->bi_iocost_cost &&
2731 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2732 iocg_commit_bio(iocg, bio, abs_cost, cost);
2737 * Otherwise, account it as debt if @iocg is online, which it should
2738 * be for the vast majority of cases. See debt handling in
2739 * ioc_rqos_throttle() for details.
2741 spin_lock_irqsave(&ioc->lock, flags);
2742 spin_lock(&iocg->waitq.lock);
2744 if (likely(!list_empty(&iocg->active_list))) {
2745 iocg_incur_debt(iocg, abs_cost, &now);
2746 if (iocg_kick_delay(iocg, &now))
2747 blkcg_schedule_throttle(rqos->q->disk,
2748 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2750 iocg_commit_bio(iocg, bio, abs_cost, cost);
2753 spin_unlock(&iocg->waitq.lock);
2754 spin_unlock_irqrestore(&ioc->lock, flags);
2757 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2759 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2761 if (iocg && bio->bi_iocost_cost)
2762 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2765 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2767 struct ioc *ioc = rqos_to_ioc(rqos);
2768 struct ioc_pcpu_stat *ccs;
2769 u64 on_q_ns, rq_wait_ns, size_nsec;
2772 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2775 switch (req_op(rq)) {
2788 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2789 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2790 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2792 ccs = get_cpu_ptr(ioc->pcpu_stat);
2794 if (on_q_ns <= size_nsec ||
2795 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2796 local_inc(&ccs->missed[rw].nr_met);
2798 local_inc(&ccs->missed[rw].nr_missed);
2800 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
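/*
 * Illustrative numbers (assumed): a request that spent 3ms between
 * allocation and completion (on_q_ns) whose size-based cost works out to
 * 1ms (size_nsec) counts as having met QoS if the configured latency target
 * for its direction is at least 2ms; otherwise nr_missed is bumped.
 * rq_wait_ns separately accumulates the time spent waiting for a request to
 * be allocated.
 */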
2805 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2807 struct ioc *ioc = rqos_to_ioc(rqos);
2809 spin_lock_irq(&ioc->lock);
2810 ioc_refresh_params(ioc, false);
2811 spin_unlock_irq(&ioc->lock);
2814 static void ioc_rqos_exit(struct rq_qos *rqos)
2816 struct ioc *ioc = rqos_to_ioc(rqos);
2818 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2820 spin_lock_irq(&ioc->lock);
2821 ioc->running = IOC_STOP;
2822 spin_unlock_irq(&ioc->lock);
2824 del_timer_sync(&ioc->timer);
2825 free_percpu(ioc->pcpu_stat);
2829 static struct rq_qos_ops ioc_rqos_ops = {
2830 .throttle = ioc_rqos_throttle,
2831 .merge = ioc_rqos_merge,
2832 .done_bio = ioc_rqos_done_bio,
2833 .done = ioc_rqos_done,
2834 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2835 .exit = ioc_rqos_exit,
2838 static int blk_iocost_init(struct gendisk *disk)
2840 struct request_queue *q = disk->queue;
2842 struct rq_qos *rqos;
2845 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2849 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2850 if (!ioc->pcpu_stat) {
2855 for_each_possible_cpu(cpu) {
2856 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2858 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2859 local_set(&ccs->missed[i].nr_met, 0);
2860 local_set(&ccs->missed[i].nr_missed, 0);
2862 local64_set(&ccs->rq_wait_ns, 0);
2866 rqos->id = RQ_QOS_COST;
2867 rqos->ops = &ioc_rqos_ops;
2870 spin_lock_init(&ioc->lock);
2871 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2872 INIT_LIST_HEAD(&ioc->active_iocgs);
2874 ioc->running = IOC_IDLE;
2875 ioc->vtime_base_rate = VTIME_PER_USEC;
2876 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2877 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2878 ioc->period_at = ktime_to_us(ktime_get());
2879 atomic64_set(&ioc->cur_period, 0);
2880 atomic_set(&ioc->hweight_gen, 0);
2882 spin_lock_irq(&ioc->lock);
2883 ioc->autop_idx = AUTOP_INVALID;
2884 ioc_refresh_params(ioc, true);
2885 spin_unlock_irq(&ioc->lock);
2888 * rqos must be added before activation to allow iocg_pd_init() to
2889 * lookup the ioc from q. This means that the rqos methods may get
2890 * called before policy activation completion, can't assume that the
2891 * target bio has an iocg associated and need to test for NULL iocg.
2893 ret = rq_qos_add(q, rqos);
2897 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2903 rq_qos_del(q, rqos);
2905 free_percpu(ioc->pcpu_stat);
2910 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2912 struct ioc_cgrp *iocc;
2914 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2918 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
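/*
 * Note: weights are kept in WEIGHT_ONE (1 << 16) fixed point internally, so
 * e.g. a user-visible default weight of 100 is stored as 100 * WEIGHT_ONE
 * here and divided back out by WEIGHT_ONE when shown to userspace.
 */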
2922 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2924 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2927 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2928 struct blkcg *blkcg)
2930 int levels = blkcg->css.cgroup->level + 1;
2931 struct ioc_gq *iocg;
2933 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2937 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2938 if (!iocg->pcpu_stat) {
2946 static void ioc_pd_init(struct blkg_policy_data *pd)
2948 struct ioc_gq *iocg = pd_to_iocg(pd);
2949 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2950 struct ioc *ioc = q_to_ioc(blkg->q);
2952 struct blkcg_gq *tblkg;
2953 unsigned long flags;
2958 atomic64_set(&iocg->vtime, now.vnow);
2959 atomic64_set(&iocg->done_vtime, now.vnow);
2960 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2961 INIT_LIST_HEAD(&iocg->active_list);
2962 INIT_LIST_HEAD(&iocg->walk_list);
2963 INIT_LIST_HEAD(&iocg->surplus_list);
2964 iocg->hweight_active = WEIGHT_ONE;
2965 iocg->hweight_inuse = WEIGHT_ONE;
2967 init_waitqueue_head(&iocg->waitq);
2968 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2969 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2971 iocg->level = blkg->blkcg->css.cgroup->level;
2973 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2974 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2975 iocg->ancestors[tiocg->level] = tiocg;
2978 spin_lock_irqsave(&ioc->lock, flags);
2979 weight_updated(iocg, &now);
2980 spin_unlock_irqrestore(&ioc->lock, flags);
2983 static void ioc_pd_free(struct blkg_policy_data *pd)
2985 struct ioc_gq *iocg = pd_to_iocg(pd);
2986 struct ioc *ioc = iocg->ioc;
2987 unsigned long flags;
2990 spin_lock_irqsave(&ioc->lock, flags);
2992 if (!list_empty(&iocg->active_list)) {
2996 propagate_weights(iocg, 0, 0, false, &now);
2997 list_del_init(&iocg->active_list);
3000 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
3001 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3003 spin_unlock_irqrestore(&ioc->lock, flags);
3005 hrtimer_cancel(&iocg->waitq_timer);
3007 free_percpu(iocg->pcpu_stat);
3011 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3013 struct ioc_gq *iocg = pd_to_iocg(pd);
3014 struct ioc *ioc = iocg->ioc;
3019 if (iocg->level == 0) {
3020 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3021 ioc->vtime_base_rate * 10000, VTIME_PER_USEC);
3023 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3026 seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3028 if (blkcg_debug_stats)
3029 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3030 iocg->last_stat.wait_us,
3031 iocg->last_stat.indebt_us,
3032 iocg->last_stat.indelay_us);
3035 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3038 const char *dname = blkg_dev_name(pd->blkg);
3039 struct ioc_gq *iocg = pd_to_iocg(pd);
3041 if (dname && iocg->cfg_weight)
3042 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3047 static int ioc_weight_show(struct seq_file *sf, void *v)
3049 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3050 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3052 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3053 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3054 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3058 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3059 size_t nbytes, loff_t off)
3061 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3062 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3063 struct blkg_conf_ctx ctx;
3065 struct ioc_gq *iocg;
3069 if (!strchr(buf, ':')) {
3070 struct blkcg_gq *blkg;
3072 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3075 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3078 spin_lock_irq(&blkcg->lock);
3079 iocc->dfl_weight = v * WEIGHT_ONE;
3080 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3081 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3084 spin_lock(&iocg->ioc->lock);
3085 ioc_now(iocg->ioc, &now);
3086 weight_updated(iocg, &now);
3087 spin_unlock(&iocg->ioc->lock);
3090 spin_unlock_irq(&blkcg->lock);
3095 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3099 iocg = blkg_to_iocg(ctx.blkg);
3101 if (!strncmp(ctx.body, "default", 7)) {
3104 if (!sscanf(ctx.body, "%u", &v))
3106 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3110 spin_lock(&iocg->ioc->lock);
3111 iocg->cfg_weight = v * WEIGHT_ONE;
3112 ioc_now(iocg->ioc, &now);
3113 weight_updated(iocg, &now);
3114 spin_unlock(&iocg->ioc->lock);
3116 blkg_conf_finish(&ctx);
3120 blkg_conf_finish(&ctx);
3124 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3127 const char *dname = blkg_dev_name(pd->blkg);
3128 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3133 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3134 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3135 ioc->params.qos[QOS_RPPM] / 10000,
3136 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3137 ioc->params.qos[QOS_RLAT],
3138 ioc->params.qos[QOS_WPPM] / 10000,
3139 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3140 ioc->params.qos[QOS_WLAT],
3141 ioc->params.qos[QOS_MIN] / 10000,
3142 ioc->params.qos[QOS_MIN] % 10000 / 100,
3143 ioc->params.qos[QOS_MAX] / 10000,
3144 ioc->params.qos[QOS_MAX] % 10000 / 100);
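/*
 * Illustrative example: the qos parameters are stored in parts-per-million,
 * so e.g. qos[QOS_RPPM] == 950000 prints as "rpct=95.00" through the /10000
 * and %10000/100 split above, and a min/max vrate of 1500000 ppm would show
 * up as "150.00".
 */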
3148 static int ioc_qos_show(struct seq_file *sf, void *v)
3150 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3152 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3153 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3157 static const match_table_t qos_ctrl_tokens = {
3158 { QOS_ENABLE, "enable=%u" },
3159 { QOS_CTRL, "ctrl=%s" },
3160 { NR_QOS_CTRL_PARAMS, NULL },
3163 static const match_table_t qos_tokens = {
3164 { QOS_RPPM, "rpct=%s" },
3165 { QOS_RLAT, "rlat=%u" },
3166 { QOS_WPPM, "wpct=%s" },
3167 { QOS_WLAT, "wlat=%u" },
3168 { QOS_MIN, "min=%s" },
3169 { QOS_MAX, "max=%s" },
3170 { NR_QOS_PARAMS, NULL },
3173 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3174 size_t nbytes, loff_t off)
3176 struct block_device *bdev;
3177 struct gendisk *disk;
3179 u32 qos[NR_QOS_PARAMS];
3184 bdev = blkcg_conf_open_bdev(&input);
3186 return PTR_ERR(bdev);
3188 disk = bdev->bd_disk;
3189 ioc = q_to_ioc(disk->queue);
3191 ret = blk_iocost_init(disk);
3194 ioc = q_to_ioc(disk->queue);
3197 spin_lock_irq(&ioc->lock);
3198 memcpy(qos, ioc->params.qos, sizeof(qos));
3199 enable = ioc->enabled;
3200 user = ioc->user_qos_params;
3201 spin_unlock_irq(&ioc->lock);
3203 while ((p = strsep(&input, " \t\n"))) {
3204 substring_t args[MAX_OPT_ARGS];
3212 switch (match_token(p, qos_ctrl_tokens, args)) {
3214 match_u64(&args[0], &v);
3218 match_strlcpy(buf, &args[0], sizeof(buf));
3219 if (!strcmp(buf, "auto"))
3221 else if (!strcmp(buf, "user"))
3228 tok = match_token(p, qos_tokens, args);
3232 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3235 if (cgroup_parse_float(buf, 2, &v))
3237 if (v < 0 || v > 10000)
3243 if (match_u64(&args[0], &v))
3249 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3252 if (cgroup_parse_float(buf, 2, &v))
3256 qos[tok] = clamp_t(s64, v * 100,
3257 VRATE_MIN_PPM, VRATE_MAX_PPM);
3265 if (qos[QOS_MIN] > qos[QOS_MAX])
3268 spin_lock_irq(&ioc->lock);
3271 blk_stat_enable_accounting(disk->queue);
3272 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3273 ioc->enabled = true;
3275 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3276 ioc->enabled = false;
3280 memcpy(ioc->params.qos, qos, sizeof(qos));
3281 ioc->user_qos_params = true;
3283 ioc->user_qos_params = false;
3286 ioc_refresh_params(ioc, true);
3287 spin_unlock_irq(&ioc->lock);
3289 blkdev_put_no_open(bdev);
3294 blkdev_put_no_open(bdev);
3298 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3299 struct blkg_policy_data *pd, int off)
3301 const char *dname = blkg_dev_name(pd->blkg);
3302 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3303 u64 *u = ioc->params.i_lcoefs;
3308 seq_printf(sf, "%s ctrl=%s model=linear "
3309 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3310 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3311 dname, ioc->user_cost_model ? "user" : "auto",
3312 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3313 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3317 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3319 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3321 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3322 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3326 static const match_table_t cost_ctrl_tokens = {
3327 { COST_CTRL, "ctrl=%s" },
3328 { COST_MODEL, "model=%s" },
3329 { NR_COST_CTRL_PARAMS, NULL },
3332 static const match_table_t i_lcoef_tokens = {
3333 { I_LCOEF_RBPS, "rbps=%u" },
3334 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3335 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3336 { I_LCOEF_WBPS, "wbps=%u" },
3337 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3338 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3339 { NR_I_LCOEFS, NULL },
3342 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3343 size_t nbytes, loff_t off)
3345 struct block_device *bdev;
3352 bdev = blkcg_conf_open_bdev(&input);
3354 return PTR_ERR(bdev);
3356 ioc = q_to_ioc(bdev_get_queue(bdev));
3358 ret = blk_iocost_init(bdev->bd_disk);
3361 ioc = q_to_ioc(bdev_get_queue(bdev));
3364 spin_lock_irq(&ioc->lock);
3365 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3366 user = ioc->user_cost_model;
3367 spin_unlock_irq(&ioc->lock);
3369 while ((p = strsep(&input, " \t\n"))) {
3370 substring_t args[MAX_OPT_ARGS];
3378 switch (match_token(p, cost_ctrl_tokens, args)) {
3380 match_strlcpy(buf, &args[0], sizeof(buf));
3381 if (!strcmp(buf, "auto"))
3383 else if (!strcmp(buf, "user"))
3389 match_strlcpy(buf, &args[0], sizeof(buf));
3390 if (strcmp(buf, "linear"))
3395 tok = match_token(p, i_lcoef_tokens, args);
3396 if (tok == NR_I_LCOEFS)
3398 if (match_u64(&args[0], &v))
3404 spin_lock_irq(&ioc->lock);
3406 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3407 ioc->user_cost_model = true;
3409 ioc->user_cost_model = false;
3411 ioc_refresh_params(ioc, true);
3412 spin_unlock_irq(&ioc->lock);
3414 blkdev_put_no_open(bdev);
3420 blkdev_put_no_open(bdev);
3424 static struct cftype ioc_files[] = {
3427 .flags = CFTYPE_NOT_ON_ROOT,
3428 .seq_show = ioc_weight_show,
3429 .write = ioc_weight_write,
3433 .flags = CFTYPE_ONLY_ON_ROOT,
3434 .seq_show = ioc_qos_show,
3435 .write = ioc_qos_write,
3438 .name = "cost.model",
3439 .flags = CFTYPE_ONLY_ON_ROOT,
3440 .seq_show = ioc_cost_model_show,
3441 .write = ioc_cost_model_write,
3446 static struct blkcg_policy blkcg_policy_iocost = {
3447 .dfl_cftypes = ioc_files,
3448 .cpd_alloc_fn = ioc_cpd_alloc,
3449 .cpd_free_fn = ioc_cpd_free,
3450 .pd_alloc_fn = ioc_pd_alloc,
3451 .pd_init_fn = ioc_pd_init,
3452 .pd_free_fn = ioc_pd_free,
3453 .pd_stat_fn = ioc_pd_stat,
3456 static int __init ioc_init(void)
3458 return blkcg_policy_register(&blkcg_policy_iocost);
3461 static void __exit ioc_exit(void)
3463 blkcg_policy_unregister(&blkcg_policy_iocost);
3466 module_init(ioc_init);
3467 module_exit(ioc_exit);