/* SPDX-License-Identifier: GPL-2.0
 *
 * IO cost model based controller.
 *
 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
 * Copyright (C) 2019 Andy Newell <newella@fb.com>
 * Copyright (C) 2019 Facebook
 *
 * One challenge of controlling IO resources is the lack of trivially
 * observable cost metric. This is distinguished from CPU and memory where
 * wallclock time and the number of bytes can serve as accurate enough
 * approximations.
 *
 * Bandwidth and iops are the most commonly used metrics for IO devices but
 * depending on the type and specifics of the device, different IO patterns
 * easily lead to multiple orders of magnitude variations rendering them
 * useless for the purpose of IO capacity distribution. While on-device
 * time, with a lot of crutches, could serve as a useful approximation for
 * non-queued rotational devices, this is no longer viable with modern
 * devices, even the rotational ones.
 *
 * While there is no cost metric we can trivially observe, it isn't a
 * complete mystery. For example, on a rotational device, seek cost
 * dominates while a contiguous transfer contributes a smaller amount
 * proportional to the size. If we can characterize at least the relative
 * costs of these different types of IOs, it should be possible to
 * implement a reasonable work-conserving proportional IO resource
 * distribution.
 *
 * 1. IO Cost Model
 *
 * The IO cost model estimates the cost of an IO given its basic parameters
 * and history (e.g. the end sector of the last IO). The cost is measured
 * in device time. If a given IO is estimated to cost 10ms, the device
 * should be able to process ~100 of those IOs in a second.
 *
 * Currently, there's only one builtin cost model - linear. Each IO is
 * classified as sequential or random and given a base cost accordingly.
 * On top of that, a size cost proportional to the length of the IO is
 * added. While simple, this model captures the operational
 * characteristics of a wide variety of devices well enough. Default
 * parameters for several different classes of devices are provided and the
 * parameters can be configured from userspace via
 * /sys/fs/cgroup/io.cost.model.
 *
 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
 * device-specific coefficients.
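 *
 * As a rough illustration of the linear model (a sketch only, not the
 * actual kernel code; the helper name and parameters below are made up):
 *
 *	u64 linear_cost(bool is_rand, u64 nr_pages,
 *			u64 seq_base, u64 rand_base, u64 page_cost)
 *	{
 *		u64 cost = is_rand ? rand_base : seq_base;
 *
 *		return cost + nr_pages * page_cost;	// base + size cost
 *	}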
 *
 * 2. Control Strategy
 *
 * The device virtual time (vtime) is used as the primary control metric.
 * The control strategy is composed of the following three parts.
 *
 * 2-1. Vtime Distribution
 *
 * When a cgroup becomes active in terms of IOs, its hierarchical share is
 * calculated. Please consider the following hierarchy where the numbers
 * inside parentheses denote the configured weights.
 *
 *           root
 *         /       \
 *      A (w:100)  B (w:300)
 *      /       \
 *  A0 (w:100)  A1 (w:100)
 *
 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
 * 12.5% each. The distribution mechanism only cares about these flattened
 * shares. They're called hweights (hierarchical weights) and always add
 * up to 1 (WEIGHT_ONE).
 *
 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
 * For example, with 12.5% weight, A0's vtime runs 8 times slower (100/12.5)
 * than the device vtime - an IO which takes 10ms on the underlying
 * device is considered to take 80ms on A0.
 *
 * This constitutes the basis of IO capacity distribution. Each cgroup's
 * vtime is running at a rate determined by its hweight. A cgroup tracks
 * the vtime consumed by past IOs and can issue a new IO if doing so
 * wouldn't outrun the current device vtime. Otherwise, the IO is
 * suspended until the vtime has progressed enough to cover it.
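 *
 * Ignoring the details, the issue decision boils down to a check along
 * the lines of the following sketch (names are illustrative, not the
 * actual implementation):
 *
 *	// may this cgroup issue an IO costing @cost right now?
 *	bool may_issue(u64 cgroup_vtime, u64 device_vtime, u64 cost)
 *	{
 *		return cgroup_vtime + cost <= device_vtime;
 *	}
 *
 * with the cgroup's vtime advancing by the IO's cost scaled up by the
 * inverse of its hweight.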
 *
 * 2-2. Vrate Adjustment
 *
 * It's unrealistic to expect the cost model to be perfect. There are too
 * many devices and even on the same device the overall performance
 * fluctuates depending on numerous factors such as IO mixture and device
 * internal garbage collection. The controller needs to adapt dynamically.
 *
 * This is achieved by adjusting the overall IO rate according to how busy
 * the device is. If the device becomes overloaded, we're sending down too
 * many IOs and should generally slow down. If there are waiting issuers
 * but the device isn't saturated, we're issuing too few and should
 * increase vrate.
 *
 * To slow down, we lower the vrate - the rate at which the device vtime
 * passes compared to the wall clock. For example, if the vtime is running
 * at the vrate of 75%, all cgroups added up would only be able to issue
 * 750ms worth of IOs per second, and vice-versa for speeding up.
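 *
 * Concretely, the device vtime at any point in time can be sketched as
 * (this is a simplification of what ioc_now() below computes):
 *
 *	vnow = vtime_at_period_start + wallclock_since_period_start * vrate
 *
 * so a vrate of 75% simply makes the device vtime advance at 3/4 of
 * wallclock speed, shrinking every cgroup's budget accordingly.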
 *
 * Device busyness is determined using two criteria - rq wait and
 * completion latencies.
 *
 * When a device gets saturated, the on-device and then the request queues
 * fill up and a bio which is ready to be issued has to wait for a request
 * to become available. When this delay becomes noticeable, it's a clear
 * indication that the device is saturated and we lower the vrate. This
 * saturation signal is fairly conservative as it only triggers when both
 * hardware and software queues are filled up, and is used as the default
 * busy signal.
 *
 * As devices can have deep queues and be unfair in how the queued commands
 * are executed, solely depending on rq wait may not result in satisfactory
 * control quality. For a better control quality, completion latency QoS
 * parameters can be configured so that the device is considered saturated
 * if N'th percentile completion latency rises above the set point.
 *
 * The completion latency requirements are a function of both the
 * underlying device characteristics and the desired IO latency quality of
 * service. There is an inherent trade-off - the tighter the latency QoS,
 * the higher the bandwidth loss. Latency QoS is disabled by default
 * and can be set through /sys/fs/cgroup/io.cost.qos.
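 *
 * For example, a configuration written to io.cost.qos looks roughly like
 * the following (the authoritative syntax is described in
 * Documentation/admin-guide/cgroup-v2.rst; the values here are
 * illustrative):
 *
 *	8:16 enable=1 ctrl=user rpct=95.00 rlat=75000 wpct=95.00 wlat=150000
 *	     min=50.00 max=150.00
 *
 * which reads as: consider the device saturated if the 95th percentile
 * read completion latency exceeds 75ms or the 95th percentile write
 * latency exceeds 150ms, and keep vrate between 50% and 150%.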
 *
 * 2-3. Work Conservation
 *
 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
 * periodically while B is sending out enough parallel IOs to saturate the
 * device on its own. Let's say A's usage amounts to 100ms worth of IO
 * cost per second, i.e., 10% of the device capacity. The naive
 * distribution of half and half would lead to 60% utilization of the
 * device, a significant reduction in the total amount of work done
 * compared to free-for-all competition. This is too high a cost to pay
 * for IO control.
 *
 * To conserve the total amount of work done, we keep track of how much
 * each active cgroup is actually using and yield part of its weight if
 * there are other cgroups which can make use of it. In the above case,
 * A's weight will be lowered so that it hovers above the actual usage and
 * B would be able to use the rest.
 *
 * As we don't want to penalize a cgroup for donating its weight, the
 * surplus weight adjustment factors in a margin and has an immediate
 * snapback mechanism in case the cgroup needs more IO vtime for itself.
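 *
 * Continuing the example above with illustrative numbers: instead of
 * leaving A's hweight at 50%, A's inuse weight is lowered so that its
 * hweight hovers slightly above the 10% it actually uses - say around
 * 15% with the margin - letting B's hweight grow to roughly 85% and
 * pushing total utilization back toward ~95% instead of 60%.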
 *
 * Note that adjusting down surplus weights has the same effect as
 * accelerating vtime for other cgroups and work conservation can also be
 * implemented by adjusting vrate dynamically. However, working out who can
 * donate and should take back how much requires hweight propagations
 * anyway, making it easier to implement and understand as a separate
 * mechanism.
 *
 * 3. Monitoring
 *
 * Instead of debugfs or other clumsy monitoring mechanisms, this
 * controller uses a drgn based monitoring script -
 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
 * https://github.com/osandov/drgn. The output looks like the following.
 *
 *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
 *                 active      weight      hweight% inflt% dbt  delay usages%
 *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
 *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
 *
 * - per     : Timer period
 * - cur_per : Internal wall and device vtime clock
 * - vrate   : Device virtual time rate against wall clock
 * - weight  : Surplus-adjusted and configured weights
 * - hweight : Surplus-adjusted and configured hierarchical weights
 * - inflt   : The percentage of in-flight IO cost at the end of last period
 * - dbt     : Outstanding debt
 * - delay   : Deferred issuer delay induction level and duration
 * - usages  : Usage history
 */
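
/*
 * A typical invocation, assuming the monitored device is sdb (see the
 * script itself for the authoritative set of options):
 *
 *	$ tools/cgroup/iocost_monitor.py sdb
 */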
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
186 #include "blk-cgroup.h"
188 #ifdef CONFIG_TRACEPOINTS
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
#else	/* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
226 MARGIN_TARGET_PCT = 50,
228 INUSE_ADJ_STEP_PCT = 25,
230 /* Have some play in timer operations */
233 /* 1/64k is granular enough and can easily be handled w/ u32 */
234 WEIGHT_ONE = 1 << 16,
239 * As vtime is used to calculate the cost of each IO, it needs to
240 * be fairly high precision. For example, it should be able to
241 * represent the cost of a single page worth of discard with
* sufficient accuracy. At the same time, it should be able to
243 * represent reasonably long enough durations to be useful and
244 * convenient during operation.
246 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
247 * granularity and days of wrap-around time even at extreme vrates.
249 VTIME_PER_SEC_SHIFT = 37,
250 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
251 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
252 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
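
	/*
	 * As a back-of-the-envelope check of the above (illustrative, not
	 * part of the original source): VTIME_PER_NSEC is 2^37 / 10^9, i.e.
	 * about 137, giving sub-nanosecond granularity, while a 64bit vtime
	 * only wraps after roughly 2^63 / 2^37 = 2^26 seconds (~776 days),
	 * or still several days with vrate scaled up by the maximum 100x.
	 */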
254 /* bound vrate adjustments within two orders of magnitude */
255 VRATE_MIN_PPM = 10000, /* 1% */
256 VRATE_MAX_PPM = 100000000, /* 10000% */
258 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
259 VRATE_CLAMP_ADJ_PCT = 4,
261 /* if IOs end up waiting for requests, issue less */
262 RQ_WAIT_BUSY_PCT = 5,
/* unbusy hysteresis */
268 * The effect of delay is indirect and non-linear and a huge amount of
269 * future debt can accumulate abruptly while unthrottled. Linearly scale
270 * up delay as debt is going up and then let it decay exponentially.
271 * This gives us quick ramp ups while delay is accumulating and long
* tails which can help reduce the frequency of debt explosions on
273 * unthrottle. The parameters are experimentally determined.
275 * The delay mechanism provides adequate protection and behavior in many
* cases. However, this is far from ideal and falls short on both
* fronts. The debtors are often throttled too harshly, costing a
* significant level of fairness and possibly total work, while the
* protection against their impact on the system can be choppy and
* unreliable.
*
* The shortcoming primarily stems from the fact that, unlike for page
283 * cache, the kernel doesn't have well-defined back-pressure propagation
284 * mechanism and policies for anonymous memory. Fully addressing this
285 * issue will likely require substantial improvements in the area.
287 MIN_DELAY_THR_PCT = 500,
288 MAX_DELAY_THR_PCT = 25000,
290 MAX_DELAY = 250 * USEC_PER_MSEC,
292 /* halve debts if avg usage over 100ms is under 50% */
294 DFGV_PERIOD = 100 * USEC_PER_MSEC,
296 /* don't let cmds which take a very long time pin lagging for too long */
297 MAX_LAGGING_PERIODS = 10,
299 /* switch iff the conditions are met for longer than this */
300 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
* Count IO size in 4k pages. The 12bit shift helps keep the
* size-proportional components of the cost calculation within a similar
* number of digits as the per-IO cost components.
308 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
309 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
/* if IOs are further than 16M apart, consider them random for the linear model */
312 LCOEF_RANDIO_PAGES = 4096,
321 /* io.cost.qos controls including per-dev enable of the whole controller */
328 /* io.cost.qos params */
339 /* io.cost.model controls */
346 /* builtin linear cost model coefficients */
376 u32 qos[NR_QOS_PARAMS];
377 u64 i_lcoefs[NR_I_LCOEFS];
378 u64 lcoefs[NR_LCOEFS];
379 u32 too_fast_vrate_pct;
380 u32 too_slow_vrate_pct;
396 struct ioc_pcpu_stat {
397 struct ioc_missed missed[2];
399 local64_t rq_wait_ns;
409 struct ioc_params params;
410 struct ioc_margins margins;
417 struct timer_list timer;
418 struct list_head active_iocgs; /* active cgroups */
419 struct ioc_pcpu_stat __percpu *pcpu_stat;
421 enum ioc_running running;
422 atomic64_t vtime_rate;
426 seqcount_spinlock_t period_seqcount;
427 u64 period_at; /* wallclock starttime */
428 u64 period_at_vtime; /* vtime starttime */
430 atomic64_t cur_period; /* inc'd each period */
431 int busy_level; /* saturation history */
433 bool weights_updated;
434 atomic_t hweight_gen; /* for lazy hweights */
/* debt forgiveness */
439 u64 dfgv_usage_us_sum;
441 u64 autop_too_fast_at;
442 u64 autop_too_slow_at;
444 bool user_qos_params:1;
445 bool user_cost_model:1;
448 struct iocg_pcpu_stat {
449 local64_t abs_vusage;
459 /* per device-cgroup pair */
461 struct blkg_policy_data pd;
* An iocg can get its weight from two sources - an explicit
* per-device-cgroup configuration or the default weight of the
* cgroup. `cfg_weight` is the explicit per-device-cgroup
* configuration. `weight` is the effective weight considering both
* sources.
*
* When an idle cgroup becomes active its `active` goes from 0 to
* `weight`. `inuse` is the surplus adjusted active weight.
* `active` and `inuse` are used to calculate `hweight_active` and
* `hweight_inuse`.
*
* `last_inuse` remembers `inuse` while an iocg is idle to persist
* surplus adjustments.
*
* `inuse` may be adjusted dynamically during the period. `saved_*` are
* used to determine and track adjustments.
490 sector_t cursor; /* to detect randio */
* `vtime` is this iocg's vtime cursor which progresses as IOs are
* issued. If lagging behind device vtime, the delta represents
* the currently available IO budget; if running ahead, the
* overage is the amount by which the budget has been overdrawn.
*
498 * `vtime_done` is the same but progressed on completion rather
499 * than issue. The delta behind `vtime` represents the cost of
500 * currently in-flight IOs.
503 atomic64_t done_vtime;
506 /* current delay in effect and when it started */
511 * The period this iocg was last active in. Used for deactivation
512 * and invalidating `vtime`.
514 atomic64_t active_period;
515 struct list_head active_list;
517 /* see __propagate_weights() and current_hweight() for details */
518 u64 child_active_sum;
520 u64 child_adjusted_sum;
524 u32 hweight_donating;
525 u32 hweight_after_donation;
527 struct list_head walk_list;
528 struct list_head surplus_list;
530 struct wait_queue_head waitq;
531 struct hrtimer waitq_timer;
533 /* timestamp at the latest activation */
537 struct iocg_pcpu_stat __percpu *pcpu_stat;
538 struct iocg_stat stat;
539 struct iocg_stat last_stat;
540 u64 last_stat_abs_vusage;
546 /* this iocg's depth in the hierarchy and ancestors including self */
548 struct ioc_gq *ancestors[];
553 struct blkcg_policy_data cpd;
554 unsigned int dfl_weight;
564 struct wait_queue_entry wait;
570 struct iocg_wake_ctx {
576 static const struct ioc_params autop[] = {
579 [QOS_RLAT] = 250000, /* 250ms */
581 [QOS_MIN] = VRATE_MIN_PPM,
582 [QOS_MAX] = VRATE_MAX_PPM,
585 [I_LCOEF_RBPS] = 174019176,
586 [I_LCOEF_RSEQIOPS] = 41708,
587 [I_LCOEF_RRANDIOPS] = 370,
588 [I_LCOEF_WBPS] = 178075866,
589 [I_LCOEF_WSEQIOPS] = 42705,
590 [I_LCOEF_WRANDIOPS] = 378,
595 [QOS_RLAT] = 25000, /* 25ms */
597 [QOS_MIN] = VRATE_MIN_PPM,
598 [QOS_MAX] = VRATE_MAX_PPM,
601 [I_LCOEF_RBPS] = 245855193,
602 [I_LCOEF_RSEQIOPS] = 61575,
603 [I_LCOEF_RRANDIOPS] = 6946,
604 [I_LCOEF_WBPS] = 141365009,
605 [I_LCOEF_WSEQIOPS] = 33716,
606 [I_LCOEF_WRANDIOPS] = 26796,
611 [QOS_RLAT] = 25000, /* 25ms */
613 [QOS_MIN] = VRATE_MIN_PPM,
614 [QOS_MAX] = VRATE_MAX_PPM,
617 [I_LCOEF_RBPS] = 488636629,
618 [I_LCOEF_RSEQIOPS] = 8932,
619 [I_LCOEF_RRANDIOPS] = 8518,
620 [I_LCOEF_WBPS] = 427891549,
621 [I_LCOEF_WSEQIOPS] = 28755,
622 [I_LCOEF_WRANDIOPS] = 21940,
624 .too_fast_vrate_pct = 500,
628 [QOS_RLAT] = 5000, /* 5ms */
630 [QOS_MIN] = VRATE_MIN_PPM,
631 [QOS_MAX] = VRATE_MAX_PPM,
634 [I_LCOEF_RBPS] = 3102524156LLU,
635 [I_LCOEF_RSEQIOPS] = 724816,
636 [I_LCOEF_RRANDIOPS] = 778122,
637 [I_LCOEF_WBPS] = 1742780862LLU,
638 [I_LCOEF_WSEQIOPS] = 425702,
639 [I_LCOEF_WRANDIOPS] = 443193,
641 .too_slow_vrate_pct = 10,
646 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
647 * vtime credit shortage and down on device saturation.
static u32 vrate_adj_pct[] =
	{ 0, 5, 5, 5, 5, 5, 5, 5,
	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
652 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
653 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
655 static struct blkcg_policy blkcg_policy_iocost;
657 /* accessors and helpers */
658 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
660 return container_of(rqos, struct ioc, rqos);
663 static struct ioc *q_to_ioc(struct request_queue *q)
665 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
668 static const char __maybe_unused *ioc_name(struct ioc *ioc)
struct gendisk *disk = ioc->rqos.q->disk;

if (!disk)
	return "<unknown>";
return disk->disk_name;
677 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
679 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
682 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
684 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
687 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
689 return pd_to_blkg(&iocg->pd);
692 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
694 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
695 struct ioc_cgrp, cpd);
699 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
700 * weight, the more expensive each IO. Must round up.
702 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
704 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
708 * The inverse of abs_cost_to_cost(). Must round up.
710 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
712 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
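
/*
 * For example (illustrative only), with hw_inuse at WEIGHT_ONE / 2, i.e. a
 * 50% hierarchical share:
 *
 *	abs_cost_to_cost(10, WEIGHT_ONE / 2) == 20
 *	cost_to_abs_cost(20, WEIGHT_ONE / 2) == 10
 *
 * Each unit of device time costs the cgroup twice as much vtime, and the
 * two helpers are inverses of each other modulo the round-ups.
 */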
715 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
716 u64 abs_cost, u64 cost)
718 struct iocg_pcpu_stat *gcs;
720 bio->bi_iocost_cost = cost;
721 atomic64_add(cost, &iocg->vtime);
723 gcs = get_cpu_ptr(iocg->pcpu_stat);
724 local64_add(abs_cost, &gcs->abs_vusage);
728 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
731 spin_lock_irqsave(&iocg->ioc->lock, *flags);
732 spin_lock(&iocg->waitq.lock);
734 spin_lock_irqsave(&iocg->waitq.lock, *flags);
738 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
741 spin_unlock(&iocg->waitq.lock);
742 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
744 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
748 #define CREATE_TRACE_POINTS
749 #include <trace/events/iocost.h>
751 static void ioc_refresh_margins(struct ioc *ioc)
753 struct ioc_margins *margins = &ioc->margins;
754 u32 period_us = ioc->period_us;
755 u64 vrate = ioc->vtime_base_rate;
757 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
758 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
759 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
/* latency QoS params changed, update period_us and all the dependent params */
763 static void ioc_refresh_period_us(struct ioc *ioc)
765 u32 ppm, lat, multi, period_us;
767 lockdep_assert_held(&ioc->lock);
769 /* pick the higher latency target */
770 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
771 ppm = ioc->params.qos[QOS_RPPM];
772 lat = ioc->params.qos[QOS_RLAT];
774 ppm = ioc->params.qos[QOS_WPPM];
775 lat = ioc->params.qos[QOS_WLAT];
779 * We want the period to be long enough to contain a healthy number
780 * of IOs while short enough for granular control. Define it as a
781 * multiple of the latency target. Ideally, the multiplier should
782 * be scaled according to the percentile so that it would nominally
783 * contain a certain number of requests. Let's be simpler and
784 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
787 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
790 period_us = multi * lat;
791 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
793 /* calculate dependent params */
794 ioc->period_us = period_us;
795 ioc->timer_slack_ns = div64_u64(
796 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
798 ioc_refresh_margins(ioc);
801 static int ioc_autop_idx(struct ioc *ioc)
803 int idx = ioc->autop_idx;
804 const struct ioc_params *p = &autop[idx];
809 if (!blk_queue_nonrot(ioc->rqos.q))
812 /* handle SATA SSDs w/ broken NCQ */
813 if (blk_queue_depth(ioc->rqos.q) == 1)
814 return AUTOP_SSD_QD1;
816 /* use one of the normal ssd sets */
817 if (idx < AUTOP_SSD_DFL)
818 return AUTOP_SSD_DFL;
820 /* if user is overriding anything, maintain what was there */
821 if (ioc->user_qos_params || ioc->user_cost_model)
824 /* step up/down based on the vrate */
825 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
826 now_ns = ktime_get_ns();
828 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
829 if (!ioc->autop_too_fast_at)
830 ioc->autop_too_fast_at = now_ns;
831 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
834 ioc->autop_too_fast_at = 0;
837 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
838 if (!ioc->autop_too_slow_at)
839 ioc->autop_too_slow_at = now_ns;
840 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
843 ioc->autop_too_slow_at = 0;
* Take the following as input
852 * @bps maximum sequential throughput
853 * @seqiops maximum sequential 4k iops
854 * @randiops maximum random 4k iops
856 * and calculate the linear model cost coefficients.
858 * *@page per-page cost 1s / (@bps / 4096)
859 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
* *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
862 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
863 u64 *page, u64 *seqio, u64 *randio)
867 *page = *seqio = *randio = 0;
870 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
871 DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
874 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
880 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
886 static void ioc_refresh_lcoefs(struct ioc *ioc)
888 u64 *u = ioc->params.i_lcoefs;
889 u64 *c = ioc->params.lcoefs;
891 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
892 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
893 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
894 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
897 static bool ioc_refresh_params(struct ioc *ioc, bool force)
899 const struct ioc_params *p;
902 lockdep_assert_held(&ioc->lock);
904 idx = ioc_autop_idx(ioc);
907 if (idx == ioc->autop_idx && !force)
910 if (idx != ioc->autop_idx) {
911 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
912 ioc->vtime_base_rate = VTIME_PER_USEC;
915 ioc->autop_idx = idx;
916 ioc->autop_too_fast_at = 0;
917 ioc->autop_too_slow_at = 0;
919 if (!ioc->user_qos_params)
920 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
921 if (!ioc->user_cost_model)
922 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
924 ioc_refresh_period_us(ioc);
925 ioc_refresh_lcoefs(ioc);
927 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
928 VTIME_PER_USEC, MILLION);
929 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
930 VTIME_PER_USEC, MILLION);
936 * When an iocg accumulates too much vtime or gets deactivated, we throw away
937 * some vtime, which lowers the overall device utilization. As the exact amount
938 * which is being thrown away is known, we can compensate by accelerating the
939 * vrate accordingly so that the extra vtime generated in the current period
940 * matches what got lost.
942 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
944 s64 pleft = ioc->period_at + ioc->period_us - now->now;
945 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
946 s64 vcomp, vcomp_min, vcomp_max;
948 lockdep_assert_held(&ioc->lock);
950 /* we need some time left in this period */
955 * Calculate how much vrate should be adjusted to offset the error.
956 * Limit the amount of adjustment and deduct the adjusted amount from
959 vcomp = -div64_s64(ioc->vtime_err, pleft);
960 vcomp_min = -(ioc->vtime_base_rate >> 1);
961 vcomp_max = ioc->vtime_base_rate;
962 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
964 ioc->vtime_err += vcomp * pleft;
966 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
968 /* bound how much error can accumulate */
969 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
972 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
973 int nr_lagging, int nr_shortages,
974 int prev_busy_level, u32 *missed_ppm)
976 u64 vrate = ioc->vtime_base_rate;
977 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
979 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
980 if (ioc->busy_level != prev_busy_level || nr_lagging)
981 trace_iocost_ioc_vrate_adj(ioc, vrate,
982 missed_ppm, rq_wait_pct,
983 nr_lagging, nr_shortages);
989 * If vrate is out of bounds, apply clamp gradually as the
990 * bounds can change abruptly. Otherwise, apply busy_level
993 if (vrate < vrate_min) {
994 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
995 vrate = min(vrate, vrate_min);
996 } else if (vrate > vrate_max) {
997 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
998 vrate = max(vrate, vrate_max);
1000 int idx = min_t(int, abs(ioc->busy_level),
1001 ARRAY_SIZE(vrate_adj_pct) - 1);
1002 u32 adj_pct = vrate_adj_pct[idx];
1004 if (ioc->busy_level > 0)
1005 adj_pct = 100 - adj_pct;
1007 adj_pct = 100 + adj_pct;
1009 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1010 vrate_min, vrate_max);
1013 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1014 nr_lagging, nr_shortages);
1016 ioc->vtime_base_rate = vrate;
1017 ioc_refresh_margins(ioc);
1020 /* take a snapshot of the current [v]time and vrate */
1021 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1026 now->now_ns = ktime_get();
1027 now->now = ktime_to_us(now->now_ns);
1028 vrate = atomic64_read(&ioc->vtime_rate);
1031 * The current vtime is
1033 * vtime at period start + (wallclock time since the start) * vrate
1035 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1036 * needed, they're seqcount protected.
1039 seq = read_seqcount_begin(&ioc->period_seqcount);
1040 now->vnow = ioc->period_at_vtime +
1041 (now->now - ioc->period_at) * vrate;
1042 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1045 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1047 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1049 write_seqcount_begin(&ioc->period_seqcount);
1050 ioc->period_at = now->now;
1051 ioc->period_at_vtime = now->vnow;
1052 write_seqcount_end(&ioc->period_seqcount);
1054 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1055 add_timer(&ioc->timer);
1059 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1060 * weight sums and propagate upwards accordingly. If @save, the current margin
1061 * is saved to be used as reference for later inuse in-period adjustments.
1063 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1064 bool save, struct ioc_now *now)
1066 struct ioc *ioc = iocg->ioc;
1069 lockdep_assert_held(&ioc->lock);
1072 * For an active leaf node, its inuse shouldn't be zero or exceed
1073 * @active. An active internal node's inuse is solely determined by the
1074 * inuse to active ratio of its children regardless of @inuse.
1076 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1077 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1078 iocg->child_active_sum);
1080 inuse = clamp_t(u32, inuse, 1, active);
1083 iocg->last_inuse = iocg->inuse;
1085 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1087 if (active == iocg->active && inuse == iocg->inuse)
1090 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1091 struct ioc_gq *parent = iocg->ancestors[lvl];
1092 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1093 u32 parent_active = 0, parent_inuse = 0;
1095 /* update the level sums */
1096 parent->child_active_sum += (s32)(active - child->active);
1097 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1098 /* apply the updates */
1099 child->active = active;
1100 child->inuse = inuse;
1103 * The delta between inuse and active sums indicates that
1104 * much of weight is being given away. Parent's inuse
1105 * and active should reflect the ratio.
1107 if (parent->child_active_sum) {
1108 parent_active = parent->weight;
1109 parent_inuse = DIV64_U64_ROUND_UP(
1110 parent_active * parent->child_inuse_sum,
1111 parent->child_active_sum);
1114 /* do we need to keep walking up? */
1115 if (parent_active == parent->active &&
1116 parent_inuse == parent->inuse)
1119 active = parent_active;
1120 inuse = parent_inuse;
1123 ioc->weights_updated = true;
1126 static void commit_weights(struct ioc *ioc)
1128 lockdep_assert_held(&ioc->lock);
1130 if (ioc->weights_updated) {
1131 /* paired with rmb in current_hweight(), see there */
1133 atomic_inc(&ioc->hweight_gen);
1134 ioc->weights_updated = false;
1138 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1139 bool save, struct ioc_now *now)
1141 __propagate_weights(iocg, active, inuse, save, now);
1142 commit_weights(iocg->ioc);
1145 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1147 struct ioc *ioc = iocg->ioc;
1152 /* hot path - if uptodate, use cached */
1153 ioc_gen = atomic_read(&ioc->hweight_gen);
1154 if (ioc_gen == iocg->hweight_gen)
* Paired with wmb in commit_weights(). If we saw the updated
* hweight_gen, all the weight updates from __propagate_weights() are
* visible.
*
* We can race with weight updates during calculation and get it
1163 * wrong. However, hweight_gen would have changed and a future
1164 * reader will recalculate and we're guaranteed to discard the
1165 * wrong result soon.
1169 hwa = hwi = WEIGHT_ONE;
1170 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1171 struct ioc_gq *parent = iocg->ancestors[lvl];
1172 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1173 u64 active_sum = READ_ONCE(parent->child_active_sum);
1174 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1175 u32 active = READ_ONCE(child->active);
1176 u32 inuse = READ_ONCE(child->inuse);
1178 /* we can race with deactivations and either may read as zero */
1179 if (!active_sum || !inuse_sum)
1182 active_sum = max_t(u64, active, active_sum);
1183 hwa = div64_u64((u64)hwa * active, active_sum);
1185 inuse_sum = max_t(u64, inuse, inuse_sum);
1186 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1189 iocg->hweight_active = max_t(u32, hwa, 1);
1190 iocg->hweight_inuse = max_t(u32, hwi, 1);
1191 iocg->hweight_gen = ioc_gen;
1194 *hw_activep = iocg->hweight_active;
1196 *hw_inusep = iocg->hweight_inuse;
1200 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1201 * other weights stay unchanged.
1203 static u32 current_hweight_max(struct ioc_gq *iocg)
1205 u32 hwm = WEIGHT_ONE;
1206 u32 inuse = iocg->active;
1207 u64 child_inuse_sum;
1210 lockdep_assert_held(&iocg->ioc->lock);
1212 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1213 struct ioc_gq *parent = iocg->ancestors[lvl];
1214 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1216 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1217 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1218 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1219 parent->child_active_sum);
1222 return max_t(u32, hwm, 1);
1225 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1227 struct ioc *ioc = iocg->ioc;
1228 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1229 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1232 lockdep_assert_held(&ioc->lock);
1234 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1235 if (weight != iocg->weight && iocg->active)
1236 propagate_weights(iocg, weight, iocg->inuse, true, now);
1237 iocg->weight = weight;
1240 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1242 struct ioc *ioc = iocg->ioc;
1243 u64 last_period, cur_period;
* If we seem to be already active, just update the stamp to tell the
* timer that we're still active. We don't mind occasional races.
1251 if (!list_empty(&iocg->active_list)) {
1253 cur_period = atomic64_read(&ioc->cur_period);
1254 if (atomic64_read(&iocg->active_period) != cur_period)
1255 atomic64_set(&iocg->active_period, cur_period);
1259 /* racy check on internal node IOs, treat as root level IOs */
1260 if (iocg->child_active_sum)
1263 spin_lock_irq(&ioc->lock);
1268 cur_period = atomic64_read(&ioc->cur_period);
1269 last_period = atomic64_read(&iocg->active_period);
1270 atomic64_set(&iocg->active_period, cur_period);
1272 /* already activated or breaking leaf-only constraint? */
1273 if (!list_empty(&iocg->active_list))
1274 goto succeed_unlock;
1275 for (i = iocg->level - 1; i > 0; i--)
1276 if (!list_empty(&iocg->ancestors[i]->active_list))
1279 if (iocg->child_active_sum)
1283 * Always start with the target budget. On deactivation, we throw away
1284 * anything above it.
1286 vtarget = now->vnow - ioc->margins.target;
1287 vtime = atomic64_read(&iocg->vtime);
1289 atomic64_add(vtarget - vtime, &iocg->vtime);
1290 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1294 * Activate, propagate weight and start period timer if not
1295 * running. Reset hweight_gen to avoid accidental match from
1298 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1299 list_add(&iocg->active_list, &ioc->active_iocgs);
1301 propagate_weights(iocg, iocg->weight,
1302 iocg->last_inuse ?: iocg->weight, true, now);
1304 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1305 last_period, cur_period, vtime);
1307 iocg->activated_at = now->now;
1309 if (ioc->running == IOC_IDLE) {
1310 ioc->running = IOC_RUNNING;
1311 ioc->dfgv_period_at = now->now;
1312 ioc->dfgv_period_rem = 0;
1313 ioc_start_period(ioc, now);
1317 spin_unlock_irq(&ioc->lock);
1321 spin_unlock_irq(&ioc->lock);
1325 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1327 struct ioc *ioc = iocg->ioc;
1328 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1329 u64 tdelta, delay, new_delay;
1330 s64 vover, vover_pct;
1333 lockdep_assert_held(&iocg->waitq.lock);
1335 /* calculate the current delay in effect - 1/2 every second */
1336 tdelta = now->now - iocg->delay_at;
1338 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1342 /* calculate the new delay from the debt amount */
1343 current_hweight(iocg, &hwa, NULL);
1344 vover = atomic64_read(&iocg->vtime) +
1345 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1346 vover_pct = div64_s64(100 * vover,
1347 ioc->period_us * ioc->vtime_base_rate);
1349 if (vover_pct <= MIN_DELAY_THR_PCT)
1351 else if (vover_pct >= MAX_DELAY_THR_PCT)
1352 new_delay = MAX_DELAY;
1354 new_delay = MIN_DELAY +
1355 div_u64((MAX_DELAY - MIN_DELAY) *
1356 (vover_pct - MIN_DELAY_THR_PCT),
1357 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
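
	/*
	 * Worked example of the interpolation above (illustrative numbers):
	 * with vover_pct sitting halfway between MIN_DELAY_THR_PCT (500) and
	 * MAX_DELAY_THR_PCT (25000), i.e. vover_pct == 12750, new_delay ends
	 * up halfway between MIN_DELAY and MAX_DELAY (250ms).
	 */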
1359 /* pick the higher one and apply */
1360 if (new_delay > delay) {
1361 iocg->delay = new_delay;
1362 iocg->delay_at = now->now;
1366 if (delay >= MIN_DELAY) {
1367 if (!iocg->indelay_since)
1368 iocg->indelay_since = now->now;
1369 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1372 if (iocg->indelay_since) {
1373 iocg->stat.indelay_us += now->now - iocg->indelay_since;
1374 iocg->indelay_since = 0;
1377 blkcg_clear_delay(blkg);
1382 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1383 struct ioc_now *now)
1385 struct iocg_pcpu_stat *gcs;
1387 lockdep_assert_held(&iocg->ioc->lock);
1388 lockdep_assert_held(&iocg->waitq.lock);
1389 WARN_ON_ONCE(list_empty(&iocg->active_list));
1392 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
* inuse, donating all of its share to others until its debt is paid off.
1395 if (!iocg->abs_vdebt && abs_cost) {
1396 iocg->indebt_since = now->now;
1397 propagate_weights(iocg, iocg->active, 0, false, now);
1400 iocg->abs_vdebt += abs_cost;
1402 gcs = get_cpu_ptr(iocg->pcpu_stat);
1403 local64_add(abs_cost, &gcs->abs_vusage);
1407 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1408 struct ioc_now *now)
1410 lockdep_assert_held(&iocg->ioc->lock);
1411 lockdep_assert_held(&iocg->waitq.lock);
1413 /* make sure that nobody messed with @iocg */
1414 WARN_ON_ONCE(list_empty(&iocg->active_list));
1415 WARN_ON_ONCE(iocg->inuse > 1);
1417 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1419 /* if debt is paid in full, restore inuse */
1420 if (!iocg->abs_vdebt) {
1421 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1422 iocg->indebt_since = 0;
1424 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1429 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1430 int flags, void *key)
1432 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1433 struct iocg_wake_ctx *ctx = key;
1434 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1436 ctx->vbudget -= cost;
1438 if (ctx->vbudget < 0)
1441 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1442 wait->committed = true;
1445 * autoremove_wake_function() removes the wait entry only when it
1446 * actually changed the task state. We want the wait always removed.
1447 * Remove explicitly and use default_wake_function(). Note that the
1448 * order of operations is important as finish_wait() tests whether
1449 * @wq_entry is removed without grabbing the lock.
1451 default_wake_function(wq_entry, mode, flags, key);
1452 list_del_init_careful(&wq_entry->entry);
1457 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1458 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1459 * addition to iocg->waitq.lock.
1461 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1462 struct ioc_now *now)
1464 struct ioc *ioc = iocg->ioc;
1465 struct iocg_wake_ctx ctx = { .iocg = iocg };
1466 u64 vshortage, expires, oexpires;
1470 lockdep_assert_held(&iocg->waitq.lock);
1472 current_hweight(iocg, &hwa, NULL);
1473 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1476 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1477 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1478 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1479 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1481 lockdep_assert_held(&ioc->lock);
1483 atomic64_add(vpay, &iocg->vtime);
1484 atomic64_add(vpay, &iocg->done_vtime);
1485 iocg_pay_debt(iocg, abs_vpay, now);
1489 if (iocg->abs_vdebt || iocg->delay)
1490 iocg_kick_delay(iocg, now);
1493 * Debt can still be outstanding if we haven't paid all yet or the
1494 * caller raced and called without @pay_debt. Shouldn't wake up waiters
* under debt. Make sure @vbudget reflects the outstanding amount and is
* not positive.
1498 if (iocg->abs_vdebt) {
1499 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1500 vbudget = min_t(s64, 0, vbudget - vdebt);
1504 * Wake up the ones which are due and see how much vtime we'll need for
1505 * the next one. As paying off debt restores hw_inuse, it must be read
1506 * after the above debt payment.
1508 ctx.vbudget = vbudget;
1509 current_hweight(iocg, NULL, &ctx.hw_inuse);
1511 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1513 if (!waitqueue_active(&iocg->waitq)) {
1514 if (iocg->wait_since) {
1515 iocg->stat.wait_us += now->now - iocg->wait_since;
1516 iocg->wait_since = 0;
1521 if (!iocg->wait_since)
1522 iocg->wait_since = now->now;
1524 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1527 /* determine next wakeup, add a timer margin to guarantee chunking */
1528 vshortage = -ctx.vbudget;
1529 expires = now->now_ns +
1530 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1532 expires += ioc->timer_slack_ns;
1534 /* if already active and close enough, don't bother */
1535 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1536 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1537 abs(oexpires - expires) <= ioc->timer_slack_ns)
1540 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1541 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1544 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1546 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1547 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1549 unsigned long flags;
1551 ioc_now(iocg->ioc, &now);
1553 iocg_lock(iocg, pay_debt, &flags);
1554 iocg_kick_waitq(iocg, pay_debt, &now);
1555 iocg_unlock(iocg, pay_debt, &flags);
1557 return HRTIMER_NORESTART;
1560 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1562 u32 nr_met[2] = { };
1563 u32 nr_missed[2] = { };
1567 for_each_online_cpu(cpu) {
1568 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1569 u64 this_rq_wait_ns;
1571 for (rw = READ; rw <= WRITE; rw++) {
1572 u32 this_met = local_read(&stat->missed[rw].nr_met);
1573 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1575 nr_met[rw] += this_met - stat->missed[rw].last_met;
1576 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1577 stat->missed[rw].last_met = this_met;
1578 stat->missed[rw].last_missed = this_missed;
1581 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1582 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1583 stat->last_rq_wait_ns = this_rq_wait_ns;
1586 for (rw = READ; rw <= WRITE; rw++) {
if (nr_met[rw] + nr_missed[rw])
	missed_ppm_ar[rw] =
		DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
				   nr_met[rw] + nr_missed[rw]);
else
	missed_ppm_ar[rw] = 0;
1595 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1596 ioc->period_us * NSEC_PER_USEC);
1599 /* was iocg idle this period? */
1600 static bool iocg_is_idle(struct ioc_gq *iocg)
1602 struct ioc *ioc = iocg->ioc;
1604 /* did something get issued this period? */
1605 if (atomic64_read(&iocg->active_period) ==
1606 atomic64_read(&ioc->cur_period))
1609 /* is something in flight? */
1610 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1617 * Call this function on the target leaf @iocg's to build pre-order traversal
1618 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1619 * ->walk_list and the caller is responsible for dissolving the list after use.
1621 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1622 struct list_head *inner_walk)
1626 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1628 /* find the first ancestor which hasn't been visited yet */
1629 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1630 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1634 /* walk down and visit the inner nodes to get pre-order traversal */
1635 while (++lvl <= iocg->level - 1) {
1636 struct ioc_gq *inner = iocg->ancestors[lvl];
1638 /* record traversal order */
1639 list_add_tail(&inner->walk_list, inner_walk);
1643 /* propagate the deltas to the parent */
1644 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1646 if (iocg->level > 0) {
1647 struct iocg_stat *parent_stat =
1648 &iocg->ancestors[iocg->level - 1]->stat;
1650 parent_stat->usage_us +=
1651 iocg->stat.usage_us - iocg->last_stat.usage_us;
1652 parent_stat->wait_us +=
1653 iocg->stat.wait_us - iocg->last_stat.wait_us;
1654 parent_stat->indebt_us +=
1655 iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1656 parent_stat->indelay_us +=
1657 iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1660 iocg->last_stat = iocg->stat;
1663 /* collect per-cpu counters and propagate the deltas to the parent */
1664 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1666 struct ioc *ioc = iocg->ioc;
1671 lockdep_assert_held(&iocg->ioc->lock);
1673 /* collect per-cpu counters */
1674 for_each_possible_cpu(cpu) {
1675 abs_vusage += local64_read(
1676 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1678 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1679 iocg->last_stat_abs_vusage = abs_vusage;
1681 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1682 iocg->stat.usage_us += iocg->usage_delta_us;
1684 iocg_flush_stat_upward(iocg);
1687 /* get stat counters ready for reading on all active iocgs */
1688 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1690 LIST_HEAD(inner_walk);
1691 struct ioc_gq *iocg, *tiocg;
1693 /* flush leaves and build inner node walk list */
1694 list_for_each_entry(iocg, target_iocgs, active_list) {
1695 iocg_flush_stat_leaf(iocg, now);
1696 iocg_build_inner_walk(iocg, &inner_walk);
1699 /* keep flushing upwards by walking the inner list backwards */
1700 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1701 iocg_flush_stat_upward(iocg);
1702 list_del_init(&iocg->walk_list);
1707 * Determine what @iocg's hweight_inuse should be after donating unused
1708 * capacity. @hwm is the upper bound and used to signal no donation. This
1709 * function also throws away @iocg's excess budget.
1711 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1712 u32 usage, struct ioc_now *now)
1714 struct ioc *ioc = iocg->ioc;
1715 u64 vtime = atomic64_read(&iocg->vtime);
1716 s64 excess, delta, target, new_hwi;
1718 /* debt handling owns inuse for debtors */
1719 if (iocg->abs_vdebt)
1722 /* see whether minimum margin requirement is met */
1723 if (waitqueue_active(&iocg->waitq) ||
1724 time_after64(vtime, now->vnow - ioc->margins.min))
1727 /* throw away excess above target */
1728 excess = now->vnow - vtime - ioc->margins.target;
1730 atomic64_add(excess, &iocg->vtime);
1731 atomic64_add(excess, &iocg->done_vtime);
1733 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1737 * Let's say the distance between iocg's and device's vtimes as a
1738 * fraction of period duration is delta. Assuming that the iocg will
1739 * consume the usage determined above, we want to determine new_hwi so
1740 * that delta equals MARGIN_TARGET at the end of the next period.
1742 * We need to execute usage worth of IOs while spending the sum of the
* new budget (1 - MARGIN_TARGET) and the leftover from the last period
* (delta):
1746 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1748 * Therefore, the new_hwi is:
1750 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
1752 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1753 now->vnow - ioc->period_at_vtime);
1754 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1755 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1757 return clamp_t(s64, new_hwi, 1, hwm);
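
/*
 * A rough worked instance of the formula above (illustrative numbers only):
 * if an iocg used 25% of the device (usage = 0.25 * WEIGHT_ONE) and its
 * vtime trails the device vtime by half of the current period's worth of
 * vtime (delta = 0.5 * WEIGHT_ONE) with MARGIN_TARGET_PCT at 50, then
 *
 *	new_hwi = 0.25 / (1 - 0.5 + 0.5) = 0.25
 *
 * i.e. its hweight_inuse is shrunk to just cover its recent usage.
 */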
1761 * For work-conservation, an iocg which isn't using all of its share should
1762 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1763 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1765 * #1 is mathematically simpler but has the drawback of requiring synchronous
1766 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1767 * change due to donation snapbacks as it has the possibility of grossly
1768 * overshooting what's allowed by the model and vrate.
1770 * #2 is inherently safe with local operations. The donating iocg can easily
1771 * snap back to higher weights when needed without worrying about impacts on
1772 * other nodes as the impacts will be inherently correct. This also makes idle
1773 * iocg activations safe. The only effect activations have is decreasing
1774 * hweight_inuse of others, the right solution to which is for those iocgs to
1775 * snap back to higher weights.
1777 * So, we go with #2. The challenge is calculating how each donating iocg's
1778 * inuse should be adjusted to achieve the target donation amounts. This is done
1779 * using Andy's method described in the following pdf.
1781 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1783 * Given the weights and target after-donation hweight_inuse values, Andy's
1784 * method determines how the proportional distribution should look like at each
1785 * sibling level to maintain the relative relationship between all non-donating
1786 * pairs. To roughly summarize, it divides the tree into donating and
1787 * non-donating parts, calculates global donation rate which is used to
* determine the target hweight_inuse for each node, and then derives per-level
* proportional distributions.
*
1791 * The following pdf shows that global distribution calculated this way can be
1792 * achieved by scaling inuse weights of donating leaves and propagating the
1793 * adjustments upwards proportionally.
1795 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1797 * Combining the above two, we can determine how each leaf iocg's inuse should
1798 * be adjusted to achieve the target donation.
1800 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1802 * The inline comments use symbols from the last pdf.
1804 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1805 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1806 * t is the sum of the absolute budgets of donating nodes in the subtree.
1807 * w is the weight of the node. w = w_f + w_t
1808 * w_f is the non-donating portion of w. w_f = w * f / b
* w_t is the donating portion of w. w_t = w * t / b
1810 * s is the sum of all sibling weights. s = Sum(w) for siblings
1811 * s_f and s_t are the non-donating and donating portions of s.
1813 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1814 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1815 * after adjustments. Subscript r denotes the root node's values.
1817 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1819 LIST_HEAD(over_hwa);
1820 LIST_HEAD(inner_walk);
1821 struct ioc_gq *iocg, *tiocg, *root_iocg;
1822 u32 after_sum, over_sum, over_target, gamma;
1825 * It's pretty unlikely but possible for the total sum of
1826 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
* confuse the following calculations. If such a condition is detected,
* scale down everyone over its full share equally to keep the sum below
* WEIGHT_ONE.
1833 list_for_each_entry(iocg, surpluses, surplus_list) {
1836 current_hweight(iocg, &hwa, NULL);
1837 after_sum += iocg->hweight_after_donation;
1839 if (iocg->hweight_after_donation > hwa) {
1840 over_sum += iocg->hweight_after_donation;
1841 list_add(&iocg->walk_list, &over_hwa);
1845 if (after_sum >= WEIGHT_ONE) {
1847 * The delta should be deducted from the over_sum, calculate
1848 * target over_sum value.
1850 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1851 WARN_ON_ONCE(over_sum <= over_delta);
1852 over_target = over_sum - over_delta;
1857 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1859 iocg->hweight_after_donation =
1860 div_u64((u64)iocg->hweight_after_donation *
1861 over_target, over_sum);
1862 list_del_init(&iocg->walk_list);
1866 * Build pre-order inner node walk list and prepare for donation
1867 * adjustment calculations.
1869 list_for_each_entry(iocg, surpluses, surplus_list) {
1870 iocg_build_inner_walk(iocg, &inner_walk);
1873 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1874 WARN_ON_ONCE(root_iocg->level > 0);
1876 list_for_each_entry(iocg, &inner_walk, walk_list) {
1877 iocg->child_adjusted_sum = 0;
1878 iocg->hweight_donating = 0;
1879 iocg->hweight_after_donation = 0;
1883 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1886 list_for_each_entry(iocg, surpluses, surplus_list) {
1887 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1889 parent->hweight_donating += iocg->hweight_donating;
1890 parent->hweight_after_donation += iocg->hweight_after_donation;
1893 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1894 if (iocg->level > 0) {
1895 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1897 parent->hweight_donating += iocg->hweight_donating;
1898 parent->hweight_after_donation += iocg->hweight_after_donation;
1903 * Calculate inner hwa's (b) and make sure the donation values are
* within the accepted ranges as we're doing low resolution calculations.
1907 list_for_each_entry(iocg, &inner_walk, walk_list) {
1909 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1911 iocg->hweight_active = DIV64_U64_ROUND_UP(
1912 (u64)parent->hweight_active * iocg->active,
1913 parent->child_active_sum);
1917 iocg->hweight_donating = min(iocg->hweight_donating,
1918 iocg->hweight_active);
1919 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1920 iocg->hweight_donating - 1);
1921 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1922 iocg->hweight_donating <= 1 ||
1923 iocg->hweight_after_donation == 0)) {
1924 pr_warn("iocg: invalid donation weights in ");
1925 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1926 pr_cont(": active=%u donating=%u after=%u\n",
1927 iocg->hweight_active, iocg->hweight_donating,
1928 iocg->hweight_after_donation);
1933 * Calculate the global donation rate (gamma) - the rate to adjust
1934 * non-donating budgets by.
1936 * No need to use 64bit multiplication here as the first operand is
1937 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1939 * We know that there are beneficiary nodes and the sum of the donating
1940 * hweights can't be whole; however, due to the round-ups during hweight
1941 * calculations, root_iocg->hweight_donating might still end up equal to
1942 * or greater than whole. Limit the range when calculating the divider.
1944 * gamma = (1 - t_r') / (1 - t_r)
1946 gamma = DIV_ROUND_UP(
1947 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1948 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
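
	/*
	 * For instance (illustrative numbers only): if the donating nodes
	 * cover 40% of the device (t_r = 0.4) and should keep only 10% after
	 * donation (t_r' = 0.1), then
	 *
	 *	gamma = (1 - 0.1) / (1 - 0.4) = 1.5
	 *
	 * and every non-donating budget is scaled up by 1.5x to absorb the
	 * freed-up 30%.
	 */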
1951 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1954 list_for_each_entry(iocg, &inner_walk, walk_list) {
1955 struct ioc_gq *parent;
1956 u32 inuse, wpt, wptp;
1959 if (iocg->level == 0) {
1960 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1961 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1962 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1963 WEIGHT_ONE - iocg->hweight_after_donation);
1967 parent = iocg->ancestors[iocg->level - 1];
1969 /* b' = gamma * b_f + b_t' */
1970 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1971 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1972 WEIGHT_ONE) + iocg->hweight_after_donation;
1974 /* w' = s' * b' / b'_p */
1975 inuse = DIV64_U64_ROUND_UP(
1976 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1977 parent->hweight_inuse);
1979 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1980 st = DIV64_U64_ROUND_UP(
1981 iocg->child_active_sum * iocg->hweight_donating,
1982 iocg->hweight_active);
1983 sf = iocg->child_active_sum - st;
1984 wpt = DIV64_U64_ROUND_UP(
1985 (u64)iocg->active * iocg->hweight_donating,
1986 iocg->hweight_active);
1987 wptp = DIV64_U64_ROUND_UP(
1988 (u64)inuse * iocg->hweight_after_donation,
1989 iocg->hweight_inuse);
1991 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1995 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1996 * we can finally determine leaf adjustments.
1998 list_for_each_entry(iocg, surpluses, surplus_list) {
1999 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2003 * In-debt iocgs participated in the donation calculation with
2004 * the minimum target hweight_inuse. Configuring inuse
* accordingly would work fine but debt handling expects
* @iocg->inuse to stay at the minimum, so it is left as-is here.
2009 if (iocg->abs_vdebt) {
2010 WARN_ON_ONCE(iocg->inuse > 1);
2014 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2015 inuse = DIV64_U64_ROUND_UP(
2016 parent->child_adjusted_sum * iocg->hweight_after_donation,
2017 parent->hweight_inuse);
2019 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2021 iocg->hweight_inuse,
2022 iocg->hweight_after_donation);
2024 __propagate_weights(iocg, iocg->active, inuse, true, now);
2027 /* walk list should be dissolved after use */
2028 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2029 list_del_init(&iocg->walk_list);
2033 * A low weight iocg can amass a large amount of debt, for example, when
2034 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2035 * memory paired with a slow IO device, the debt can span multiple seconds or
2036 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2037 * up blocked paying its debt while the IO device is idle.
* The following protects against such cases. If the device has been
* sufficiently idle for a while, the debts are halved and delays are
* recalculated.
*/
2043 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2044 struct ioc_now *now)
2046 struct ioc_gq *iocg;
2047 u64 dur, usage_pct, nr_cycles;
/* if there are no debtors, reset the cycle */
if (!nr_debtors) {
ioc->dfgv_period_at = now->now;
ioc->dfgv_period_rem = 0;
ioc->dfgv_usage_us_sum = 0;
return;
}
* Debtors can pass through a lot of writes choking the device and we
* don't want to be forgiving debts while the device is struggling from
* write bursts. If we're missing latency targets, consider the device
* fully utilized.
*/
2063 if (ioc->busy_level > 0)
2064 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2066 ioc->dfgv_usage_us_sum += usage_us_sum;
if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
return;
2071 * At least DFGV_PERIOD has passed since the last period. Calculate the
2072 * average usage and reset the period counters.
2074 dur = now->now - ioc->dfgv_period_at;
2075 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2077 ioc->dfgv_period_at = now->now;
2078 ioc->dfgv_usage_us_sum = 0;
/* if the device was too busy, reset everything */
if (usage_pct > DFGV_USAGE_PCT) {
ioc->dfgv_period_rem = 0;
return;
}
2087 * Usage is lower than threshold. Let's forgive some debts. Debt
2088 * forgiveness runs off of the usual ioc timer but its period usually
2089 * doesn't match ioc's. Compensate the difference by performing the
2090 * reduction as many times as would fit in the duration since the last
2091 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2092 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2093 * reductions is doubled.
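*
* For example, if the elapsed time plus the carried-over remainder
* amounts to two and a half DFGV_PERIODs, nr_cycles below becomes 2,
* the remaining half period is carried over in dfgv_period_rem, and
* each debtor's debt and delay are divided by 2^2 = 4.
*/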
2095 nr_cycles = dur + ioc->dfgv_period_rem;
2096 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2098 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2099 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2101 if (!iocg->abs_vdebt && !iocg->delay)
2104 spin_lock(&iocg->waitq.lock);
2106 old_debt = iocg->abs_vdebt;
2107 old_delay = iocg->delay;
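/*
* Halve the remaining debt and delay once per elapsed forgiveness
* cycle. The GNU "x >> n ?: 1" form keeps the result at a minimum of
* 1, so a debtor is never zeroed outright here and remains tracked
* until the remainder is cleared through the usual paths.
*/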
if (iocg->abs_vdebt)
iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
if (iocg->delay)
iocg->delay = iocg->delay >> nr_cycles ?: 1;
2114 iocg_kick_waitq(iocg, true, now);
2116 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2117 old_debt, iocg->abs_vdebt,
2118 old_delay, iocg->delay);
2120 spin_unlock(&iocg->waitq.lock);
* Check the active iocgs' state to avoid oversleeping and to
* deactivate idle iocgs.
*
* Since waiters determine the sleep durations based on the vrate
* they saw at the time of sleep, if vrate has increased, some
* waiters could be sleeping for too long. Wake up tardy waiters
* which should have woken up in the last period and expire idle
* iocgs.
*/
2134 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2137 struct ioc_gq *iocg, *tiocg;
2139 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2140 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
!iocg->delay && !iocg_is_idle(iocg))
continue;
2144 spin_lock(&iocg->waitq.lock);
2146 /* flush wait and indebt stat deltas */
2147 if (iocg->wait_since) {
2148 iocg->stat.wait_us += now->now - iocg->wait_since;
2149 iocg->wait_since = now->now;
2151 if (iocg->indebt_since) {
2152 iocg->stat.indebt_us +=
2153 now->now - iocg->indebt_since;
2154 iocg->indebt_since = now->now;
2156 if (iocg->indelay_since) {
2157 iocg->stat.indelay_us +=
2158 now->now - iocg->indelay_since;
2159 iocg->indelay_since = now->now;
if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
iocg->delay) {
/* might be oversleeping vtime / hweight changes, kick */
iocg_kick_waitq(iocg, true, now);
if (iocg->abs_vdebt || iocg->delay)
nr_debtors++;
2168 } else if (iocg_is_idle(iocg)) {
2169 /* no waiter and idle, deactivate */
2170 u64 vtime = atomic64_read(&iocg->vtime);
2174 * @iocg has been inactive for a full duration and will
2175 * have a high budget. Account anything above target as
2176 * error and throw away. On reactivation, it'll start
2177 * with the target budget.
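*
* For example, an iocg whose vtime trails vnow by 150ms worth of
* vtime with a 50ms target margin has 100ms of excess; scaled by a
* 25% hweight_inuse, 25ms is deducted from ioc->vtime_err below.
*/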
excess = now->vnow - vtime - ioc->margins.target;
if (excess > 0) {
u32 old_hwi;

current_hweight(iocg, NULL, &old_hwi);
ioc->vtime_err -= div64_u64(excess * old_hwi,
WEIGHT_ONE);
}
2188 TRACE_IOCG_PATH(iocg_idle, iocg, now,
2189 atomic64_read(&iocg->active_period),
2190 atomic64_read(&ioc->cur_period), vtime);
2191 __propagate_weights(iocg, 0, 0, false, now);
2192 list_del_init(&iocg->active_list);
2195 spin_unlock(&iocg->waitq.lock);
2198 commit_weights(ioc);
2202 static void ioc_timer_fn(struct timer_list *timer)
2204 struct ioc *ioc = container_of(timer, struct ioc, timer);
2205 struct ioc_gq *iocg, *tiocg;
2207 LIST_HEAD(surpluses);
2208 int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2209 u64 usage_us_sum = 0;
2212 u32 missed_ppm[2], rq_wait_pct;
2214 int prev_busy_level;
2216 /* how were the latencies during the period? */
2217 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2219 /* take care of active iocgs */
2220 spin_lock_irq(&ioc->lock);
2222 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2223 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2226 period_vtime = now.vnow - ioc->period_at_vtime;
2227 if (WARN_ON_ONCE(!period_vtime)) {
2228 spin_unlock_irq(&ioc->lock);
2232 nr_debtors = ioc_check_iocgs(ioc, &now);
* Wait and indebt stats are flushed above and the donation calculation
* below needs up-to-date usage stats. Bring the stats up to date.
*/
2238 iocg_flush_stat(&ioc->active_iocgs, &now);
2240 /* calc usage and see whether some weights need to be moved around */
2241 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2242 u64 vdone, vtime, usage_us;
2243 u32 hw_active, hw_inuse;
2246 * Collect unused and wind vtime closer to vnow to prevent
2247 * iocgs from accumulating a large amount of budget.
2249 vdone = atomic64_read(&iocg->done_vtime);
2250 vtime = atomic64_read(&iocg->vtime);
2251 current_hweight(iocg, &hw_active, &hw_inuse);
2254 * Latency QoS detection doesn't account for IOs which are
2255 * in-flight for longer than a period. Detect them by
2256 * comparing vdone against period start. If lagging behind
2257 * IOs from past periods, don't increase vrate.
2259 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2260 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2261 time_after64(vtime, vdone) &&
2262 time_after64(vtime, now.vnow -
2263 MAX_LAGGING_PERIODS * period_vtime) &&
2264 time_before64(vdone, now.vnow - period_vtime))
2268 * Determine absolute usage factoring in in-flight IOs to avoid
2269 * high-latency completions appearing as idle.
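* When completions lag (vdone trails vtime), the still-in-flight vtime
* is converted back to absolute device time and used as a floor for
* usage_us in the surplus check below.
*/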
2271 usage_us = iocg->usage_delta_us;
2272 usage_us_sum += usage_us;
2274 /* see whether there's surplus vtime */
2275 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2276 if (hw_inuse < hw_active ||
2277 (!waitqueue_active(&iocg->waitq) &&
2278 time_before64(vtime, now.vnow - ioc->margins.low))) {
2279 u32 hwa, old_hwi, hwm, new_hwi, usage;
2282 if (vdone != vtime) {
2283 u64 inflight_us = DIV64_U64_ROUND_UP(
2284 cost_to_abs_cost(vtime - vdone, hw_inuse),
2285 ioc->vtime_base_rate);
2287 usage_us = max(usage_us, inflight_us);
2290 /* convert to hweight based usage ratio */
if (time_after64(iocg->activated_at, ioc->period_at))
usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
else
usage_dur = max_t(u64, now.now - ioc->period_at, 1);

usage = clamp_t(u32,
DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
usage_dur),
1, WEIGHT_ONE);
2302 * Already donating or accumulated enough to start.
2303 * Determine the donation amount.
2305 current_hweight(iocg, &hwa, &old_hwi);
2306 hwm = current_hweight_max(iocg);
2307 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
* Donation calculation assumes hweight_after_donation
* to be positive, a condition that a donor w/ hwa < 2
* can't meet. Don't bother with donation if hwa is
* below 2. It's not going to make a meaningful difference
* anyway.
*/
2316 if (new_hwi < hwm && hwa >= 2) {
2317 iocg->hweight_donating = hwa;
2318 iocg->hweight_after_donation = new_hwi;
2319 list_add(&iocg->surplus_list, &surpluses);
2320 } else if (!iocg->abs_vdebt) {
* @iocg doesn't have enough to donate. Reset
* its inuse to active.
*
* Don't reset debtors as their inuse's are
* owned by debt handling. This shouldn't affect
* donation calculation in any meaningful way
* as @iocg doesn't have a meaningful amount of
* share anyway.
*/
2331 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2332 iocg->inuse, iocg->active,
2333 iocg->hweight_inuse, new_hwi);
2335 __propagate_weights(iocg, iocg->active,
2336 iocg->active, true, &now);
2340 /* genuinely short on vtime */
2345 if (!list_empty(&surpluses) && nr_shortages)
2346 transfer_surpluses(&surpluses, &now);
2348 commit_weights(ioc);
2350 /* surplus list should be dissolved after use */
2351 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2352 list_del_init(&iocg->surplus_list);
2355 * If q is getting clogged or we're missing too much, we're issuing
2356 * too much IO and should lower vtime rate. If we're not missing
2357 * and experiencing shortages but not surpluses, we're too stingy
2358 * and should increase vtime rate.
2360 prev_busy_level = ioc->busy_level;
2361 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2362 missed_ppm[READ] > ppm_rthr ||
2363 missed_ppm[WRITE] > ppm_wthr) {
2364 /* clearly missing QoS targets, slow down vrate */
2365 ioc->busy_level = max(ioc->busy_level, 0);
2367 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2368 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2369 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2370 /* QoS targets are being met with >25% margin */
2373 * We're throttling while the device has spare
2374 * capacity. If vrate was being slowed down, stop.
2376 ioc->busy_level = min(ioc->busy_level, 0);
2379 * If there are IOs spanning multiple periods, wait
2380 * them out before pushing the device harder.
2386 * Nobody is being throttled and the users aren't
2387 * issuing enough IOs to saturate the device. We
2388 * simply don't know how close the device is to
2389 * saturation. Coast.
2391 ioc->busy_level = 0;
/* inside the hysteresis margin, we're good */
2395 ioc->busy_level = 0;
2398 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2400 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2401 prev_busy_level, missed_ppm);
2403 ioc_refresh_params(ioc, false);
2405 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
* This period is done. Move on to the next one. If nothing's
* going on with the device, stop the timer.
*/
2411 atomic64_inc(&ioc->cur_period);
2413 if (ioc->running != IOC_STOP) {
2414 if (!list_empty(&ioc->active_iocgs)) {
2415 ioc_start_period(ioc, &now);
2417 ioc->busy_level = 0;
2419 ioc->running = IOC_IDLE;
2422 ioc_refresh_vrate(ioc, &now);
2425 spin_unlock_irq(&ioc->lock);
2428 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2429 u64 abs_cost, struct ioc_now *now)
2431 struct ioc *ioc = iocg->ioc;
2432 struct ioc_margins *margins = &ioc->margins;
2433 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2436 u64 cost, new_inuse;
2438 current_hweight(iocg, NULL, &hwi);
2440 cost = abs_cost_to_cost(abs_cost, hwi);
2441 margin = now->vnow - vtime - cost;
2443 /* debt handling owns inuse for debtors */
2444 if (iocg->abs_vdebt)
2448 * We only increase inuse during period and do so if the margin has
2449 * deteriorated since the previous adjustment.
2451 if (margin >= iocg->saved_margin || margin >= margins->low ||
2452 iocg->inuse == iocg->active)
2455 spin_lock_irq(&ioc->lock);
2457 /* we own inuse only when @iocg is in the normal active state */
2458 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2459 spin_unlock_irq(&ioc->lock);
* Bump up inuse till @abs_cost fits in the existing budget.
* adj_step must be determined after acquiring ioc->lock - we might
* have raced and lost to another thread for activation and could
* be reading 0 iocg->active before ioc->lock which will lead to
* an infinite loop.
*/
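/*
* Each loop iteration below raises inuse by INUSE_ADJ_STEP_PCT percent
* of the active weight and re-evaluates the cost at the resulting
* hweight, stopping once @abs_cost fits before vnow or inuse reaches
* active.
*/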
2470 new_inuse = iocg->inuse;
2471 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
do {
new_inuse = new_inuse + adj_step;
2474 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2475 current_hweight(iocg, NULL, &hwi);
2476 cost = abs_cost_to_cost(abs_cost, hwi);
2477 } while (time_after64(vtime + cost, now->vnow) &&
2478 iocg->inuse != iocg->active);
2480 spin_unlock_irq(&ioc->lock);
2482 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2483 old_inuse, iocg->inuse, old_hwi, hwi);
2488 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2489 bool is_merge, u64 *costp)
2491 struct ioc *ioc = iocg->ioc;
2492 u64 coef_seqio, coef_randio, coef_page;
2493 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2497 switch (bio_op(bio)) {
2499 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2500 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2501 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2504 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2505 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2506 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2513 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2514 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
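/*
* Base cost depends only on the distance from the previous IO's end
* sector: a jump larger than LCOEF_RANDIO_PAGES is billed the random
* base cost, anything closer the sequential one, and merges skip the
* base cost entirely. The per-page size cost is added either way.
*/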
if (!is_merge) {
if (seek_pages > LCOEF_RANDIO_PAGES)
cost += coef_randio;
else
cost += coef_seqio;
}

cost += pages * coef_page;
2529 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2533 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2537 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2540 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
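/*
* Only the size-proportional cost applies here - the request is
* charged its page count times the per-page coefficient for its
* direction, with no sequential/random base cost.
*/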
2542 switch (req_op(rq)) {
2544 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2547 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2554 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2558 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2562 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2564 struct blkcg_gq *blkg = bio->bi_blkg;
2565 struct ioc *ioc = rqos_to_ioc(rqos);
2566 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2568 struct iocg_wait wait;
2569 u64 abs_cost, cost, vtime;
2570 bool use_debt, ioc_locked;
2571 unsigned long flags;
2573 /* bypass IOs if disabled, still initializing, or for root cgroup */
2574 if (!ioc->enabled || !iocg || !iocg->level)
2577 /* calculate the absolute vtime cost */
2578 abs_cost = calc_vtime_cost(bio, iocg, false);
2582 if (!iocg_activate(iocg, &now))
2585 iocg->cursor = bio_end_sector(bio);
2586 vtime = atomic64_read(&iocg->vtime);
2587 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2590 * If no one's waiting and within budget, issue right away. The
2591 * tests are racy but the races aren't systemic - we only miss once
2592 * in a while which is fine.
2594 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2595 time_before_eq64(vtime + cost, now.vnow)) {
2596 iocg_commit_bio(iocg, bio, abs_cost, cost);
2601 * We're over budget. This can be handled in two ways. IOs which may
2602 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2603 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2604 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2605 * whether debt handling is needed and acquire locks accordingly.
2607 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2608 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2610 iocg_lock(iocg, ioc_locked, &flags);
* @iocg must stay activated for debt and waitq handling. Deactivation
* is synchronized against both ioc->lock and waitq.lock and we won't
* get deactivated as long as we're waiting or have debt, so we're good
* if we're activated here. In the unlikely cases that we aren't, just
* issue the IO.
*/
2620 iocg_unlock(iocg, ioc_locked, &flags);
2621 iocg_commit_bio(iocg, bio, abs_cost, cost);
2626 * We're over budget. If @bio has to be issued regardless, remember
2627 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2628 * off the debt before waking more IOs.
2630 * This way, the debt is continuously paid off each period with the
2631 * actual budget available to the cgroup. If we just wound vtime, we
2632 * would incorrectly use the current hw_inuse for the entire amount
2633 * which, for example, can lead to the cgroup staying blocked for a
2634 * long time even with substantially raised hw_inuse.
2636 * An iocg with vdebt should stay online so that the timer can keep
2637 * deducting its vdebt and [de]activate use_delay mechanism
2638 * accordingly. We don't want to race against the timer trying to
2639 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2640 * penalizing the cgroup and its descendants.
2643 iocg_incur_debt(iocg, abs_cost, &now);
2644 if (iocg_kick_delay(iocg, &now))
2645 blkcg_schedule_throttle(rqos->q->disk,
2646 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2647 iocg_unlock(iocg, ioc_locked, &flags);
2651 /* guarantee that iocgs w/ waiters have maximum inuse */
2652 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2654 iocg_unlock(iocg, false, &flags);
2658 propagate_weights(iocg, iocg->active, iocg->active, true,
2663 * Append self to the waitq and schedule the wakeup timer if we're
2664 * the first waiter. The timer duration is calculated based on the
2665 * current vrate. vtime and hweight changes can make it too short
2666 * or too long. Each wait entry records the absolute cost it's
2667 * waiting for to allow re-evaluation using a custom wait entry.
2669 * If too short, the timer simply reschedules itself. If too long,
2670 * the period timer will notice and trigger wakeups.
2672 * All waiters are on iocg->waitq and the wait states are
2673 * synchronized using waitq.lock.
2675 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2676 wait.wait.private = current;
2678 wait.abs_cost = abs_cost;
2679 wait.committed = false; /* will be set true by waker */
2681 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2682 iocg_kick_waitq(iocg, ioc_locked, &now);
2684 iocg_unlock(iocg, ioc_locked, &flags);
2687 set_current_state(TASK_UNINTERRUPTIBLE);
2693 /* waker already committed us, proceed */
2694 finish_wait(&iocg->waitq, &wait.wait);
2697 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2700 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2701 struct ioc *ioc = rqos_to_ioc(rqos);
2702 sector_t bio_end = bio_end_sector(bio);
2704 u64 vtime, abs_cost, cost;
2705 unsigned long flags;
2707 /* bypass if disabled, still initializing, or for root cgroup */
2708 if (!ioc->enabled || !iocg || !iocg->level)
2711 abs_cost = calc_vtime_cost(bio, iocg, true);
2717 vtime = atomic64_read(&iocg->vtime);
2718 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2720 /* update cursor if backmerging into the request at the cursor */
2721 if (blk_rq_pos(rq) < bio_end &&
2722 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2723 iocg->cursor = bio_end;
* Charge if there's enough vtime budget and the existing request has
* cost assigned.
*/
2729 if (rq->bio && rq->bio->bi_iocost_cost &&
2730 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2731 iocg_commit_bio(iocg, bio, abs_cost, cost);
2736 * Otherwise, account it as debt if @iocg is online, which it should
2737 * be for the vast majority of cases. See debt handling in
2738 * ioc_rqos_throttle() for details.
2740 spin_lock_irqsave(&ioc->lock, flags);
2741 spin_lock(&iocg->waitq.lock);
2743 if (likely(!list_empty(&iocg->active_list))) {
2744 iocg_incur_debt(iocg, abs_cost, &now);
2745 if (iocg_kick_delay(iocg, &now))
2746 blkcg_schedule_throttle(rqos->q->disk,
2747 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2749 iocg_commit_bio(iocg, bio, abs_cost, cost);
2752 spin_unlock(&iocg->waitq.lock);
2753 spin_unlock_irqrestore(&ioc->lock, flags);
2756 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2758 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2760 if (iocg && bio->bi_iocost_cost)
2761 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2764 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2766 struct ioc *ioc = rqos_to_ioc(rqos);
2767 struct ioc_pcpu_stat *ccs;
2768 u64 on_q_ns, rq_wait_ns, size_nsec;
2771 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2774 switch (req_op(rq)) {
2787 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2788 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2789 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2791 ccs = get_cpu_ptr(ioc->pcpu_stat);
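/*
* An IO counts as meeting its latency target if its total queue time,
* minus the size-proportional service time estimated by the cost
* model, stays within the configured rlat/wlat for its direction.
*/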
2793 if (on_q_ns <= size_nsec ||
2794 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2795 local_inc(&ccs->missed[rw].nr_met);
2797 local_inc(&ccs->missed[rw].nr_missed);
2799 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2804 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2806 struct ioc *ioc = rqos_to_ioc(rqos);
2808 spin_lock_irq(&ioc->lock);
2809 ioc_refresh_params(ioc, false);
2810 spin_unlock_irq(&ioc->lock);
2813 static void ioc_rqos_exit(struct rq_qos *rqos)
2815 struct ioc *ioc = rqos_to_ioc(rqos);
2817 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2819 spin_lock_irq(&ioc->lock);
2820 ioc->running = IOC_STOP;
2821 spin_unlock_irq(&ioc->lock);
2823 del_timer_sync(&ioc->timer);
2824 free_percpu(ioc->pcpu_stat);
2828 static struct rq_qos_ops ioc_rqos_ops = {
2829 .throttle = ioc_rqos_throttle,
2830 .merge = ioc_rqos_merge,
2831 .done_bio = ioc_rqos_done_bio,
2832 .done = ioc_rqos_done,
2833 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2834 .exit = ioc_rqos_exit,
2837 static int blk_iocost_init(struct gendisk *disk)
2839 struct request_queue *q = disk->queue;
2841 struct rq_qos *rqos;
2844 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2848 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2849 if (!ioc->pcpu_stat) {
2854 for_each_possible_cpu(cpu) {
2855 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2857 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2858 local_set(&ccs->missed[i].nr_met, 0);
2859 local_set(&ccs->missed[i].nr_missed, 0);
2861 local64_set(&ccs->rq_wait_ns, 0);
2865 rqos->id = RQ_QOS_COST;
2866 rqos->ops = &ioc_rqos_ops;
2869 spin_lock_init(&ioc->lock);
2870 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2871 INIT_LIST_HEAD(&ioc->active_iocgs);
2873 ioc->running = IOC_IDLE;
2874 ioc->vtime_base_rate = VTIME_PER_USEC;
2875 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2876 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2877 ioc->period_at = ktime_to_us(ktime_get());
2878 atomic64_set(&ioc->cur_period, 0);
2879 atomic_set(&ioc->hweight_gen, 0);
2881 spin_lock_irq(&ioc->lock);
2882 ioc->autop_idx = AUTOP_INVALID;
2883 ioc_refresh_params(ioc, true);
2884 spin_unlock_irq(&ioc->lock);
* rqos must be added before activation to allow ioc_pd_init() to
* look up the ioc from q. This means that the rqos methods may get
* called before policy activation completes, so they can't assume that
* the target bio has an iocg associated and need to test for a NULL
* iocg.
*/
2892 ret = rq_qos_add(q, rqos);
2896 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2902 rq_qos_del(q, rqos);
2904 free_percpu(ioc->pcpu_stat);
2909 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2911 struct ioc_cgrp *iocc;
2913 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
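/*
* Weights are stored in fixed point, scaled by WEIGHT_ONE: the default
* below is CGROUP_WEIGHT_DFL scaled up, and the show/store handlers
* divide and multiply by WEIGHT_ONE accordingly.
*/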
2917 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2921 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2923 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2926 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2927 struct blkcg *blkcg)
2929 int levels = blkcg->css.cgroup->level + 1;
2930 struct ioc_gq *iocg;
2932 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2936 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2937 if (!iocg->pcpu_stat) {
2945 static void ioc_pd_init(struct blkg_policy_data *pd)
2947 struct ioc_gq *iocg = pd_to_iocg(pd);
2948 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2949 struct ioc *ioc = q_to_ioc(blkg->q);
2951 struct blkcg_gq *tblkg;
2952 unsigned long flags;
2957 atomic64_set(&iocg->vtime, now.vnow);
2958 atomic64_set(&iocg->done_vtime, now.vnow);
2959 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2960 INIT_LIST_HEAD(&iocg->active_list);
2961 INIT_LIST_HEAD(&iocg->walk_list);
2962 INIT_LIST_HEAD(&iocg->surplus_list);
2963 iocg->hweight_active = WEIGHT_ONE;
2964 iocg->hweight_inuse = WEIGHT_ONE;
2966 init_waitqueue_head(&iocg->waitq);
2967 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2968 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2970 iocg->level = blkg->blkcg->css.cgroup->level;
2972 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2973 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2974 iocg->ancestors[tiocg->level] = tiocg;
2977 spin_lock_irqsave(&ioc->lock, flags);
2978 weight_updated(iocg, &now);
2979 spin_unlock_irqrestore(&ioc->lock, flags);
2982 static void ioc_pd_free(struct blkg_policy_data *pd)
2984 struct ioc_gq *iocg = pd_to_iocg(pd);
2985 struct ioc *ioc = iocg->ioc;
2986 unsigned long flags;
2989 spin_lock_irqsave(&ioc->lock, flags);
2991 if (!list_empty(&iocg->active_list)) {
2995 propagate_weights(iocg, 0, 0, false, &now);
2996 list_del_init(&iocg->active_list);
2999 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
3000 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3002 spin_unlock_irqrestore(&ioc->lock, flags);
3004 hrtimer_cancel(&iocg->waitq_timer);
3006 free_percpu(iocg->pcpu_stat);
3010 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3012 struct ioc_gq *iocg = pd_to_iocg(pd);
3013 struct ioc *ioc = iocg->ioc;
3018 if (iocg->level == 0) {
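/*
* vp10k is the base vrate in hundredths of a percent of wallclock -
* e.g. a value of 13750 prints as "cost.vrate=137.50".
*/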
unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
ioc->vtime_base_rate * 10000,
VTIME_PER_USEC);
3022 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3025 seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3027 if (blkcg_debug_stats)
3028 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3029 iocg->last_stat.wait_us,
3030 iocg->last_stat.indebt_us,
3031 iocg->last_stat.indelay_us);
3034 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3037 const char *dname = blkg_dev_name(pd->blkg);
3038 struct ioc_gq *iocg = pd_to_iocg(pd);
3040 if (dname && iocg->cfg_weight)
3041 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3046 static int ioc_weight_show(struct seq_file *sf, void *v)
3048 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3049 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3051 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3052 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3053 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3057 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3058 size_t nbytes, loff_t off)
3060 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3061 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3062 struct blkg_conf_ctx ctx;
3064 struct ioc_gq *iocg;
3068 if (!strchr(buf, ':')) {
3069 struct blkcg_gq *blkg;
3071 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3074 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3077 spin_lock_irq(&blkcg->lock);
3078 iocc->dfl_weight = v * WEIGHT_ONE;
3079 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3080 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3083 spin_lock(&iocg->ioc->lock);
3084 ioc_now(iocg->ioc, &now);
3085 weight_updated(iocg, &now);
3086 spin_unlock(&iocg->ioc->lock);
3089 spin_unlock_irq(&blkcg->lock);
3094 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3098 iocg = blkg_to_iocg(ctx.blkg);
3100 if (!strncmp(ctx.body, "default", 7)) {
3103 if (!sscanf(ctx.body, "%u", &v))
3105 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3109 spin_lock(&iocg->ioc->lock);
3110 iocg->cfg_weight = v * WEIGHT_ONE;
3111 ioc_now(iocg->ioc, &now);
3112 weight_updated(iocg, &now);
3113 spin_unlock(&iocg->ioc->lock);
3115 blkg_conf_finish(&ctx);
3119 blkg_conf_finish(&ctx);
3123 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3126 const char *dname = blkg_dev_name(pd->blkg);
3127 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3132 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3133 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3134 ioc->params.qos[QOS_RPPM] / 10000,
3135 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3136 ioc->params.qos[QOS_RLAT],
3137 ioc->params.qos[QOS_WPPM] / 10000,
3138 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3139 ioc->params.qos[QOS_WLAT],
3140 ioc->params.qos[QOS_MIN] / 10000,
3141 ioc->params.qos[QOS_MIN] % 10000 / 100,
3142 ioc->params.qos[QOS_MAX] / 10000,
3143 ioc->params.qos[QOS_MAX] % 10000 / 100);
3147 static int ioc_qos_show(struct seq_file *sf, void *v)
3149 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3151 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3152 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3156 static const match_table_t qos_ctrl_tokens = {
3157 { QOS_ENABLE, "enable=%u" },
3158 { QOS_CTRL, "ctrl=%s" },
3159 { NR_QOS_CTRL_PARAMS, NULL },
3162 static const match_table_t qos_tokens = {
3163 { QOS_RPPM, "rpct=%s" },
3164 { QOS_RLAT, "rlat=%u" },
3165 { QOS_WPPM, "wpct=%s" },
3166 { QOS_WLAT, "wlat=%u" },
3167 { QOS_MIN, "min=%s" },
3168 { QOS_MAX, "max=%s" },
3169 { NR_QOS_PARAMS, NULL },
3172 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3173 size_t nbytes, loff_t off)
3175 struct block_device *bdev;
3176 struct gendisk *disk;
3178 u32 qos[NR_QOS_PARAMS];
3183 bdev = blkcg_conf_open_bdev(&input);
3185 return PTR_ERR(bdev);
3187 disk = bdev->bd_disk;
3188 ioc = q_to_ioc(disk->queue);
3190 ret = blk_iocost_init(disk);
3193 ioc = q_to_ioc(disk->queue);
3196 blk_mq_freeze_queue(disk->queue);
3197 blk_mq_quiesce_queue(disk->queue);
3199 spin_lock_irq(&ioc->lock);
3200 memcpy(qos, ioc->params.qos, sizeof(qos));
3201 enable = ioc->enabled;
3202 user = ioc->user_qos_params;
3204 while ((p = strsep(&input, " \t\n"))) {
3205 substring_t args[MAX_OPT_ARGS];
3213 switch (match_token(p, qos_ctrl_tokens, args)) {
3215 match_u64(&args[0], &v);
3219 match_strlcpy(buf, &args[0], sizeof(buf));
3220 if (!strcmp(buf, "auto"))
3222 else if (!strcmp(buf, "user"))
3229 tok = match_token(p, qos_tokens, args);
3233 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3236 if (cgroup_parse_float(buf, 2, &v))
3238 if (v < 0 || v > 10000)
3244 if (match_u64(&args[0], &v))
3250 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3253 if (cgroup_parse_float(buf, 2, &v))
3257 qos[tok] = clamp_t(s64, v * 100,
3258 VRATE_MIN_PPM, VRATE_MAX_PPM);
3266 if (qos[QOS_MIN] > qos[QOS_MAX])
3270 blk_stat_enable_accounting(disk->queue);
3271 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3272 ioc->enabled = true;
3273 wbt_disable_default(disk->queue);
3275 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3276 ioc->enabled = false;
3277 wbt_enable_default(disk->queue);
3281 memcpy(ioc->params.qos, qos, sizeof(qos));
3282 ioc->user_qos_params = true;
3284 ioc->user_qos_params = false;
3287 ioc_refresh_params(ioc, true);
3288 spin_unlock_irq(&ioc->lock);
3290 blk_mq_unquiesce_queue(disk->queue);
3291 blk_mq_unfreeze_queue(disk->queue);
3293 blkdev_put_no_open(bdev);
3296 spin_unlock_irq(&ioc->lock);
3298 blk_mq_unquiesce_queue(disk->queue);
3299 blk_mq_unfreeze_queue(disk->queue);
3303 blkdev_put_no_open(bdev);
3307 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3308 struct blkg_policy_data *pd, int off)
3310 const char *dname = blkg_dev_name(pd->blkg);
3311 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3312 u64 *u = ioc->params.i_lcoefs;
3317 seq_printf(sf, "%s ctrl=%s model=linear "
3318 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3319 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3320 dname, ioc->user_cost_model ? "user" : "auto",
3321 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3322 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3326 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3328 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3330 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3331 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3335 static const match_table_t cost_ctrl_tokens = {
3336 { COST_CTRL, "ctrl=%s" },
3337 { COST_MODEL, "model=%s" },
3338 { NR_COST_CTRL_PARAMS, NULL },
3341 static const match_table_t i_lcoef_tokens = {
3342 { I_LCOEF_RBPS, "rbps=%u" },
3343 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3344 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3345 { I_LCOEF_WBPS, "wbps=%u" },
3346 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3347 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3348 { NR_I_LCOEFS, NULL },
3351 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3352 size_t nbytes, loff_t off)
3354 struct block_device *bdev;
3355 struct request_queue *q;
3362 bdev = blkcg_conf_open_bdev(&input);
3364 return PTR_ERR(bdev);
3366 q = bdev_get_queue(bdev);
3369 ret = blk_iocost_init(bdev->bd_disk);
3375 blk_mq_freeze_queue(q);
3376 blk_mq_quiesce_queue(q);
3378 spin_lock_irq(&ioc->lock);
3379 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3380 user = ioc->user_cost_model;
3382 while ((p = strsep(&input, " \t\n"))) {
3383 substring_t args[MAX_OPT_ARGS];
3391 switch (match_token(p, cost_ctrl_tokens, args)) {
3393 match_strlcpy(buf, &args[0], sizeof(buf));
3394 if (!strcmp(buf, "auto"))
3396 else if (!strcmp(buf, "user"))
3402 match_strlcpy(buf, &args[0], sizeof(buf));
3403 if (strcmp(buf, "linear"))
3408 tok = match_token(p, i_lcoef_tokens, args);
3409 if (tok == NR_I_LCOEFS)
3411 if (match_u64(&args[0], &v))
3418 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3419 ioc->user_cost_model = true;
3421 ioc->user_cost_model = false;
3423 ioc_refresh_params(ioc, true);
3424 spin_unlock_irq(&ioc->lock);
3426 blk_mq_unquiesce_queue(q);
3427 blk_mq_unfreeze_queue(q);
3429 blkdev_put_no_open(bdev);
3433 spin_unlock_irq(&ioc->lock);
3435 blk_mq_unquiesce_queue(q);
3436 blk_mq_unfreeze_queue(q);
3440 blkdev_put_no_open(bdev);
3444 static struct cftype ioc_files[] = {
3447 .flags = CFTYPE_NOT_ON_ROOT,
3448 .seq_show = ioc_weight_show,
3449 .write = ioc_weight_write,
3453 .flags = CFTYPE_ONLY_ON_ROOT,
3454 .seq_show = ioc_qos_show,
3455 .write = ioc_qos_write,
3458 .name = "cost.model",
3459 .flags = CFTYPE_ONLY_ON_ROOT,
3460 .seq_show = ioc_cost_model_show,
3461 .write = ioc_cost_model_write,
3466 static struct blkcg_policy blkcg_policy_iocost = {
3467 .dfl_cftypes = ioc_files,
3468 .cpd_alloc_fn = ioc_cpd_alloc,
3469 .cpd_free_fn = ioc_cpd_free,
3470 .pd_alloc_fn = ioc_pd_alloc,
3471 .pd_init_fn = ioc_pd_init,
3472 .pd_free_fn = ioc_pd_free,
3473 .pd_stat_fn = ioc_pd_stat,
3476 static int __init ioc_init(void)
3478 return blkcg_policy_register(&blkcg_policy_iocost);
3481 static void __exit ioc_exit(void)
3483 blkcg_policy_unregister(&blkcg_policy_iocost);
3486 module_init(ioc_init);
3487 module_exit(ioc_exit);