1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
9 * One challenge of controlling IO resources is the lack of a trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide variety of devices well enough. Default
42 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
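 *
 * As a rough illustration (assuming the documented io.cost.model key
 * format; the device number and values below are purely examples), a
 * user-set linear model could look like:
 *
 *   8:16 ctrl=user model=linear rbps=174019176 rseqiops=41708
 *        rrandiops=370 wbps=178075866 wseqiops=42705 wrandiops=378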
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
54 * 2-1. Vtime Distribution
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
64 * A0 (w:100) A1 (w:100)
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
71 * up to 1 (WEIGHT_ONE).
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO iff doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
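 *
 * A minimal sketch of the issue-time decision (simplified; the real
 * path also deals with waiters and debt):
 *
 *   cost = abs_cost * WEIGHT_ONE / hweight_inuse;
 *   if (atomic64_read(&iocg->vtime) + cost <= now.vnow)
 *           issue now and charge cost to the cgroup's vtime;
 *   else
 *           wait (or accrue debt) until vtime catches up;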
84 * 2-2. Vrate Adjustment
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
102 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth loss. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
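 *
 * As a hypothetical example (device number and values are illustrative,
 * assuming the documented io.cost.qos key format), the following would
 * consider the device saturated once the p95 read completion latency
 * exceeds 10ms or the p95 write latency exceeds 20ms, and would bound
 * vrate to the 50-150% range:
 *
 *   8:16 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00 wlat=20000
 *        min=50.00 max=150.00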
125 * 2-3. Work Conservation
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
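 *
 * Continuing the example, once A's inuse weight is lowered to hover just
 * above its ~10% usage, B's hweight_inuse grows toward ~90% and overall
 * device utilization recovers to near 100% instead of 60%.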
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, squaring who can
149 * donate and should take back how much requires hweight propagations
150 * anyway, making it easier to implement and understand as a separate
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The output looks like the following.
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - dbt/delay : Deferred issuer delay induction level and duration
172 * - usages : Usage history
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <linux/blk-cgroup.h>
182 #include <asm/local.h>
183 #include <asm/local64.h>
184 #include "blk-rq-qos.h"
185 #include "blk-stat.h"
188 #ifdef CONFIG_TRACEPOINTS
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
208 #else /* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
210 #endif /* CONFIG_TRACEPOINTS */
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
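 *
 * For example, with a 100ms period, the target corresponds to roughly
 * 50ms worth of vtime credit and the snapback threshold to roughly 10ms.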
226 MARGIN_TARGET_PCT = 50,
228 INUSE_ADJ_STEP_PCT = 25,
230 /* Have some play in timer operations */
233 /* 1/64k is granular enough and can easily be handled w/ u32 */
234 WEIGHT_ONE = 1 << 16,
237 * As vtime is used to calculate the cost of each IO, it needs to
238 * be fairly high precision. For example, it should be able to
239 * represent the cost of a single page worth of discard with
240 * sufficient accuracy. At the same time, it should be able to
241 * represent reasonably long enough durations to be useful and
242 * convenient during operation.
244 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
245 * granularity and days of wrap-around time even at extreme vrates.
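 *
 * As a quick sanity check: 2^37 / 10^9 =~ 137 vtime units per
 * nanosecond, and a signed 64bit vtime wraps after 2^63 / 2^37 = 2^26
 * seconds, i.e. roughly 776 days at 100% vrate and still several days
 * at the maximum 10000%.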
247 VTIME_PER_SEC_SHIFT = 37,
248 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
249 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
250 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
252 /* bound vrate adjustments within two orders of magnitude */
253 VRATE_MIN_PPM = 10000, /* 1% */
254 VRATE_MAX_PPM = 100000000, /* 10000% */
256 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
257 VRATE_CLAMP_ADJ_PCT = 4,
259 /* if IOs end up waiting for requests, issue less */
260 RQ_WAIT_BUSY_PCT = 5,
262 /* unbusy hysteresis */
266 * The effect of delay is indirect and non-linear and a huge amount of
267 * future debt can accumulate abruptly while unthrottled. Linearly scale
268 * up delay as debt is going up and then let it decay exponentially.
269 * This gives us quick ramp ups while delay is accumulating and long
270 * tails which can help reduce the frequency of debt explosions on
271 * unthrottle. The parameters are experimentally determined.
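 *
 * Concretely, the accumulated delay is halved for every full second
 * since it was last raised, while a debt overage between
 * MIN_DELAY_THR_PCT and MAX_DELAY_THR_PCT of a period's worth of vtime
 * maps linearly onto MIN_DELAY..MAX_DELAY (see iocg_kick_delay()).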
273 * The delay mechanism provides adequate protection and behavior in many
274 * cases. However, this is far from ideal and falls short on both
275 * fronts. The debtors are often throttled too harshly costing a
276 * significant level of fairness and possibly total work while the
277 * protection against their impacts on the system can be choppy and
280 * The shortcoming primarily stems from the fact that, unlike for page
281 * cache, the kernel doesn't have well-defined back-pressure propagation
282 * mechanism and policies for anonymous memory. Fully addressing this
283 * issue will likely require substantial improvements in the area.
285 MIN_DELAY_THR_PCT = 500,
286 MAX_DELAY_THR_PCT = 25000,
288 MAX_DELAY = 250 * USEC_PER_MSEC,
290 /* halve debts if avg usage over 100ms is under 50% */
292 DFGV_PERIOD = 100 * USEC_PER_MSEC,
294 /* don't let cmds which take a very long time pin lagging for too long */
295 MAX_LAGGING_PERIODS = 10,
297 /* switch iff the conditions are met for longer than this */
298 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
301 * Count IO size in 4k pages. The 12bit shift helps keep the
302 * size-proportional components of the cost calculation within a similar
303 * number of digits as the per-IO cost components.
306 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
307 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
309 /* if IOs are further than 16M apart, consider them random for the linear model */
310 LCOEF_RANDIO_PAGES = 4096,
319 /* io.cost.qos controls including per-dev enable of the whole controller */
326 /* io.cost.qos params */
337 /* io.cost.model controls */
344 /* builtin linear cost model coefficients */
376 u32 qos[NR_QOS_PARAMS];
377 u64 i_lcoefs[NR_I_LCOEFS];
378 u64 lcoefs[NR_LCOEFS];
379 u32 too_fast_vrate_pct;
380 u32 too_slow_vrate_pct;
396 struct ioc_pcpu_stat {
397 struct ioc_missed missed[2];
399 local64_t rq_wait_ns;
409 struct ioc_params params;
410 struct ioc_margins margins;
417 struct timer_list timer;
418 struct list_head active_iocgs; /* active cgroups */
419 struct ioc_pcpu_stat __percpu *pcpu_stat;
421 enum ioc_running running;
422 atomic64_t vtime_rate;
426 seqcount_spinlock_t period_seqcount;
427 u64 period_at; /* wallclock starttime */
428 u64 period_at_vtime; /* vtime starttime */
430 atomic64_t cur_period; /* inc'd each period */
431 int busy_level; /* saturation history */
433 bool weights_updated;
434 atomic_t hweight_gen; /* for lazy hweights */
436 /* debt forgiveness */
439 u64 dfgv_usage_us_sum;
441 u64 autop_too_fast_at;
442 u64 autop_too_slow_at;
444 bool user_qos_params:1;
445 bool user_cost_model:1;
448 struct iocg_pcpu_stat {
449 local64_t abs_vusage;
459 /* per device-cgroup pair */
461 struct blkg_policy_data pd;
465 * An iocg can get its weight from two sources - an explicit
466 * per-device-cgroup configuration or the default weight of the
467 * cgroup. `cfg_weight` is the explicit per-device-cgroup
468 * configuration. `weight` is the effective considering both
471 * When an idle cgroup becomes active its `active` goes from 0 to
472 * `weight`. `inuse` is the surplus adjusted active weight.
473 * `active` and `inuse` are used to calculate `hweight_active` and
476 * `last_inuse` remembers `inuse` while an iocg is idle to persist
477 * surplus adjustments.
479 * `inuse` may be adjusted dynamically during period. `saved_*` are used
480 * to determine and track adjustments.
490 sector_t cursor; /* to detect randio */
493 * `vtime` is this iocg's vtime cursor which progresses as IOs are
494 * issued. If lagging behind device vtime, the delta represents
495 * the currently available IO budget. If running ahead, the
498 * `vtime_done` is the same but progressed on completion rather
499 * than issue. The delta behind `vtime` represents the cost of
500 * currently in-flight IOs.
503 atomic64_t done_vtime;
506 /* current delay in effect and when it started */
511 * The period this iocg was last active in. Used for deactivation
512 * and invalidating `vtime`.
514 atomic64_t active_period;
515 struct list_head active_list;
517 /* see __propagate_weights() and current_hweight() for details */
518 u64 child_active_sum;
520 u64 child_adjusted_sum;
524 u32 hweight_donating;
525 u32 hweight_after_donation;
527 struct list_head walk_list;
528 struct list_head surplus_list;
530 struct wait_queue_head waitq;
531 struct hrtimer waitq_timer;
533 /* timestamp at the latest activation */
537 struct iocg_pcpu_stat __percpu *pcpu_stat;
538 struct iocg_stat local_stat;
539 struct iocg_stat desc_stat;
540 struct iocg_stat last_stat;
541 u64 last_stat_abs_vusage;
547 /* this iocg's depth in the hierarchy and ancestors including self */
549 struct ioc_gq *ancestors[];
554 struct blkcg_policy_data cpd;
555 unsigned int dfl_weight;
566 struct wait_queue_entry wait;
572 struct iocg_wake_ctx {
578 static const struct ioc_params autop[] = {
581 [QOS_RLAT] = 250000, /* 250ms */
583 [QOS_MIN] = VRATE_MIN_PPM,
584 [QOS_MAX] = VRATE_MAX_PPM,
587 [I_LCOEF_RBPS] = 174019176,
588 [I_LCOEF_RSEQIOPS] = 41708,
589 [I_LCOEF_RRANDIOPS] = 370,
590 [I_LCOEF_WBPS] = 178075866,
591 [I_LCOEF_WSEQIOPS] = 42705,
592 [I_LCOEF_WRANDIOPS] = 378,
597 [QOS_RLAT] = 25000, /* 25ms */
599 [QOS_MIN] = VRATE_MIN_PPM,
600 [QOS_MAX] = VRATE_MAX_PPM,
603 [I_LCOEF_RBPS] = 245855193,
604 [I_LCOEF_RSEQIOPS] = 61575,
605 [I_LCOEF_RRANDIOPS] = 6946,
606 [I_LCOEF_WBPS] = 141365009,
607 [I_LCOEF_WSEQIOPS] = 33716,
608 [I_LCOEF_WRANDIOPS] = 26796,
613 [QOS_RLAT] = 25000, /* 25ms */
615 [QOS_MIN] = VRATE_MIN_PPM,
616 [QOS_MAX] = VRATE_MAX_PPM,
619 [I_LCOEF_RBPS] = 488636629,
620 [I_LCOEF_RSEQIOPS] = 8932,
621 [I_LCOEF_RRANDIOPS] = 8518,
622 [I_LCOEF_WBPS] = 427891549,
623 [I_LCOEF_WSEQIOPS] = 28755,
624 [I_LCOEF_WRANDIOPS] = 21940,
626 .too_fast_vrate_pct = 500,
630 [QOS_RLAT] = 5000, /* 5ms */
632 [QOS_MIN] = VRATE_MIN_PPM,
633 [QOS_MAX] = VRATE_MAX_PPM,
636 [I_LCOEF_RBPS] = 3102524156LLU,
637 [I_LCOEF_RSEQIOPS] = 724816,
638 [I_LCOEF_RRANDIOPS] = 778122,
639 [I_LCOEF_WBPS] = 1742780862LLU,
640 [I_LCOEF_WSEQIOPS] = 425702,
641 [I_LCOEF_WRANDIOPS] = 443193,
643 .too_slow_vrate_pct = 10,
648 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
649 * vtime credit shortage and down on device saturation.
651 static u32 vrate_adj_pct[] =
653 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
654 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
655 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
657 static struct blkcg_policy blkcg_policy_iocost;
659 /* accessors and helpers */
660 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
662 return container_of(rqos, struct ioc, rqos);
665 static struct ioc *q_to_ioc(struct request_queue *q)
667 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
670 static const char *q_name(struct request_queue *q)
672 if (blk_queue_registered(q))
673 return kobject_name(q->kobj.parent);
678 static const char __maybe_unused *ioc_name(struct ioc *ioc)
680 return q_name(ioc->rqos.q);
683 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
685 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
688 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
690 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
693 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
695 return pd_to_blkg(&iocg->pd);
698 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
700 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
701 struct ioc_cgrp, cpd);
705 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
706 * weight, the more expensive each IO. Must round up.
708 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
710 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
714 * The inverse of abs_cost_to_cost(). Must round up.
716 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
718 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
721 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
722 u64 abs_cost, u64 cost)
724 struct iocg_pcpu_stat *gcs;
726 bio->bi_iocost_cost = cost;
727 atomic64_add(cost, &iocg->vtime);
729 gcs = get_cpu_ptr(iocg->pcpu_stat);
730 local64_add(abs_cost, &gcs->abs_vusage);
734 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
737 spin_lock_irqsave(&iocg->ioc->lock, *flags);
738 spin_lock(&iocg->waitq.lock);
740 spin_lock_irqsave(&iocg->waitq.lock, *flags);
744 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
747 spin_unlock(&iocg->waitq.lock);
748 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
750 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
754 #define CREATE_TRACE_POINTS
755 #include <trace/events/iocost.h>
757 static void ioc_refresh_margins(struct ioc *ioc)
759 struct ioc_margins *margins = &ioc->margins;
760 u32 period_us = ioc->period_us;
761 u64 vrate = ioc->vtime_base_rate;
763 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
764 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
765 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
768 /* latency Qos params changed, update period_us and all the dependent params */
769 static void ioc_refresh_period_us(struct ioc *ioc)
771 u32 ppm, lat, multi, period_us;
773 lockdep_assert_held(&ioc->lock);
775 /* pick the higher latency target */
776 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
777 ppm = ioc->params.qos[QOS_RPPM];
778 lat = ioc->params.qos[QOS_RLAT];
780 ppm = ioc->params.qos[QOS_WPPM];
781 lat = ioc->params.qos[QOS_WLAT];
785 * We want the period to be long enough to contain a healthy number
786 * of IOs while short enough for granular control. Define it as a
787 * multiple of the latency target. Ideally, the multiplier should
788 * be scaled according to the percentile so that it would nominally
789 * contain a certain number of requests. Let's be simpler and
790 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
793 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
796 period_us = multi * lat;
797 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
799 /* calculate dependent params */
800 ioc->period_us = period_us;
801 ioc->timer_slack_ns = div64_u64(
802 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
804 ioc_refresh_margins(ioc);
807 static int ioc_autop_idx(struct ioc *ioc)
809 int idx = ioc->autop_idx;
810 const struct ioc_params *p = &autop[idx];
815 if (!blk_queue_nonrot(ioc->rqos.q))
818 /* handle SATA SSDs w/ broken NCQ */
819 if (blk_queue_depth(ioc->rqos.q) == 1)
820 return AUTOP_SSD_QD1;
822 /* use one of the normal ssd sets */
823 if (idx < AUTOP_SSD_DFL)
824 return AUTOP_SSD_DFL;
826 /* if user is overriding anything, maintain what was there */
827 if (ioc->user_qos_params || ioc->user_cost_model)
830 /* step up/down based on the vrate */
831 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
832 now_ns = ktime_get_ns();
834 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
835 if (!ioc->autop_too_fast_at)
836 ioc->autop_too_fast_at = now_ns;
837 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
840 ioc->autop_too_fast_at = 0;
843 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
844 if (!ioc->autop_too_slow_at)
845 ioc->autop_too_slow_at = now_ns;
846 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
849 ioc->autop_too_slow_at = 0;
856 * Take the following as input
858 * @bps maximum sequential throughput
859 * @seqiops maximum sequential 4k iops
860 * @randiops maximum random 4k iops
862 * and calculate the linear model cost coefficients.
864 * *@page per-page cost 1s / (@bps / 4096)
865 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
866 * *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
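 *
 * A rough worked example with illustrative numbers: for a device doing
 * 400MB/s sequentially, *@page = 1s / (400000000 / 4096) =~ 10.2us of
 * vtime per page; with @seqiops = 50000, the per-IO base cost is
 * max(20us - 10.2us, 0) =~ 9.8us.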
868 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
869 u64 *page, u64 *seqio, u64 *randio)
873 *page = *seqio = *randio = 0;
876 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
877 DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
880 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
886 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
892 static void ioc_refresh_lcoefs(struct ioc *ioc)
894 u64 *u = ioc->params.i_lcoefs;
895 u64 *c = ioc->params.lcoefs;
897 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
898 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
899 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
900 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
903 static bool ioc_refresh_params(struct ioc *ioc, bool force)
905 const struct ioc_params *p;
908 lockdep_assert_held(&ioc->lock);
910 idx = ioc_autop_idx(ioc);
913 if (idx == ioc->autop_idx && !force)
916 if (idx != ioc->autop_idx)
917 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
919 ioc->autop_idx = idx;
920 ioc->autop_too_fast_at = 0;
921 ioc->autop_too_slow_at = 0;
923 if (!ioc->user_qos_params)
924 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
925 if (!ioc->user_cost_model)
926 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
928 ioc_refresh_period_us(ioc);
929 ioc_refresh_lcoefs(ioc);
931 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
932 VTIME_PER_USEC, MILLION);
933 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
934 VTIME_PER_USEC, MILLION);
940 * When an iocg accumulates too much vtime or gets deactivated, we throw away
941 * some vtime, which lowers the overall device utilization. As the exact amount
942 * which is being thrown away is known, we can compensate by accelerating the
943 * vrate accordingly so that the extra vtime generated in the current period
944 * matches what got lost.
946 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
948 s64 pleft = ioc->period_at + ioc->period_us - now->now;
949 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
950 s64 vcomp, vcomp_min, vcomp_max;
952 lockdep_assert_held(&ioc->lock);
954 /* we need some time left in this period */
959 * Calculate how much vrate should be adjusted to offset the error.
960 * Limit the amount of adjustment and deduct the adjusted amount from
963 vcomp = -div64_s64(ioc->vtime_err, pleft);
964 vcomp_min = -(ioc->vtime_base_rate >> 1);
965 vcomp_max = ioc->vtime_base_rate;
966 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
968 ioc->vtime_err += vcomp * pleft;
970 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
972 /* bound how much error can accumulate */
973 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
976 /* take a snapshot of the current [v]time and vrate */
977 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
981 now->now_ns = ktime_get();
982 now->now = ktime_to_us(now->now_ns);
983 now->vrate = atomic64_read(&ioc->vtime_rate);
986 * The current vtime is
988 * vtime at period start + (wallclock time since the start) * vrate
990 * As a consistent snapshot of `period_at_vtime` and `period_at` is
991 * needed, they're seqcount protected.
994 seq = read_seqcount_begin(&ioc->period_seqcount);
995 now->vnow = ioc->period_at_vtime +
996 (now->now - ioc->period_at) * now->vrate;
997 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1000 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1002 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1004 write_seqcount_begin(&ioc->period_seqcount);
1005 ioc->period_at = now->now;
1006 ioc->period_at_vtime = now->vnow;
1007 write_seqcount_end(&ioc->period_seqcount);
1009 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1010 add_timer(&ioc->timer);
1014 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1015 * weight sums and propagate upwards accordingly. If @save, the current margin
1016 * is saved to be used as reference for later inuse in-period adjustments.
1018 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1019 bool save, struct ioc_now *now)
1021 struct ioc *ioc = iocg->ioc;
1024 lockdep_assert_held(&ioc->lock);
1027 * For an active leaf node, its inuse shouldn't be zero or exceed
1028 * @active. An active internal node's inuse is solely determined by the
1029 * inuse to active ratio of its children regardless of @inuse.
1031 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1032 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1033 iocg->child_active_sum);
1035 inuse = clamp_t(u32, inuse, 1, active);
1038 iocg->last_inuse = iocg->inuse;
1040 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1042 if (active == iocg->active && inuse == iocg->inuse)
1045 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1046 struct ioc_gq *parent = iocg->ancestors[lvl];
1047 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1048 u32 parent_active = 0, parent_inuse = 0;
1050 /* update the level sums */
1051 parent->child_active_sum += (s32)(active - child->active);
1052 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1053 /* apply the updates */
1054 child->active = active;
1055 child->inuse = inuse;
1058 * The delta between inuse and active sums indicates that
1059 * much of the weight is being given away. Parent's inuse
1060 * and active should reflect the ratio.
1062 if (parent->child_active_sum) {
1063 parent_active = parent->weight;
1064 parent_inuse = DIV64_U64_ROUND_UP(
1065 parent_active * parent->child_inuse_sum,
1066 parent->child_active_sum);
1069 /* do we need to keep walking up? */
1070 if (parent_active == parent->active &&
1071 parent_inuse == parent->inuse)
1074 active = parent_active;
1075 inuse = parent_inuse;
1078 ioc->weights_updated = true;
1081 static void commit_weights(struct ioc *ioc)
1083 lockdep_assert_held(&ioc->lock);
1085 if (ioc->weights_updated) {
1086 /* paired with rmb in current_hweight(), see there */
1088 atomic_inc(&ioc->hweight_gen);
1089 ioc->weights_updated = false;
1093 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1094 bool save, struct ioc_now *now)
1096 __propagate_weights(iocg, active, inuse, save, now);
1097 commit_weights(iocg->ioc);
1100 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1102 struct ioc *ioc = iocg->ioc;
1107 /* hot path - if uptodate, use cached */
1108 ioc_gen = atomic_read(&ioc->hweight_gen);
1109 if (ioc_gen == iocg->hweight_gen)
1113 * Paired with wmb in commit_weights(). If we saw the updated
1114 * hweight_gen, all the weight updates from __propagate_weights() are
1117 * We can race with weight updates during calculation and get it
1118 * wrong. However, hweight_gen would have changed and a future
1119 * reader will recalculate and we're guaranteed to discard the
1120 * wrong result soon.
1124 hwa = hwi = WEIGHT_ONE;
1125 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1126 struct ioc_gq *parent = iocg->ancestors[lvl];
1127 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1128 u64 active_sum = READ_ONCE(parent->child_active_sum);
1129 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1130 u32 active = READ_ONCE(child->active);
1131 u32 inuse = READ_ONCE(child->inuse);
1133 /* we can race with deactivations and either may read as zero */
1134 if (!active_sum || !inuse_sum)
1137 active_sum = max_t(u64, active, active_sum);
1138 hwa = div64_u64((u64)hwa * active, active_sum);
1140 inuse_sum = max_t(u64, inuse, inuse_sum);
1141 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1144 iocg->hweight_active = max_t(u32, hwa, 1);
1145 iocg->hweight_inuse = max_t(u32, hwi, 1);
1146 iocg->hweight_gen = ioc_gen;
1149 *hw_activep = iocg->hweight_active;
1151 *hw_inusep = iocg->hweight_inuse;
1155 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1156 * other weights stay unchanged.
1158 static u32 current_hweight_max(struct ioc_gq *iocg)
1160 u32 hwm = WEIGHT_ONE;
1161 u32 inuse = iocg->active;
1162 u64 child_inuse_sum;
1165 lockdep_assert_held(&iocg->ioc->lock);
1167 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1168 struct ioc_gq *parent = iocg->ancestors[lvl];
1169 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1171 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1172 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1173 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1174 parent->child_active_sum);
1177 return max_t(u32, hwm, 1);
1180 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1182 struct ioc *ioc = iocg->ioc;
1183 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1184 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1187 lockdep_assert_held(&ioc->lock);
1189 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1190 if (weight != iocg->weight && iocg->active)
1191 propagate_weights(iocg, weight, iocg->inuse, true, now);
1192 iocg->weight = weight;
1195 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1197 struct ioc *ioc = iocg->ioc;
1198 u64 last_period, cur_period;
1203 * If we seem to be already active, just update the stamp to tell the
1204 * timer that we're still active. We don't mind occasional races.
1206 if (!list_empty(&iocg->active_list)) {
1208 cur_period = atomic64_read(&ioc->cur_period);
1209 if (atomic64_read(&iocg->active_period) != cur_period)
1210 atomic64_set(&iocg->active_period, cur_period);
1214 /* racy check on internal node IOs, treat as root level IOs */
1215 if (iocg->child_active_sum)
1218 spin_lock_irq(&ioc->lock);
1223 cur_period = atomic64_read(&ioc->cur_period);
1224 last_period = atomic64_read(&iocg->active_period);
1225 atomic64_set(&iocg->active_period, cur_period);
1227 /* already activated or breaking leaf-only constraint? */
1228 if (!list_empty(&iocg->active_list))
1229 goto succeed_unlock;
1230 for (i = iocg->level - 1; i > 0; i--)
1231 if (!list_empty(&iocg->ancestors[i]->active_list))
1234 if (iocg->child_active_sum)
1238 * Always start with the target budget. On deactivation, we throw away
1239 * anything above it.
1241 vtarget = now->vnow - ioc->margins.target;
1242 vtime = atomic64_read(&iocg->vtime);
1244 atomic64_add(vtarget - vtime, &iocg->vtime);
1245 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1249 * Activate, propagate weight and start period timer if not
1250 * running. Reset hweight_gen to avoid accidental match from
1253 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1254 list_add(&iocg->active_list, &ioc->active_iocgs);
1256 propagate_weights(iocg, iocg->weight,
1257 iocg->last_inuse ?: iocg->weight, true, now);
1259 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1260 last_period, cur_period, vtime);
1262 iocg->activated_at = now->now;
1264 if (ioc->running == IOC_IDLE) {
1265 ioc->running = IOC_RUNNING;
1266 ioc->dfgv_period_at = now->now;
1267 ioc->dfgv_period_rem = 0;
1268 ioc_start_period(ioc, now);
1272 spin_unlock_irq(&ioc->lock);
1276 spin_unlock_irq(&ioc->lock);
1280 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1282 struct ioc *ioc = iocg->ioc;
1283 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1284 u64 tdelta, delay, new_delay;
1285 s64 vover, vover_pct;
1288 lockdep_assert_held(&iocg->waitq.lock);
1290 /* calculate the current delay in effect - 1/2 every second */
1291 tdelta = now->now - iocg->delay_at;
1293 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1297 /* calculate the new delay from the debt amount */
1298 current_hweight(iocg, &hwa, NULL);
1299 vover = atomic64_read(&iocg->vtime) +
1300 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1301 vover_pct = div64_s64(100 * vover,
1302 ioc->period_us * ioc->vtime_base_rate);
1304 if (vover_pct <= MIN_DELAY_THR_PCT)
1306 else if (vover_pct >= MAX_DELAY_THR_PCT)
1307 new_delay = MAX_DELAY;
1309 new_delay = MIN_DELAY +
1310 div_u64((MAX_DELAY - MIN_DELAY) *
1311 (vover_pct - MIN_DELAY_THR_PCT),
1312 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1314 /* pick the higher one and apply */
1315 if (new_delay > delay) {
1316 iocg->delay = new_delay;
1317 iocg->delay_at = now->now;
1321 if (delay >= MIN_DELAY) {
1322 if (!iocg->indelay_since)
1323 iocg->indelay_since = now->now;
1324 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1327 if (iocg->indelay_since) {
1328 iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
1329 iocg->indelay_since = 0;
1332 blkcg_clear_delay(blkg);
1337 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1338 struct ioc_now *now)
1340 struct iocg_pcpu_stat *gcs;
1342 lockdep_assert_held(&iocg->ioc->lock);
1343 lockdep_assert_held(&iocg->waitq.lock);
1344 WARN_ON_ONCE(list_empty(&iocg->active_list));
1347 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1348 * inuse donating all of its share to others until its debt is paid off.
1350 if (!iocg->abs_vdebt && abs_cost) {
1351 iocg->indebt_since = now->now;
1352 propagate_weights(iocg, iocg->active, 0, false, now);
1355 iocg->abs_vdebt += abs_cost;
1357 gcs = get_cpu_ptr(iocg->pcpu_stat);
1358 local64_add(abs_cost, &gcs->abs_vusage);
1362 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1363 struct ioc_now *now)
1365 lockdep_assert_held(&iocg->ioc->lock);
1366 lockdep_assert_held(&iocg->waitq.lock);
1368 /* make sure that nobody messed with @iocg */
1369 WARN_ON_ONCE(list_empty(&iocg->active_list));
1370 WARN_ON_ONCE(iocg->inuse > 1);
1372 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1374 /* if debt is paid in full, restore inuse */
1375 if (!iocg->abs_vdebt) {
1376 iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
1377 iocg->indebt_since = 0;
1379 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1384 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1385 int flags, void *key)
1387 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1388 struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1389 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1391 ctx->vbudget -= cost;
1393 if (ctx->vbudget < 0)
1396 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1399 * autoremove_wake_function() removes the wait entry only when it
1400 * actually changed the task state. We want the wait always
1401 * removed. Remove explicitly and use default_wake_function().
1403 list_del_init(&wq_entry->entry);
1404 wait->committed = true;
1406 default_wake_function(wq_entry, mode, flags, key);
1411 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1412 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1413 * addition to iocg->waitq.lock.
1415 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1416 struct ioc_now *now)
1418 struct ioc *ioc = iocg->ioc;
1419 struct iocg_wake_ctx ctx = { .iocg = iocg };
1420 u64 vshortage, expires, oexpires;
1424 lockdep_assert_held(&iocg->waitq.lock);
1426 current_hweight(iocg, &hwa, NULL);
1427 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1430 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1431 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1432 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1433 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1435 lockdep_assert_held(&ioc->lock);
1437 atomic64_add(vpay, &iocg->vtime);
1438 atomic64_add(vpay, &iocg->done_vtime);
1439 iocg_pay_debt(iocg, abs_vpay, now);
1443 if (iocg->abs_vdebt || iocg->delay)
1444 iocg_kick_delay(iocg, now);
1447 * Debt can still be outstanding if we haven't paid all yet or the
1448 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1449 * under debt. Make sure @vbudget reflects the outstanding amount and is
1452 if (iocg->abs_vdebt) {
1453 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1454 vbudget = min_t(s64, 0, vbudget - vdebt);
1458 * Wake up the ones which are due and see how much vtime we'll need for
1459 * the next one. As paying off debt restores hw_inuse, it must be read
1460 * after the above debt payment.
1462 ctx.vbudget = vbudget;
1463 current_hweight(iocg, NULL, &ctx.hw_inuse);
1465 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1467 if (!waitqueue_active(&iocg->waitq)) {
1468 if (iocg->wait_since) {
1469 iocg->local_stat.wait_us += now->now - iocg->wait_since;
1470 iocg->wait_since = 0;
1475 if (!iocg->wait_since)
1476 iocg->wait_since = now->now;
1478 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1481 /* determine next wakeup, add a timer margin to guarantee chunking */
1482 vshortage = -ctx.vbudget;
1483 expires = now->now_ns +
1484 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1486 expires += ioc->timer_slack_ns;
1488 /* if already active and close enough, don't bother */
1489 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1490 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1491 abs(oexpires - expires) <= ioc->timer_slack_ns)
1494 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1495 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1498 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1500 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1501 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1503 unsigned long flags;
1505 ioc_now(iocg->ioc, &now);
1507 iocg_lock(iocg, pay_debt, &flags);
1508 iocg_kick_waitq(iocg, pay_debt, &now);
1509 iocg_unlock(iocg, pay_debt, &flags);
1511 return HRTIMER_NORESTART;
1514 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1516 u32 nr_met[2] = { };
1517 u32 nr_missed[2] = { };
1521 for_each_online_cpu(cpu) {
1522 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1523 u64 this_rq_wait_ns;
1525 for (rw = READ; rw <= WRITE; rw++) {
1526 u32 this_met = local_read(&stat->missed[rw].nr_met);
1527 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1529 nr_met[rw] += this_met - stat->missed[rw].last_met;
1530 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1531 stat->missed[rw].last_met = this_met;
1532 stat->missed[rw].last_missed = this_missed;
1535 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1536 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1537 stat->last_rq_wait_ns = this_rq_wait_ns;
1540 for (rw = READ; rw <= WRITE; rw++) {
1541 if (nr_met[rw] + nr_missed[rw])
1543 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1544 nr_met[rw] + nr_missed[rw]);
1546 missed_ppm_ar[rw] = 0;
1549 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1550 ioc->period_us * NSEC_PER_USEC);
1553 /* was iocg idle this period? */
1554 static bool iocg_is_idle(struct ioc_gq *iocg)
1556 struct ioc *ioc = iocg->ioc;
1558 /* did something get issued this period? */
1559 if (atomic64_read(&iocg->active_period) ==
1560 atomic64_read(&ioc->cur_period))
1563 /* is something in flight? */
1564 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1571 * Call this function on the target leaf @iocg's to build pre-order traversal
1572 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1573 * ->walk_list and the caller is responsible for dissolving the list after use.
1575 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1576 struct list_head *inner_walk)
1580 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1582 /* find the first ancestor which hasn't been visited yet */
1583 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1584 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1588 /* walk down and visit the inner nodes to get pre-order traversal */
1589 while (++lvl <= iocg->level - 1) {
1590 struct ioc_gq *inner = iocg->ancestors[lvl];
1592 /* record traversal order */
1593 list_add_tail(&inner->walk_list, inner_walk);
1597 /* collect per-cpu counters and propagate the deltas to the parent */
1598 static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
1600 struct ioc *ioc = iocg->ioc;
1601 struct iocg_stat new_stat;
1606 lockdep_assert_held(&iocg->ioc->lock);
1608 /* collect per-cpu counters */
1609 for_each_possible_cpu(cpu) {
1610 abs_vusage += local64_read(
1611 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1613 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1614 iocg->last_stat_abs_vusage = abs_vusage;
1616 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1617 iocg->local_stat.usage_us += iocg->usage_delta_us;
1619 /* propagate upwards */
1621 iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
1623 iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
1624 new_stat.indebt_us =
1625 iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
1626 new_stat.indelay_us =
1627 iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;
1629 /* propagate the deltas to the parent */
1630 if (iocg->level > 0) {
1631 struct iocg_stat *parent_stat =
1632 &iocg->ancestors[iocg->level - 1]->desc_stat;
1634 parent_stat->usage_us +=
1635 new_stat.usage_us - iocg->last_stat.usage_us;
1636 parent_stat->wait_us +=
1637 new_stat.wait_us - iocg->last_stat.wait_us;
1638 parent_stat->indebt_us +=
1639 new_stat.indebt_us - iocg->last_stat.indebt_us;
1640 parent_stat->indelay_us +=
1641 new_stat.indelay_us - iocg->last_stat.indelay_us;
1644 iocg->last_stat = new_stat;
1647 /* get stat counters ready for reading on all active iocgs */
1648 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1650 LIST_HEAD(inner_walk);
1651 struct ioc_gq *iocg, *tiocg;
1653 /* flush leaves and build inner node walk list */
1654 list_for_each_entry(iocg, target_iocgs, active_list) {
1655 iocg_flush_stat_one(iocg, now);
1656 iocg_build_inner_walk(iocg, &inner_walk);
1659 /* keep flushing upwards by walking the inner list backwards */
1660 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1661 iocg_flush_stat_one(iocg, now);
1662 list_del_init(&iocg->walk_list);
1667 * Determine what @iocg's hweight_inuse should be after donating unused
1668 * capacity. @hwm is the upper bound and used to signal no donation. This
1669 * function also throws away @iocg's excess budget.
1671 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1672 u32 usage, struct ioc_now *now)
1674 struct ioc *ioc = iocg->ioc;
1675 u64 vtime = atomic64_read(&iocg->vtime);
1676 s64 excess, delta, target, new_hwi;
1678 /* debt handling owns inuse for debtors */
1679 if (iocg->abs_vdebt)
1682 /* see whether minimum margin requirement is met */
1683 if (waitqueue_active(&iocg->waitq) ||
1684 time_after64(vtime, now->vnow - ioc->margins.min))
1687 /* throw away excess above target */
1688 excess = now->vnow - vtime - ioc->margins.target;
1690 atomic64_add(excess, &iocg->vtime);
1691 atomic64_add(excess, &iocg->done_vtime);
1693 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1697 * Let's say the distance between iocg's and device's vtimes as a
1698 * fraction of period duration is delta. Assuming that the iocg will
1699 * consume the usage determined above, we want to determine new_hwi so
1700 * that delta equals MARGIN_TARGET at the end of the next period.
1702 * We need to execute usage worth of IOs while spending the sum of the
1703 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1706 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1708 * Therefore, the new_hwi is:
1710 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
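 *
 * For example, if the iocg used 25% of the device (usage = 0.25) and its
 * vtime currently trails the device vtime by 70% of a period
 * (delta = 0.7), new_hwi = 0.25 / (1 - 0.5 + 0.7) =~ 0.21, i.e. about
 * 21% hweight_inuse for the next period.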
1712 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1713 now->vnow - ioc->period_at_vtime);
1714 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1715 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1717 return clamp_t(s64, new_hwi, 1, hwm);
1721 * For work-conservation, an iocg which isn't using all of its share should
1722 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1723 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1725 * #1 is mathematically simpler but has the drawback of requiring synchronous
1726 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1727 * change due to donation snapbacks as it has the possibility of grossly
1728 * overshooting what's allowed by the model and vrate.
1730 * #2 is inherently safe with local operations. The donating iocg can easily
1731 * snap back to higher weights when needed without worrying about impacts on
1732 * other nodes as the impacts will be inherently correct. This also makes idle
1733 * iocg activations safe. The only effect activations have is decreasing
1734 * hweight_inuse of others, the right solution to which is for those iocgs to
1735 * snap back to higher weights.
1737 * So, we go with #2. The challenge is calculating how each donating iocg's
1738 * inuse should be adjusted to achieve the target donation amounts. This is done
1739 * using Andy's method described in the following pdf.
1741 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1743 * Given the weights and target after-donation hweight_inuse values, Andy's
1744 * method determines how the proportional distribution should look like at each
1745 * sibling level to maintain the relative relationship between all non-donating
1746 * pairs. To roughly summarize, it divides the tree into donating and
1747 * non-donating parts, calculates global donation rate which is used to
1748 * determine the target hweight_inuse for each node, and then derives per-level
1751 * The following pdf shows that global distribution calculated this way can be
1752 * achieved by scaling inuse weights of donating leaves and propagating the
1753 * adjustments upwards proportionally.
1755 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1757 * Combining the above two, we can determine how each leaf iocg's inuse should
1758 * be adjusted to achieve the target donation.
1760 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1762 * The inline comments use symbols from the last pdf.
1764 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1765 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1766 * t is the sum of the absolute budgets of donating nodes in the subtree.
1767 * w is the weight of the node. w = w_f + w_t
1768 * w_f is the non-donating portion of w. w_f = w * f / b
1769 * w_t is the donating portion of w. w_t = w * t / b
1770 * s is the sum of all sibling weights. s = Sum(w) for siblings
1771 * s_f and s_t are the non-donating and donating portions of s.
1773 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1774 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1775 * after adjustments. Subscript r denotes the root node's values.
1777 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1779 LIST_HEAD(over_hwa);
1780 LIST_HEAD(inner_walk);
1781 struct ioc_gq *iocg, *tiocg, *root_iocg;
1782 u32 after_sum, over_sum, over_target, gamma;
1785 * It's pretty unlikely but possible for the total sum of
1786 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1787 * confuse the following calculations. If such a condition is detected,
1788 * scale down everyone over its full share equally to keep the sum below
1793 list_for_each_entry(iocg, surpluses, surplus_list) {
1796 current_hweight(iocg, &hwa, NULL);
1797 after_sum += iocg->hweight_after_donation;
1799 if (iocg->hweight_after_donation > hwa) {
1800 over_sum += iocg->hweight_after_donation;
1801 list_add(&iocg->walk_list, &over_hwa);
1805 if (after_sum >= WEIGHT_ONE) {
1807 * The delta should be deducted from the over_sum, calculate
1808 * target over_sum value.
1810 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1811 WARN_ON_ONCE(over_sum <= over_delta);
1812 over_target = over_sum - over_delta;
1817 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1819 iocg->hweight_after_donation =
1820 div_u64((u64)iocg->hweight_after_donation *
1821 over_target, over_sum);
1822 list_del_init(&iocg->walk_list);
1826 * Build pre-order inner node walk list and prepare for donation
1827 * adjustment calculations.
1829 list_for_each_entry(iocg, surpluses, surplus_list) {
1830 iocg_build_inner_walk(iocg, &inner_walk);
1833 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1834 WARN_ON_ONCE(root_iocg->level > 0);
1836 list_for_each_entry(iocg, &inner_walk, walk_list) {
1837 iocg->child_adjusted_sum = 0;
1838 iocg->hweight_donating = 0;
1839 iocg->hweight_after_donation = 0;
1843 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1846 list_for_each_entry(iocg, surpluses, surplus_list) {
1847 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1849 parent->hweight_donating += iocg->hweight_donating;
1850 parent->hweight_after_donation += iocg->hweight_after_donation;
1853 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1854 if (iocg->level > 0) {
1855 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1857 parent->hweight_donating += iocg->hweight_donating;
1858 parent->hweight_after_donation += iocg->hweight_after_donation;
1863 * Calculate inner hwa's (b) and make sure the donation values are
1864 * within the accepted ranges as we're doing low res calculations with
1867 list_for_each_entry(iocg, &inner_walk, walk_list) {
1869 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1871 iocg->hweight_active = DIV64_U64_ROUND_UP(
1872 (u64)parent->hweight_active * iocg->active,
1873 parent->child_active_sum);
1877 iocg->hweight_donating = min(iocg->hweight_donating,
1878 iocg->hweight_active);
1879 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1880 iocg->hweight_donating - 1);
1881 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1882 iocg->hweight_donating <= 1 ||
1883 iocg->hweight_after_donation == 0)) {
1884 pr_warn("iocg: invalid donation weights in ");
1885 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1886 pr_cont(": active=%u donating=%u after=%u\n",
1887 iocg->hweight_active, iocg->hweight_donating,
1888 iocg->hweight_after_donation);
1893 * Calculate the global donation rate (gamma) - the rate to adjust
1894 * non-donating budgets by.
1896 * No need to use 64bit multiplication here as the first operand is
1897 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1899 * We know that there are beneficiary nodes and the sum of the donating
1900 * hweights can't be whole; however, due to the round-ups during hweight
1901 * calculations, root_iocg->hweight_donating might still end up equal to
1902 * or greater than whole. Limit the range when calculating the divider.
1904 * gamma = (1 - t_r') / (1 - t_r)
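 *
 * For example, if the donating nodes hold 40% of the root's budget now
 * and will hold 10% after donation, gamma = (1 - 0.1) / (1 - 0.4) = 1.5
 * and every non-donating budget is scaled up by 1.5x.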
1906 gamma = DIV_ROUND_UP(
1907 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1908 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1911 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1914 list_for_each_entry(iocg, &inner_walk, walk_list) {
1915 struct ioc_gq *parent;
1916 u32 inuse, wpt, wptp;
1919 if (iocg->level == 0) {
1920 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1921 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1922 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1923 WEIGHT_ONE - iocg->hweight_after_donation);
1927 parent = iocg->ancestors[iocg->level - 1];
1929 /* b' = gamma * b_f + b_t' */
1930 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1931 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1932 WEIGHT_ONE) + iocg->hweight_after_donation;
1934 /* w' = s' * b' / b'_p */
1935 inuse = DIV64_U64_ROUND_UP(
1936 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1937 parent->hweight_inuse);
1939 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1940 st = DIV64_U64_ROUND_UP(
1941 iocg->child_active_sum * iocg->hweight_donating,
1942 iocg->hweight_active);
1943 sf = iocg->child_active_sum - st;
1944 wpt = DIV64_U64_ROUND_UP(
1945 (u64)iocg->active * iocg->hweight_donating,
1946 iocg->hweight_active);
1947 wptp = DIV64_U64_ROUND_UP(
1948 (u64)inuse * iocg->hweight_after_donation,
1949 iocg->hweight_inuse);
1951 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1955 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1956 * we can finally determine leaf adjustments.
1958 list_for_each_entry(iocg, surpluses, surplus_list) {
1959 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1963 * In-debt iocgs participated in the donation calculation with
1964 * the minimum target hweight_inuse. Configuring inuse
1965 * accordingly would work fine but debt handling expects
1966 * @iocg->inuse to stay at the minimum and we don't want to
1969 if (iocg->abs_vdebt) {
1970 WARN_ON_ONCE(iocg->inuse > 1);
1974 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
1975 inuse = DIV64_U64_ROUND_UP(
1976 parent->child_adjusted_sum * iocg->hweight_after_donation,
1977 parent->hweight_inuse);
1979 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
1981 iocg->hweight_inuse,
1982 iocg->hweight_after_donation);
1984 __propagate_weights(iocg, iocg->active, inuse, true, now);
1987 /* walk list should be dissolved after use */
1988 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
1989 list_del_init(&iocg->walk_list);
1993 * A low weight iocg can amass a large amount of debt, for example, when
1994 * anonymous memory gets reclaimed aggressively. If the system has a lot of
1995 * memory paired with a slow IO device, the debt can span multiple seconds or
1996 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
1997 * up blocked paying its debt while the IO device is idle.
1999 * The following protects against such cases. If the device has been
* sufficiently idle for a while, the debts are halved and delays are recalculated.
2003 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2004 struct ioc_now *now)
2006 struct ioc_gq *iocg;
2007 u64 dur, usage_pct, nr_cycles;
/* if no debtor, reset the cycle */
if (!nr_debtors) {
	ioc->dfgv_period_at = now->now;
	ioc->dfgv_period_rem = 0;
	ioc->dfgv_usage_us_sum = 0;
	return;
}
2018 * Debtors can pass through a lot of writes choking the device and we
2019 * don't want to be forgiving debts while the device is struggling from
* write bursts. If we're missing latency targets, consider the device fully utilized.
2023 if (ioc->busy_level > 0)
2024 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2026 ioc->dfgv_usage_us_sum += usage_us_sum;
if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
	return;
2031 * At least DFGV_PERIOD has passed since the last period. Calculate the
2032 * average usage and reset the period counters.
2034 dur = now->now - ioc->dfgv_period_at;
2035 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
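/*
 * For illustration with made-up numbers: 300ms of summed usage over a 2s
 * window gives usage_pct = 100 * 300000 / 2000000 = 15%.
 */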
2037 ioc->dfgv_period_at = now->now;
2038 ioc->dfgv_usage_us_sum = 0;
2040 /* if was too busy, reset everything */
2041 if (usage_pct > DFGV_USAGE_PCT) {
ioc->dfgv_period_rem = 0;
return;
2047 * Usage is lower than threshold. Let's forgive some debts. Debt
2048 * forgiveness runs off of the usual ioc timer but its period usually
2049 * doesn't match ioc's. Compensate the difference by performing the
2050 * reduction as many times as would fit in the duration since the last
2051 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
* - if DFGV_PERIOD is 75% of the ioc period, one out of three consecutive
* reductions is doubled.
2055 nr_cycles = dur + ioc->dfgv_period_rem;
2056 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
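/*
 * For illustration with made-up numbers (the real DFGV_PERIOD is defined
 * elsewhere in this file): if DFGV_PERIOD were 5s and 12s had passed since
 * the last run with no carry-over, nr_cycles would become 2 with 2s left
 * in dfgv_period_rem, so the loop below would halve each debt twice.
 */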
2058 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2059 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2061 if (!iocg->abs_vdebt && !iocg->delay)
2064 spin_lock(&iocg->waitq.lock);
2066 old_debt = iocg->abs_vdebt;
2067 old_delay = iocg->delay;
2069 if (iocg->abs_vdebt)
2070 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
if (iocg->delay)
	iocg->delay = iocg->delay >> nr_cycles ?: 1;
2074 iocg_kick_waitq(iocg, true, now);
2076 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2077 old_debt, iocg->abs_vdebt,
2078 old_delay, iocg->delay);
2080 spin_unlock(&iocg->waitq.lock);
2084 static void ioc_timer_fn(struct timer_list *timer)
2086 struct ioc *ioc = container_of(timer, struct ioc, timer);
struct ioc_gq *iocg, *tiocg;
struct ioc_now now;
LIST_HEAD(surpluses);
2090 int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
2091 u64 usage_us_sum = 0;
2092 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2093 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2094 u32 missed_ppm[2], rq_wait_pct;
u64 period_vtime;
int prev_busy_level;
2098 /* how were the latencies during the period? */
2099 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2101 /* take care of active iocgs */
2102 spin_lock_irq(&ioc->lock);
2106 period_vtime = now.vnow - ioc->period_at_vtime;
2107 if (WARN_ON_ONCE(!period_vtime)) {
2108 spin_unlock_irq(&ioc->lock);
2113 * Waiters determine the sleep durations based on the vrate they
2114 * saw at the time of sleep. If vrate has increased, some waiters
2115 * could be sleeping for too long. Wake up tardy waiters which
2116 * should have woken up in the last period and expire idle iocgs.
2118 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2119 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2120 !iocg->delay && !iocg_is_idle(iocg))
2123 spin_lock(&iocg->waitq.lock);
2125 /* flush wait and indebt stat deltas */
2126 if (iocg->wait_since) {
2127 iocg->local_stat.wait_us += now.now - iocg->wait_since;
2128 iocg->wait_since = now.now;
2130 if (iocg->indebt_since) {
2131 iocg->local_stat.indebt_us +=
2132 now.now - iocg->indebt_since;
2133 iocg->indebt_since = now.now;
2135 if (iocg->indelay_since) {
2136 iocg->local_stat.indelay_us +=
2137 now.now - iocg->indelay_since;
2138 iocg->indelay_since = now.now;
2141 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2143 /* might be oversleeping vtime / hweight changes, kick */
2144 iocg_kick_waitq(iocg, true, &now);
if (iocg->abs_vdebt || iocg->delay)
	nr_debtors++;
2147 } else if (iocg_is_idle(iocg)) {
2148 /* no waiter and idle, deactivate */
2149 u64 vtime = atomic64_read(&iocg->vtime);
2153 * @iocg has been inactive for a full duration and will
2154 * have a high budget. Account anything above target as
2155 * error and throw away. On reactivation, it'll start
2156 * with the target budget.
2158 excess = now.vnow - vtime - ioc->margins.target;
2162 current_hweight(iocg, NULL, &old_hwi);
ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
2167 __propagate_weights(iocg, 0, 0, false, &now);
2168 list_del_init(&iocg->active_list);
2171 spin_unlock(&iocg->waitq.lock);
2173 commit_weights(ioc);
2176 * Wait and indebt stat are flushed above and the donation calculation
2177 * below needs updated usage stat. Let's bring stat up-to-date.
2179 iocg_flush_stat(&ioc->active_iocgs, &now);
2181 /* calc usage and see whether some weights need to be moved around */
2182 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2183 u64 vdone, vtime, usage_us, usage_dur;
2184 u32 usage, hw_active, hw_inuse;
2187 * Collect unused and wind vtime closer to vnow to prevent
2188 * iocgs from accumulating a large amount of budget.
2190 vdone = atomic64_read(&iocg->done_vtime);
2191 vtime = atomic64_read(&iocg->vtime);
2192 current_hweight(iocg, &hw_active, &hw_inuse);
2195 * Latency QoS detection doesn't account for IOs which are
2196 * in-flight for longer than a period. Detect them by
2197 * comparing vdone against period start. If lagging behind
2198 * IOs from past periods, don't increase vrate.
2200 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2201 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2202 time_after64(vtime, vdone) &&
2203 time_after64(vtime, now.vnow -
2204 MAX_LAGGING_PERIODS * period_vtime) &&
    time_before64(vdone, now.vnow - period_vtime))
	nr_lagging++;
2209 * Determine absolute usage factoring in in-flight IOs to avoid
2210 * high-latency completions appearing as idle.
2212 usage_us = iocg->usage_delta_us;
2213 usage_us_sum += usage_us;
2215 if (vdone != vtime) {
2216 u64 inflight_us = DIV64_U64_ROUND_UP(
2217 cost_to_abs_cost(vtime - vdone, hw_inuse),
2218 ioc->vtime_base_rate);
2219 usage_us = max(usage_us, inflight_us);
2222 /* convert to hweight based usage ratio */
2223 if (time_after64(iocg->activated_at, ioc->period_at))
2224 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
else
	usage_dur = max_t(u64, now.now - ioc->period_at, 1);
usage = clamp_t(u32,
		DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE, usage_dur),
		1, WEIGHT_ONE);
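/*
 * For illustration with made-up numbers: 10ms of absolute device time
 * consumed over a 100ms window maps to usage of roughly WEIGHT_ONE / 10,
 * i.e. about 10% of the device in hweight terms.
 */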
2233 /* see whether there's surplus vtime */
2234 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2235 if (hw_inuse < hw_active ||
2236 (!waitqueue_active(&iocg->waitq) &&
2237 time_before64(vtime, now.vnow - ioc->margins.low))) {
2238 u32 hwa, old_hwi, hwm, new_hwi;
2241 * Already donating or accumulated enough to start.
2242 * Determine the donation amount.
2244 current_hweight(iocg, &hwa, &old_hwi);
2245 hwm = current_hweight_max(iocg);
2246 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2248 if (new_hwi < hwm) {
2249 iocg->hweight_donating = hwa;
2250 iocg->hweight_after_donation = new_hwi;
2251 list_add(&iocg->surplus_list, &surpluses);
} else {
	TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2254 iocg->inuse, iocg->active,
2255 iocg->hweight_inuse, new_hwi);
2257 __propagate_weights(iocg, iocg->active,
2258 iocg->active, true, &now);
} else {
	/* genuinely short on vtime */
	nr_shortages++;
}
2267 if (!list_empty(&surpluses) && nr_shortages)
2268 transfer_surpluses(&surpluses, &now);
2270 commit_weights(ioc);
2272 /* surplus list should be dissolved after use */
2273 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2274 list_del_init(&iocg->surplus_list);
2277 * If q is getting clogged or we're missing too much, we're issuing
2278 * too much IO and should lower vtime rate. If we're not missing
2279 * and experiencing shortages but not surpluses, we're too stingy
2280 * and should increase vtime rate.
2282 prev_busy_level = ioc->busy_level;
2283 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2284 missed_ppm[READ] > ppm_rthr ||
2285 missed_ppm[WRITE] > ppm_wthr) {
2286 /* clearly missing QoS targets, slow down vrate */
ioc->busy_level = max(ioc->busy_level, 0);
ioc->busy_level++;
2289 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2290 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2291 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2292 /* QoS targets are being met with >25% margin */
2295 * We're throttling while the device has spare
2296 * capacity. If vrate was being slowed down, stop.
2298 ioc->busy_level = min(ioc->busy_level, 0);
2301 * If there are IOs spanning multiple periods, wait
2302 * them out before pushing the device harder.
2308 * Nobody is being throttled and the users aren't
2309 * issuing enough IOs to saturate the device. We
2310 * simply don't know how close the device is to
2311 * saturation. Coast.
2313 ioc->busy_level = 0;
/* inside the hysteresis margin, we're good */
2317 ioc->busy_level = 0;
2320 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2322 if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
2323 u64 vrate = ioc->vtime_base_rate;
2324 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
2326 /* rq_wait signal is always reliable, ignore user vrate_min */
2327 if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
2328 vrate_min = VRATE_MIN;
2331 * If vrate is out of bounds, apply clamp gradually as the
* bounds can change abruptly. Otherwise, apply busy_level based adjustment.
2335 if (vrate < vrate_min) {
vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
2338 vrate = min(vrate, vrate_min);
2339 } else if (vrate > vrate_max) {
vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
2342 vrate = max(vrate, vrate_max);
2344 int idx = min_t(int, abs(ioc->busy_level),
2345 ARRAY_SIZE(vrate_adj_pct) - 1);
2346 u32 adj_pct = vrate_adj_pct[idx];
2348 if (ioc->busy_level > 0)
2349 adj_pct = 100 - adj_pct;
else
	adj_pct = 100 + adj_pct;
2353 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
2354 vrate_min, vrate_max);
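/*
 * For illustration with a made-up table value: if vrate_adj_pct[idx]
 * evaluated to 2 for the current busy_level, a busy device would have
 * vrate scaled to 98% and an underutilized one to 102%, still clamped
 * to [vrate_min, vrate_max].
 */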
2357 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
2358 nr_lagging, nr_shortages);
2360 ioc->vtime_base_rate = vrate;
2361 ioc_refresh_margins(ioc);
2362 } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
2363 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
missed_ppm, rq_wait_pct, nr_lagging, nr_shortages);
2368 ioc_refresh_params(ioc, false);
2370 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
* This period is done. Move on to the next one. If nothing's
2374 * going on with the device, stop the timer.
2376 atomic64_inc(&ioc->cur_period);
2378 if (ioc->running != IOC_STOP) {
2379 if (!list_empty(&ioc->active_iocgs)) {
2380 ioc_start_period(ioc, &now);
} else {
	ioc->busy_level = 0;
	ioc->vtime_err = 0;
2384 ioc->running = IOC_IDLE;
2387 ioc_refresh_vrate(ioc, &now);
2390 spin_unlock_irq(&ioc->lock);
2393 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2394 u64 abs_cost, struct ioc_now *now)
2396 struct ioc *ioc = iocg->ioc;
2397 struct ioc_margins *margins = &ioc->margins;
2398 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
u32 hwi, adj_step;
s64 margin;
u64 cost, new_inuse;
2403 current_hweight(iocg, NULL, &hwi);
2405 cost = abs_cost_to_cost(abs_cost, hwi);
2406 margin = now->vnow - vtime - cost;
2408 /* debt handling owns inuse for debtors */
2409 if (iocg->abs_vdebt)
2413 * We only increase inuse during period and do so iff the margin has
2414 * deteriorated since the previous adjustment.
2416 if (margin >= iocg->saved_margin || margin >= margins->low ||
2417 iocg->inuse == iocg->active)
2420 spin_lock_irq(&ioc->lock);
2422 /* we own inuse only when @iocg is in the normal active state */
2423 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2424 spin_unlock_irq(&ioc->lock);
2429 * Bump up inuse till @abs_cost fits in the existing budget.
2430 * adj_step must be determined after acquiring ioc->lock - we might
2431 * have raced and lost to another thread for activation and could
* be reading 0 iocg->active before ioc->lock which will lead to an infinite loop.
2435 new_inuse = iocg->inuse;
2436 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
do {
	new_inuse = new_inuse + adj_step;
2439 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2440 current_hweight(iocg, NULL, &hwi);
2441 cost = abs_cost_to_cost(abs_cost, hwi);
2442 } while (time_after64(vtime + cost, now->vnow) &&
2443 iocg->inuse != iocg->active);
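/*
 * Sketch of the loop above with a made-up step size: if
 * INUSE_ADJ_STEP_PCT were 25, each pass would raise inuse by a quarter
 * of the active weight, so only a few iterations are ever needed before
 * either the IO fits within budget or inuse reaches active and the loop
 * stops.
 */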
2445 spin_unlock_irq(&ioc->lock);
2447 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2448 old_inuse, iocg->inuse, old_hwi, hwi);
2453 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2454 bool is_merge, u64 *costp)
2456 struct ioc *ioc = iocg->ioc;
2457 u64 coef_seqio, coef_randio, coef_page;
2458 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2462 switch (bio_op(bio)) {
2464 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2465 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2466 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2469 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2470 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2471 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2478 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2479 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2483 if (seek_pages > LCOEF_RANDIO_PAGES) {
cost += coef_randio;
} else {
	cost += coef_seqio;
}
2489 cost += pages * coef_page;
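/*
 * Worked example with made-up coefficients (the real values come from
 * ioc->params.lcoefs): with coef_seqio = 10000 and coef_page = 500 vtime
 * units, an 8-page read landing within LCOEF_RANDIO_PAGES of the cursor
 * costs 10000 + 8 * 500 = 14000, while the same read after a long seek
 * uses coef_randio as its base cost instead.
 */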
2494 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2498 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2502 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2505 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2507 switch (req_op(rq)) {
2509 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2512 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2519 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2523 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2527 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2529 struct blkcg_gq *blkg = bio->bi_blkg;
2530 struct ioc *ioc = rqos_to_ioc(rqos);
2531 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2533 struct iocg_wait wait;
2534 u64 abs_cost, cost, vtime;
2535 bool use_debt, ioc_locked;
2536 unsigned long flags;
2538 /* bypass IOs if disabled, still initializing, or for root cgroup */
2539 if (!ioc->enabled || !iocg || !iocg->level)
2542 /* calculate the absolute vtime cost */
2543 abs_cost = calc_vtime_cost(bio, iocg, false);
2547 if (!iocg_activate(iocg, &now))
2550 iocg->cursor = bio_end_sector(bio);
2551 vtime = atomic64_read(&iocg->vtime);
2552 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2555 * If no one's waiting and within budget, issue right away. The
2556 * tests are racy but the races aren't systemic - we only miss once
2557 * in a while which is fine.
2559 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2560 time_before_eq64(vtime + cost, now.vnow)) {
2561 iocg_commit_bio(iocg, bio, abs_cost, cost);
2566 * We're over budget. This can be handled in two ways. IOs which may
2567 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2568 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2569 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2570 * whether debt handling is needed and acquire locks accordingly.
2572 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2573 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2575 iocg_lock(iocg, ioc_locked, &flags);
2578 * @iocg must stay activated for debt and waitq handling. Deactivation
2579 * is synchronized against both ioc->lock and waitq.lock and we won't
* get deactivated as long as we're waiting or have debt, so we're good
* if we're activated here. In the unlikely cases that we aren't, just issue the IO.
2584 if (unlikely(list_empty(&iocg->active_list))) {
2585 iocg_unlock(iocg, ioc_locked, &flags);
2586 iocg_commit_bio(iocg, bio, abs_cost, cost);
2591 * We're over budget. If @bio has to be issued regardless, remember
2592 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2593 * off the debt before waking more IOs.
2595 * This way, the debt is continuously paid off each period with the
2596 * actual budget available to the cgroup. If we just wound vtime, we
2597 * would incorrectly use the current hw_inuse for the entire amount
2598 * which, for example, can lead to the cgroup staying blocked for a
2599 * long time even with substantially raised hw_inuse.
2601 * An iocg with vdebt should stay online so that the timer can keep
2602 * deducting its vdebt and [de]activate use_delay mechanism
2603 * accordingly. We don't want to race against the timer trying to
2604 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2605 * penalizing the cgroup and its descendants.
2608 iocg_incur_debt(iocg, abs_cost, &now);
2609 if (iocg_kick_delay(iocg, &now))
2610 blkcg_schedule_throttle(rqos->q,
2611 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2612 iocg_unlock(iocg, ioc_locked, &flags);
2616 /* guarantee that iocgs w/ waiters have maximum inuse */
2617 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2619 iocg_unlock(iocg, false, &flags);
2623 propagate_weights(iocg, iocg->active, iocg->active, true,
2628 * Append self to the waitq and schedule the wakeup timer if we're
2629 * the first waiter. The timer duration is calculated based on the
2630 * current vrate. vtime and hweight changes can make it too short
2631 * or too long. Each wait entry records the absolute cost it's
2632 * waiting for to allow re-evaluation using a custom wait entry.
2634 * If too short, the timer simply reschedules itself. If too long,
2635 * the period timer will notice and trigger wakeups.
2637 * All waiters are on iocg->waitq and the wait states are
2638 * synchronized using waitq.lock.
2640 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2641 wait.wait.private = current;
2643 wait.abs_cost = abs_cost;
2644 wait.committed = false; /* will be set true by waker */
2646 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2647 iocg_kick_waitq(iocg, ioc_locked, &now);
2649 iocg_unlock(iocg, ioc_locked, &flags);
while (true) {
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (wait.committed)
		break;
	io_schedule();
}
2658 /* waker already committed us, proceed */
2659 finish_wait(&iocg->waitq, &wait.wait);
2662 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2665 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2666 struct ioc *ioc = rqos_to_ioc(rqos);
2667 sector_t bio_end = bio_end_sector(bio);
2669 u64 vtime, abs_cost, cost;
2670 unsigned long flags;
2672 /* bypass if disabled, still initializing, or for root cgroup */
2673 if (!ioc->enabled || !iocg || !iocg->level)
2676 abs_cost = calc_vtime_cost(bio, iocg, true);
2682 vtime = atomic64_read(&iocg->vtime);
2683 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2685 /* update cursor if backmerging into the request at the cursor */
2686 if (blk_rq_pos(rq) < bio_end &&
2687 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2688 iocg->cursor = bio_end;
* Charge if there's enough vtime budget and the existing request has cost assigned.
2694 if (rq->bio && rq->bio->bi_iocost_cost &&
2695 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2696 iocg_commit_bio(iocg, bio, abs_cost, cost);
2701 * Otherwise, account it as debt if @iocg is online, which it should
2702 * be for the vast majority of cases. See debt handling in
2703 * ioc_rqos_throttle() for details.
2705 spin_lock_irqsave(&ioc->lock, flags);
2706 spin_lock(&iocg->waitq.lock);
2708 if (likely(!list_empty(&iocg->active_list))) {
2709 iocg_incur_debt(iocg, abs_cost, &now);
2710 if (iocg_kick_delay(iocg, &now))
2711 blkcg_schedule_throttle(rqos->q,
2712 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2714 iocg_commit_bio(iocg, bio, abs_cost, cost);
2717 spin_unlock(&iocg->waitq.lock);
2718 spin_unlock_irqrestore(&ioc->lock, flags);
2721 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2723 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2725 if (iocg && bio->bi_iocost_cost)
2726 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2729 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2731 struct ioc *ioc = rqos_to_ioc(rqos);
2732 struct ioc_pcpu_stat *ccs;
2733 u64 on_q_ns, rq_wait_ns, size_nsec;
2736 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2739 switch (req_op(rq) & REQ_OP_MASK) {
2752 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2753 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2754 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2756 ccs = get_cpu_ptr(ioc->pcpu_stat);
2758 if (on_q_ns <= size_nsec ||
2759 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2760 local_inc(&ccs->missed[rw].nr_met);
2762 local_inc(&ccs->missed[rw].nr_missed);
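/*
 * For illustration with made-up numbers: with rlat = 5000 (5ms), a read
 * that spent 7ms between allocation and completion with a 1ms size-based
 * allowance is 6ms over and counts as missed, while one at 5ms total
 * (4ms over) still counts as met.
 */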
2764 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2769 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2771 struct ioc *ioc = rqos_to_ioc(rqos);
2773 spin_lock_irq(&ioc->lock);
2774 ioc_refresh_params(ioc, false);
2775 spin_unlock_irq(&ioc->lock);
2778 static void ioc_rqos_exit(struct rq_qos *rqos)
2780 struct ioc *ioc = rqos_to_ioc(rqos);
2782 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2784 spin_lock_irq(&ioc->lock);
2785 ioc->running = IOC_STOP;
2786 spin_unlock_irq(&ioc->lock);
2788 del_timer_sync(&ioc->timer);
2789 free_percpu(ioc->pcpu_stat);
2793 static struct rq_qos_ops ioc_rqos_ops = {
2794 .throttle = ioc_rqos_throttle,
2795 .merge = ioc_rqos_merge,
2796 .done_bio = ioc_rqos_done_bio,
2797 .done = ioc_rqos_done,
2798 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2799 .exit = ioc_rqos_exit,
2802 static int blk_iocost_init(struct request_queue *q)
2805 struct rq_qos *rqos;
2808 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2812 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2813 if (!ioc->pcpu_stat) {
2818 for_each_possible_cpu(cpu) {
2819 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2821 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2822 local_set(&ccs->missed[i].nr_met, 0);
2823 local_set(&ccs->missed[i].nr_missed, 0);
2825 local64_set(&ccs->rq_wait_ns, 0);
2829 rqos->id = RQ_QOS_COST;
2830 rqos->ops = &ioc_rqos_ops;
2833 spin_lock_init(&ioc->lock);
2834 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2835 INIT_LIST_HEAD(&ioc->active_iocgs);
2837 ioc->running = IOC_IDLE;
2838 ioc->vtime_base_rate = VTIME_PER_USEC;
2839 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2840 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2841 ioc->period_at = ktime_to_us(ktime_get());
2842 atomic64_set(&ioc->cur_period, 0);
2843 atomic_set(&ioc->hweight_gen, 0);
2845 spin_lock_irq(&ioc->lock);
2846 ioc->autop_idx = AUTOP_INVALID;
2847 ioc_refresh_params(ioc, true);
2848 spin_unlock_irq(&ioc->lock);
* rqos must be added before activation to allow iocg_pd_init() to
* look up the ioc from q. This means that the rqos methods may get
* called before policy activation completes, so they can't assume that the
2854 * target bio has an iocg associated and need to test for NULL iocg.
2856 rq_qos_add(q, rqos);
2857 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2859 rq_qos_del(q, rqos);
2860 free_percpu(ioc->pcpu_stat);
2867 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2869 struct ioc_cgrp *iocc;
2871 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2875 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2879 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2881 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2884 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2885 struct blkcg *blkcg)
2887 int levels = blkcg->css.cgroup->level + 1;
2888 struct ioc_gq *iocg;
2890 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2894 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2895 if (!iocg->pcpu_stat) {
2903 static void ioc_pd_init(struct blkg_policy_data *pd)
2905 struct ioc_gq *iocg = pd_to_iocg(pd);
2906 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2907 struct ioc *ioc = q_to_ioc(blkg->q);
2909 struct blkcg_gq *tblkg;
2910 unsigned long flags;
2915 atomic64_set(&iocg->vtime, now.vnow);
2916 atomic64_set(&iocg->done_vtime, now.vnow);
2917 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2918 INIT_LIST_HEAD(&iocg->active_list);
2919 INIT_LIST_HEAD(&iocg->walk_list);
2920 INIT_LIST_HEAD(&iocg->surplus_list);
2921 iocg->hweight_active = WEIGHT_ONE;
2922 iocg->hweight_inuse = WEIGHT_ONE;
2924 init_waitqueue_head(&iocg->waitq);
2925 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2926 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2928 iocg->level = blkg->blkcg->css.cgroup->level;
2930 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2931 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2932 iocg->ancestors[tiocg->level] = tiocg;
2935 spin_lock_irqsave(&ioc->lock, flags);
2936 weight_updated(iocg, &now);
2937 spin_unlock_irqrestore(&ioc->lock, flags);
2940 static void ioc_pd_free(struct blkg_policy_data *pd)
2942 struct ioc_gq *iocg = pd_to_iocg(pd);
2943 struct ioc *ioc = iocg->ioc;
2944 unsigned long flags;
2947 spin_lock_irqsave(&ioc->lock, flags);
2949 if (!list_empty(&iocg->active_list)) {
2953 propagate_weights(iocg, 0, 0, false, &now);
2954 list_del_init(&iocg->active_list);
2957 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2958 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2960 spin_unlock_irqrestore(&ioc->lock, flags);
2962 hrtimer_cancel(&iocg->waitq_timer);
2964 free_percpu(iocg->pcpu_stat);
2968 static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
2970 struct ioc_gq *iocg = pd_to_iocg(pd);
2971 struct ioc *ioc = iocg->ioc;
2977 if (iocg->level == 0) {
2978 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
ioc->vtime_base_rate * 10000, VTIME_PER_USEC);
2981 pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
2982 vp10k / 100, vp10k % 100);
2985 pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
2986 iocg->last_stat.usage_us);
2988 if (blkcg_debug_stats)
2989 pos += scnprintf(buf + pos, size - pos,
2990 " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
2991 iocg->last_stat.wait_us,
2992 iocg->last_stat.indebt_us,
2993 iocg->last_stat.indelay_us);
2998 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3001 const char *dname = blkg_dev_name(pd->blkg);
3002 struct ioc_gq *iocg = pd_to_iocg(pd);
3004 if (dname && iocg->cfg_weight)
3005 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3010 static int ioc_weight_show(struct seq_file *sf, void *v)
3012 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3013 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3015 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3016 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3017 &blkcg_policy_iocost, seq_cft(sf)->private, false);
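/*
 * Example writes accepted by ioc_weight_write() below (illustrative only,
 * the device number is made up): "default 100" or a bare "100" update the
 * cgroup-wide default weight, while "8:16 200" sets a per-device weight
 * resolved through blkg_conf_prep().
 */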
3021 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3022 size_t nbytes, loff_t off)
3024 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3025 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3026 struct blkg_conf_ctx ctx;
3028 struct ioc_gq *iocg;
3032 if (!strchr(buf, ':')) {
3033 struct blkcg_gq *blkg;
3035 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3038 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3041 spin_lock(&blkcg->lock);
3042 iocc->dfl_weight = v * WEIGHT_ONE;
3043 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3044 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3047 spin_lock_irq(&iocg->ioc->lock);
3048 ioc_now(iocg->ioc, &now);
3049 weight_updated(iocg, &now);
3050 spin_unlock_irq(&iocg->ioc->lock);
3053 spin_unlock(&blkcg->lock);
3058 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3062 iocg = blkg_to_iocg(ctx.blkg);
3064 if (!strncmp(ctx.body, "default", 7)) {
3067 if (!sscanf(ctx.body, "%u", &v))
3069 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3073 spin_lock(&iocg->ioc->lock);
3074 iocg->cfg_weight = v * WEIGHT_ONE;
3075 ioc_now(iocg->ioc, &now);
3076 weight_updated(iocg, &now);
3077 spin_unlock(&iocg->ioc->lock);
3079 blkg_conf_finish(&ctx);
3083 blkg_conf_finish(&ctx);
3087 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3090 const char *dname = blkg_dev_name(pd->blkg);
3091 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3096 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3097 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3098 ioc->params.qos[QOS_RPPM] / 10000,
3099 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3100 ioc->params.qos[QOS_RLAT],
3101 ioc->params.qos[QOS_WPPM] / 10000,
3102 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3103 ioc->params.qos[QOS_WLAT],
3104 ioc->params.qos[QOS_MIN] / 10000,
3105 ioc->params.qos[QOS_MIN] % 10000 / 100,
3106 ioc->params.qos[QOS_MAX] / 10000,
3107 ioc->params.qos[QOS_MAX] % 10000 / 100);
3111 static int ioc_qos_show(struct seq_file *sf, void *v)
3113 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3115 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3116 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3120 static const match_table_t qos_ctrl_tokens = {
3121 { QOS_ENABLE, "enable=%u" },
3122 { QOS_CTRL, "ctrl=%s" },
3123 { NR_QOS_CTRL_PARAMS, NULL },
3126 static const match_table_t qos_tokens = {
3127 { QOS_RPPM, "rpct=%s" },
3128 { QOS_RLAT, "rlat=%u" },
3129 { QOS_WPPM, "wpct=%s" },
3130 { QOS_WLAT, "wlat=%u" },
3131 { QOS_MIN, "min=%s" },
3132 { QOS_MAX, "max=%s" },
3133 { NR_QOS_PARAMS, NULL },
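/*
 * Example invocation (illustrative only, device number and values are
 * made up):
 *
 *   echo "8:16 enable=1 rpct=95.00 rlat=10000 wpct=95.00 wlat=20000" \
 *       > /sys/fs/cgroup/io.cost.qos
 *
 * enables the controller on device 8:16 and sets the read/write latency
 * QoS targets; ioc_qos_write() below parses the line using the token
 * tables above.
 */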
3136 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3137 size_t nbytes, loff_t off)
3139 struct gendisk *disk;
3141 u32 qos[NR_QOS_PARAMS];
3146 disk = blkcg_conf_get_disk(&input);
3148 return PTR_ERR(disk);
3150 ioc = q_to_ioc(disk->queue);
3152 ret = blk_iocost_init(disk->queue);
3155 ioc = q_to_ioc(disk->queue);
3158 spin_lock_irq(&ioc->lock);
3159 memcpy(qos, ioc->params.qos, sizeof(qos));
3160 enable = ioc->enabled;
3161 user = ioc->user_qos_params;
3162 spin_unlock_irq(&ioc->lock);
3164 while ((p = strsep(&input, " \t\n"))) {
3165 substring_t args[MAX_OPT_ARGS];
3173 switch (match_token(p, qos_ctrl_tokens, args)) {
3175 match_u64(&args[0], &v);
3179 match_strlcpy(buf, &args[0], sizeof(buf));
3180 if (!strcmp(buf, "auto"))
3182 else if (!strcmp(buf, "user"))
3189 tok = match_token(p, qos_tokens, args);
3193 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3196 if (cgroup_parse_float(buf, 2, &v))
3198 if (v < 0 || v > 10000)
3204 if (match_u64(&args[0], &v))
3210 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3213 if (cgroup_parse_float(buf, 2, &v))
3217 qos[tok] = clamp_t(s64, v * 100,
3218 VRATE_MIN_PPM, VRATE_MAX_PPM);
3226 if (qos[QOS_MIN] > qos[QOS_MAX])
3229 spin_lock_irq(&ioc->lock);
3232 blk_stat_enable_accounting(ioc->rqos.q);
3233 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3234 ioc->enabled = true;
3236 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3237 ioc->enabled = false;
3241 memcpy(ioc->params.qos, qos, sizeof(qos));
3242 ioc->user_qos_params = true;
3244 ioc->user_qos_params = false;
3247 ioc_refresh_params(ioc, true);
3248 spin_unlock_irq(&ioc->lock);
3250 put_disk_and_module(disk);
3255 put_disk_and_module(disk);
3259 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3260 struct blkg_policy_data *pd, int off)
3262 const char *dname = blkg_dev_name(pd->blkg);
3263 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3264 u64 *u = ioc->params.i_lcoefs;
3269 seq_printf(sf, "%s ctrl=%s model=linear "
3270 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3271 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3272 dname, ioc->user_cost_model ? "user" : "auto",
3273 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3274 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3278 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3280 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3282 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3283 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3287 static const match_table_t cost_ctrl_tokens = {
3288 { COST_CTRL, "ctrl=%s" },
3289 { COST_MODEL, "model=%s" },
3290 { NR_COST_CTRL_PARAMS, NULL },
3293 static const match_table_t i_lcoef_tokens = {
3294 { I_LCOEF_RBPS, "rbps=%u" },
3295 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3296 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3297 { I_LCOEF_WBPS, "wbps=%u" },
3298 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3299 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3300 { NR_I_LCOEFS, NULL },
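/*
 * Example invocation (illustrative only, device number and coefficients
 * are made up):
 *
 *   echo "8:16 ctrl=user model=linear rbps=2000000000 rseqiops=200000 rrandiops=65000 wbps=1000000000 wseqiops=150000 wrandiops=60000" \
 *       > /sys/fs/cgroup/io.cost.model
 *
 * switches the device to user-provided linear model coefficients;
 * ioc_cost_model_write() below parses the line using the token tables
 * above.
 */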
3303 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3304 size_t nbytes, loff_t off)
3306 struct gendisk *disk;
3313 disk = blkcg_conf_get_disk(&input);
3315 return PTR_ERR(disk);
3317 ioc = q_to_ioc(disk->queue);
3319 ret = blk_iocost_init(disk->queue);
3322 ioc = q_to_ioc(disk->queue);
3325 spin_lock_irq(&ioc->lock);
3326 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3327 user = ioc->user_cost_model;
3328 spin_unlock_irq(&ioc->lock);
3330 while ((p = strsep(&input, " \t\n"))) {
3331 substring_t args[MAX_OPT_ARGS];
3339 switch (match_token(p, cost_ctrl_tokens, args)) {
3341 match_strlcpy(buf, &args[0], sizeof(buf));
3342 if (!strcmp(buf, "auto"))
3344 else if (!strcmp(buf, "user"))
3350 match_strlcpy(buf, &args[0], sizeof(buf));
3351 if (strcmp(buf, "linear"))
3356 tok = match_token(p, i_lcoef_tokens, args);
3357 if (tok == NR_I_LCOEFS)
3359 if (match_u64(&args[0], &v))
3365 spin_lock_irq(&ioc->lock);
3367 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3368 ioc->user_cost_model = true;
3370 ioc->user_cost_model = false;
3372 ioc_refresh_params(ioc, true);
3373 spin_unlock_irq(&ioc->lock);
3375 put_disk_and_module(disk);
3381 put_disk_and_module(disk);
3385 static struct cftype ioc_files[] = {
.name = "weight",
.flags = CFTYPE_NOT_ON_ROOT,
3389 .seq_show = ioc_weight_show,
3390 .write = ioc_weight_write,
.name = "cost.qos",
.flags = CFTYPE_ONLY_ON_ROOT,
3395 .seq_show = ioc_qos_show,
3396 .write = ioc_qos_write,
3399 .name = "cost.model",
3400 .flags = CFTYPE_ONLY_ON_ROOT,
3401 .seq_show = ioc_cost_model_show,
3402 .write = ioc_cost_model_write,
3407 static struct blkcg_policy blkcg_policy_iocost = {
3408 .dfl_cftypes = ioc_files,
3409 .cpd_alloc_fn = ioc_cpd_alloc,
3410 .cpd_free_fn = ioc_cpd_free,
3411 .pd_alloc_fn = ioc_pd_alloc,
3412 .pd_init_fn = ioc_pd_init,
3413 .pd_free_fn = ioc_pd_free,
3414 .pd_stat_fn = ioc_pd_stat,
3417 static int __init ioc_init(void)
3419 return blkcg_policy_register(&blkcg_policy_iocost);
3422 static void __exit ioc_exit(void)
3424 blkcg_policy_unregister(&blkcg_policy_iocost);
3427 module_init(ioc_init);
3428 module_exit(ioc_exit);