1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
9 * One challenge of controlling IO resources is the lack of a trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide variety of devices well enough. Default
42 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
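 *
 * For illustration only, a user-defined linear model can be installed by
 * writing the device's MAJ:MIN followed by key=value pairs, along the
 * lines of:
 *
 *   echo "8:16 ctrl=user model=linear rbps=488636629 rseqiops=8932 \
 *     rrandiops=8518 wbps=427891549 wseqiops=28755 wrandiops=21940" \
 *     > /sys/fs/cgroup/io.cost.model
 *
 * (the coefficients above are just the builtin SSD QD1 defaults from the
 * autop table further down in this file).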
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
54 * 2-1. Vtime Distribution
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
60 *          root
61 *        /       \
62 *     A (w:100)  B (w:300)
63 *    /        \
64 *  A0 (w:100)  A1 (w:100)
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
71 * up to 1 (WEIGHT_ONE).
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO if doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
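 *
 * As a rough worked example: with a 12.5% hweight, an IO whose absolute
 * cost is 10ms of device time is charged 10ms / 0.125 = 80ms against A0's
 * vtime. A0 can issue it right away only if its vtime cursor plus that
 * 80ms doesn't run ahead of the current device vtime; otherwise the bio
 * waits until the device vtime catches up.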
84 * 2-2. Vrate Adjustment
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
102 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth loss. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
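 *
 * For example (values are illustrative only), QoS control could be
 * enabled on a device with something along the lines of:
 *
 *   echo "8:16 enable=1 ctrl=user rpct=95.00 rlat=25000 wpct=95.00 \
 *     wlat=25000 min=50.00 max=150.00" > /sys/fs/cgroup/io.cost.qos
 *
 * which roughly reads as "consider the device saturated if the 95th
 * percentile read or write completion latency exceeds 25ms, and keep the
 * vrate between 50% and 150%".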
125 * 2-3. Work Conservation
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, working out who
149 * can donate and who should take back how much requires hweight
150 * propagation anyway, making it easier to implement and understand as a separate
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The output looks like the following.
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - delay : Deferred issuer delay induction level and duration
172 * - usages : Usage history
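 *
 * (The script takes the target block device name - "sdb" above - as its
 * argument, and being drgn based it typically needs root access to the
 * live kernel; see the script itself for the exact options.)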
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
186 #include "blk-cgroup.h"
188 #ifdef CONFIG_TRACEPOINTS
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
208 #else /* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
210 #endif /* CONFIG_TRACEPOINTS */
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
226 MARGIN_TARGET_PCT = 50,
228 INUSE_ADJ_STEP_PCT = 25,
230 /* Have some play in timer operations */
233 /* 1/64k is granular enough and can easily be handled w/ u32 */
234 WEIGHT_ONE = 1 << 16,
239 * As vtime is used to calculate the cost of each IO, it needs to
240 * be fairly high precision. For example, it should be able to
241 * represent the cost of a single page worth of discard with
242 * sufficient accuracy. At the same time, it should be able to
243 * represent reasonably long enough durations to be useful and
244 * convenient during operation.
246 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
247 * granularity and days of wrap-around time even at extreme vrates.
249 VTIME_PER_SEC_SHIFT = 37,
250 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
251 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
252 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
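/*
 * Roughly: VTIME_PER_NSEC is 2^37 / 10^9 ~= 137, so even a fraction of a
 * nanosecond of device time is representable, while a u64 vtime only wraps
 * after 2^(64 - 37) seconds - about four years at the nominal vrate and
 * still weeks at the maximum vrate.
 */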
254 /* bound vrate adjustments within two orders of magnitude */
255 VRATE_MIN_PPM = 10000, /* 1% */
256 VRATE_MAX_PPM = 100000000, /* 10000% */
258 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
259 VRATE_CLAMP_ADJ_PCT = 4,
261 /* switch iff the conditions are met for longer than this */
262 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
266 /* if IOs end up waiting for requests, issue less */
267 RQ_WAIT_BUSY_PCT = 5,
269 /* unbusy hysteresis */
273 * The effect of delay is indirect and non-linear and a huge amount of
274 * future debt can accumulate abruptly while unthrottled. Linearly scale
275 * up delay as debt is going up and then let it decay exponentially.
276 * This gives us quick ramp ups while delay is accumulating and long
277 * tails which can help reduce the frequency of debt explosions on
278 * unthrottle. The parameters are experimentally determined.
280 * The delay mechanism provides adequate protection and behavior in many
281 * cases. However, this is far from ideal and falls shorts on both
282 * fronts. The debtors are often throttled too harshly costing a
283 * significant level of fairness and possibly total work while the
284 * protection against their impacts on the system can be choppy and
287 * The shortcoming primarily stems from the fact that, unlike for page
288 * cache, the kernel doesn't have a well-defined back-pressure propagation
289 * mechanism and policies for anonymous memory. Fully addressing this
290 * issue will likely require substantial improvements in the area.
292 MIN_DELAY_THR_PCT = 500,
293 MAX_DELAY_THR_PCT = 25000,
295 MAX_DELAY = 250 * USEC_PER_MSEC,
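/*
 * Concretely, in iocg_kick_delay() below, an overage (vtime plus debt
 * running ahead of the device vtime) of up to 5 periods' worth
 * (MIN_DELAY_THR_PCT) induces no new delay, 250 periods' worth
 * (MAX_DELAY_THR_PCT) maps to MAX_DELAY, and values in between are
 * interpolated linearly.
 */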
297 /* halve debts if avg usage over 100ms is under 50% */
299 DFGV_PERIOD = 100 * USEC_PER_MSEC,
301 /* don't let cmds which take a very long time pin lagging for too long */
302 MAX_LAGGING_PERIODS = 10,
305 * Count IO size in 4k pages. The 12bit shift helps keep the
306 * size-proportional components of the cost calculation within a similar
307 * number of digits as the per-IO cost components.
310 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
311 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
313 /* if apart further than 16M, consider randio for linear model */
314 LCOEF_RANDIO_PAGES = 4096,
323 /* io.cost.qos controls including per-dev enable of the whole controller */
330 /* io.cost.qos params */
341 /* io.cost.model controls */
348 /* builtin linear cost model coefficients */
378 u32 qos[NR_QOS_PARAMS];
379 u64 i_lcoefs[NR_I_LCOEFS];
380 u64 lcoefs[NR_LCOEFS];
381 u32 too_fast_vrate_pct;
382 u32 too_slow_vrate_pct;
398 struct ioc_pcpu_stat {
399 struct ioc_missed missed[2];
401 local64_t rq_wait_ns;
411 struct ioc_params params;
412 struct ioc_margins margins;
419 struct timer_list timer;
420 struct list_head active_iocgs; /* active cgroups */
421 struct ioc_pcpu_stat __percpu *pcpu_stat;
423 enum ioc_running running;
424 atomic64_t vtime_rate;
428 seqcount_spinlock_t period_seqcount;
429 u64 period_at; /* wallclock starttime */
430 u64 period_at_vtime; /* vtime starttime */
432 atomic64_t cur_period; /* inc'd each period */
433 int busy_level; /* saturation history */
435 bool weights_updated;
436 atomic_t hweight_gen; /* for lazy hweights */
438 /* debt forgiveness */
441 u64 dfgv_usage_us_sum;
443 u64 autop_too_fast_at;
444 u64 autop_too_slow_at;
446 bool user_qos_params:1;
447 bool user_cost_model:1;
450 struct iocg_pcpu_stat {
451 local64_t abs_vusage;
461 /* per device-cgroup pair */
463 struct blkg_policy_data pd;
467 * An iocg can get its weight from two sources - an explicit
468 * per-device-cgroup configuration or the default weight of the
469 * cgroup. `cfg_weight` is the explicit per-device-cgroup
470 * configuration. `weight` is the effective considering both
473 * When an idle cgroup becomes active its `active` goes from 0 to
474 * `weight`. `inuse` is the surplus adjusted active weight.
475 * `active` and `inuse` are used to calculate `hweight_active` and
478 * `last_inuse` remembers `inuse` while an iocg is idle to persist
479 * surplus adjustments.
481 * `inuse` may be adjusted dynamically during period. `saved_*` are used
482 * to determine and track adjustments.
492 sector_t cursor; /* to detect randio */
495 * `vtime` is this iocg's vtime cursor which progresses as IOs are
496 * issued. If lagging behind device vtime, the delta represents
497 * the currently available IO budget. If running ahead, the
500 * `vtime_done` is the same but progressed on completion rather
501 * than issue. The delta behind `vtime` represents the cost of
502 * currently in-flight IOs.
505 atomic64_t done_vtime;
508 /* current delay in effect and when it started */
513 * The period this iocg was last active in. Used for deactivation
514 * and invalidating `vtime`.
516 atomic64_t active_period;
517 struct list_head active_list;
519 /* see __propagate_weights() and current_hweight() for details */
520 u64 child_active_sum;
522 u64 child_adjusted_sum;
526 u32 hweight_donating;
527 u32 hweight_after_donation;
529 struct list_head walk_list;
530 struct list_head surplus_list;
532 struct wait_queue_head waitq;
533 struct hrtimer waitq_timer;
535 /* timestamp at the latest activation */
539 struct iocg_pcpu_stat __percpu *pcpu_stat;
540 struct iocg_stat stat;
541 struct iocg_stat last_stat;
542 u64 last_stat_abs_vusage;
548 /* this iocg's depth in the hierarchy and ancestors including self */
550 struct ioc_gq *ancestors[];
555 struct blkcg_policy_data cpd;
556 unsigned int dfl_weight;
566 struct wait_queue_entry wait;
572 struct iocg_wake_ctx {
578 static const struct ioc_params autop[] = {
581 [QOS_RLAT] = 250000, /* 250ms */
583 [QOS_MIN] = VRATE_MIN_PPM,
584 [QOS_MAX] = VRATE_MAX_PPM,
587 [I_LCOEF_RBPS] = 174019176,
588 [I_LCOEF_RSEQIOPS] = 41708,
589 [I_LCOEF_RRANDIOPS] = 370,
590 [I_LCOEF_WBPS] = 178075866,
591 [I_LCOEF_WSEQIOPS] = 42705,
592 [I_LCOEF_WRANDIOPS] = 378,
597 [QOS_RLAT] = 25000, /* 25ms */
599 [QOS_MIN] = VRATE_MIN_PPM,
600 [QOS_MAX] = VRATE_MAX_PPM,
603 [I_LCOEF_RBPS] = 245855193,
604 [I_LCOEF_RSEQIOPS] = 61575,
605 [I_LCOEF_RRANDIOPS] = 6946,
606 [I_LCOEF_WBPS] = 141365009,
607 [I_LCOEF_WSEQIOPS] = 33716,
608 [I_LCOEF_WRANDIOPS] = 26796,
613 [QOS_RLAT] = 25000, /* 25ms */
615 [QOS_MIN] = VRATE_MIN_PPM,
616 [QOS_MAX] = VRATE_MAX_PPM,
619 [I_LCOEF_RBPS] = 488636629,
620 [I_LCOEF_RSEQIOPS] = 8932,
621 [I_LCOEF_RRANDIOPS] = 8518,
622 [I_LCOEF_WBPS] = 427891549,
623 [I_LCOEF_WSEQIOPS] = 28755,
624 [I_LCOEF_WRANDIOPS] = 21940,
626 .too_fast_vrate_pct = 500,
630 [QOS_RLAT] = 5000, /* 5ms */
632 [QOS_MIN] = VRATE_MIN_PPM,
633 [QOS_MAX] = VRATE_MAX_PPM,
636 [I_LCOEF_RBPS] = 3102524156LLU,
637 [I_LCOEF_RSEQIOPS] = 724816,
638 [I_LCOEF_RRANDIOPS] = 778122,
639 [I_LCOEF_WBPS] = 1742780862LLU,
640 [I_LCOEF_WSEQIOPS] = 425702,
641 [I_LCOEF_WRANDIOPS] = 443193,
643 .too_slow_vrate_pct = 10,
648 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
649 * vtime credit shortage and down on device saturation.
651 static u32 vrate_adj_pct[] =
653 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
654 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
655 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
657 static struct blkcg_policy blkcg_policy_iocost;
659 /* accessors and helpers */
660 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
662 return container_of(rqos, struct ioc, rqos);
665 static struct ioc *q_to_ioc(struct request_queue *q)
667 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
670 static const char __maybe_unused *ioc_name(struct ioc *ioc)
672 struct gendisk *disk = ioc->rqos.disk;
676 return disk->disk_name;
679 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
681 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
684 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
686 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
689 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
691 return pd_to_blkg(&iocg->pd);
694 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
696 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
697 struct ioc_cgrp, cpd);
701 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
702 * weight, the more expensive each IO. Must round up.
704 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
706 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
710 * The inverse of abs_cost_to_cost(). Must round up.
712 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
714 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
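/*
 * For example, with hw_inuse at 50% (WEIGHT_ONE / 2), an absolute cost of
 * 10ms worth of vtime becomes a 20ms charge against the cgroup's vtime via
 * abs_cost_to_cost(), and cost_to_abs_cost() maps such a charge back to the
 * device time it represents.
 */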
717 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
718 u64 abs_cost, u64 cost)
720 struct iocg_pcpu_stat *gcs;
722 bio->bi_iocost_cost = cost;
723 atomic64_add(cost, &iocg->vtime);
725 gcs = get_cpu_ptr(iocg->pcpu_stat);
726 local64_add(abs_cost, &gcs->abs_vusage);
730 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
733 spin_lock_irqsave(&iocg->ioc->lock, *flags);
734 spin_lock(&iocg->waitq.lock);
736 spin_lock_irqsave(&iocg->waitq.lock, *flags);
740 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
743 spin_unlock(&iocg->waitq.lock);
744 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
746 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
750 #define CREATE_TRACE_POINTS
751 #include <trace/events/iocost.h>
753 static void ioc_refresh_margins(struct ioc *ioc)
755 struct ioc_margins *margins = &ioc->margins;
756 u32 period_us = ioc->period_us;
757 u64 vrate = ioc->vtime_base_rate;
759 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
760 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
761 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
764 /* latency QoS params changed, update period_us and all the dependent params */
765 static void ioc_refresh_period_us(struct ioc *ioc)
767 u32 ppm, lat, multi, period_us;
769 lockdep_assert_held(&ioc->lock);
771 /* pick the higher latency target */
772 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
773 ppm = ioc->params.qos[QOS_RPPM];
774 lat = ioc->params.qos[QOS_RLAT];
776 ppm = ioc->params.qos[QOS_WPPM];
777 lat = ioc->params.qos[QOS_WLAT];
781 * We want the period to be long enough to contain a healthy number
782 * of IOs while short enough for granular control. Define it as a
783 * multiple of the latency target. Ideally, the multiplier should
784 * be scaled according to the percentile so that it would nominally
785 * contain a certain number of requests. Let's be simpler and
786 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
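 *
 * For example, a 25ms read latency target at the 95th percentile
 * (rlat=25000, rpct=95.00, i.e. ppm=950000) gives
 * multi = max((1000000 - 950000) / 50000, 2) = 2 and a 50ms period,
 * subject to the [MIN_PERIOD, MAX_PERIOD] clamp below.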
789 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
792 period_us = multi * lat;
793 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
795 /* calculate dependent params */
796 ioc->period_us = period_us;
797 ioc->timer_slack_ns = div64_u64(
798 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
800 ioc_refresh_margins(ioc);
803 static int ioc_autop_idx(struct ioc *ioc)
805 int idx = ioc->autop_idx;
806 const struct ioc_params *p = &autop[idx];
811 if (!blk_queue_nonrot(ioc->rqos.disk->queue))
814 /* handle SATA SSDs w/ broken NCQ */
815 if (blk_queue_depth(ioc->rqos.disk->queue) == 1)
816 return AUTOP_SSD_QD1;
818 /* use one of the normal ssd sets */
819 if (idx < AUTOP_SSD_DFL)
820 return AUTOP_SSD_DFL;
822 /* if user is overriding anything, maintain what was there */
823 if (ioc->user_qos_params || ioc->user_cost_model)
826 /* step up/down based on the vrate */
827 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
828 now_ns = ktime_get_ns();
830 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
831 if (!ioc->autop_too_fast_at)
832 ioc->autop_too_fast_at = now_ns;
833 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
836 ioc->autop_too_fast_at = 0;
839 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
840 if (!ioc->autop_too_slow_at)
841 ioc->autop_too_slow_at = now_ns;
842 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
845 ioc->autop_too_slow_at = 0;
852 * Take the following as input
854 * @bps maximum sequential throughput
855 * @seqiops maximum sequential 4k iops
856 * @randiops maximum random 4k iops
858 * and calculate the linear model cost coefficients.
860 * *@page per-page cost 1s / (@bps / 4096)
861 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
862 * *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
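 *
 * As a rough example using the builtin SSD QD1 read coefficients below
 * (rbps=245855193, rseqiops=61575, rrandiops=6946): the per-page cost is
 * about 1s / (245855193 / 4096) ~= 16.7us of device time, the sequential
 * base cost rounds to 0 (1s / 61575 ~= 16.2us is below the page cost),
 * and the random base cost is about 1s / 6946 - 16.7us ~= 127us.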
864 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
865 u64 *page, u64 *seqio, u64 *randio)
869 *page = *seqio = *randio = 0;
872 u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
875 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
881 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
887 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
893 static void ioc_refresh_lcoefs(struct ioc *ioc)
895 u64 *u = ioc->params.i_lcoefs;
896 u64 *c = ioc->params.lcoefs;
898 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
899 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
900 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
901 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
904 static bool ioc_refresh_params(struct ioc *ioc, bool force)
906 const struct ioc_params *p;
909 lockdep_assert_held(&ioc->lock);
911 idx = ioc_autop_idx(ioc);
914 if (idx == ioc->autop_idx && !force)
917 if (idx != ioc->autop_idx) {
918 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
919 ioc->vtime_base_rate = VTIME_PER_USEC;
922 ioc->autop_idx = idx;
923 ioc->autop_too_fast_at = 0;
924 ioc->autop_too_slow_at = 0;
926 if (!ioc->user_qos_params)
927 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
928 if (!ioc->user_cost_model)
929 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
931 ioc_refresh_period_us(ioc);
932 ioc_refresh_lcoefs(ioc);
934 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
935 VTIME_PER_USEC, MILLION);
936 ioc->vrate_max = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MAX] *
937 VTIME_PER_USEC, MILLION);
943 * When an iocg accumulates too much vtime or gets deactivated, we throw away
944 * some vtime, which lowers the overall device utilization. As the exact amount
945 * which is being thrown away is known, we can compensate by accelerating the
946 * vrate accordingly so that the extra vtime generated in the current period
947 * matches what got lost.
949 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
951 s64 pleft = ioc->period_at + ioc->period_us - now->now;
952 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
953 s64 vcomp, vcomp_min, vcomp_max;
955 lockdep_assert_held(&ioc->lock);
957 /* we need some time left in this period */
962 * Calculate how much vrate should be adjusted to offset the error.
963 * Limit the amount of adjustment and deduct the adjusted amount from
966 vcomp = -div64_s64(ioc->vtime_err, pleft);
967 vcomp_min = -(ioc->vtime_base_rate >> 1);
968 vcomp_max = ioc->vtime_base_rate;
969 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
971 ioc->vtime_err += vcomp * pleft;
973 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
975 /* bound how much error can accumulate */
976 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
979 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
980 int nr_lagging, int nr_shortages,
981 int prev_busy_level, u32 *missed_ppm)
983 u64 vrate = ioc->vtime_base_rate;
984 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
986 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
987 if (ioc->busy_level != prev_busy_level || nr_lagging)
988 trace_iocost_ioc_vrate_adj(ioc, vrate,
989 missed_ppm, rq_wait_pct,
990 nr_lagging, nr_shortages);
996 * If vrate is out of bounds, apply clamp gradually as the
997 * bounds can change abruptly. Otherwise, apply busy_level
1000 if (vrate < vrate_min) {
1001 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
1002 vrate = min(vrate, vrate_min);
1003 } else if (vrate > vrate_max) {
1004 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1005 vrate = max(vrate, vrate_max);
1007 int idx = min_t(int, abs(ioc->busy_level),
1008 ARRAY_SIZE(vrate_adj_pct) - 1);
1009 u32 adj_pct = vrate_adj_pct[idx];
1011 if (ioc->busy_level > 0)
1012 adj_pct = 100 - adj_pct;
1014 adj_pct = 100 + adj_pct;
1016 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1017 vrate_min, vrate_max);
1020 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1021 nr_lagging, nr_shortages);
1023 ioc->vtime_base_rate = vrate;
1024 ioc_refresh_margins(ioc);
1027 /* take a snapshot of the current [v]time and vrate */
1028 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1033 now->now_ns = ktime_get();
1034 now->now = ktime_to_us(now->now_ns);
1035 vrate = atomic64_read(&ioc->vtime_rate);
1038 * The current vtime is
1040 * vtime at period start + (wallclock time since the start) * vrate
1042 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1043 * needed, they're seqcount protected.
1046 seq = read_seqcount_begin(&ioc->period_seqcount);
1047 now->vnow = ioc->period_at_vtime +
1048 (now->now - ioc->period_at) * vrate;
1049 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1052 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1054 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1056 write_seqcount_begin(&ioc->period_seqcount);
1057 ioc->period_at = now->now;
1058 ioc->period_at_vtime = now->vnow;
1059 write_seqcount_end(&ioc->period_seqcount);
1061 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1062 add_timer(&ioc->timer);
1066 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1067 * weight sums and propagate upwards accordingly. If @save, the current margin
1068 * is saved to be used as reference for later inuse in-period adjustments.
1070 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1071 bool save, struct ioc_now *now)
1073 struct ioc *ioc = iocg->ioc;
1076 lockdep_assert_held(&ioc->lock);
1079 * For an active leaf node, its inuse shouldn't be zero or exceed
1080 * @active. An active internal node's inuse is solely determined by the
1081 * inuse to active ratio of its children regardless of @inuse.
1083 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1084 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1085 iocg->child_active_sum);
1087 inuse = clamp_t(u32, inuse, 1, active);
1090 iocg->last_inuse = iocg->inuse;
1092 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1094 if (active == iocg->active && inuse == iocg->inuse)
1097 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1098 struct ioc_gq *parent = iocg->ancestors[lvl];
1099 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1100 u32 parent_active = 0, parent_inuse = 0;
1102 /* update the level sums */
1103 parent->child_active_sum += (s32)(active - child->active);
1104 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1105 /* apply the updates */
1106 child->active = active;
1107 child->inuse = inuse;
1110 * The delta between the inuse and active sums indicates how
1111 * much of the weight is being given away. The parent's inuse
1112 * and active should reflect the ratio.
1114 if (parent->child_active_sum) {
1115 parent_active = parent->weight;
1116 parent_inuse = DIV64_U64_ROUND_UP(
1117 parent_active * parent->child_inuse_sum,
1118 parent->child_active_sum);
1121 /* do we need to keep walking up? */
1122 if (parent_active == parent->active &&
1123 parent_inuse == parent->inuse)
1126 active = parent_active;
1127 inuse = parent_inuse;
1130 ioc->weights_updated = true;
1133 static void commit_weights(struct ioc *ioc)
1135 lockdep_assert_held(&ioc->lock);
1137 if (ioc->weights_updated) {
1138 /* paired with rmb in current_hweight(), see there */
1140 atomic_inc(&ioc->hweight_gen);
1141 ioc->weights_updated = false;
1145 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1146 bool save, struct ioc_now *now)
1148 __propagate_weights(iocg, active, inuse, save, now);
1149 commit_weights(iocg->ioc);
1152 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1154 struct ioc *ioc = iocg->ioc;
1159 /* hot path - if uptodate, use cached */
1160 ioc_gen = atomic_read(&ioc->hweight_gen);
1161 if (ioc_gen == iocg->hweight_gen)
1165 * Paired with wmb in commit_weights(). If we saw the updated
1166 * hweight_gen, all the weight updates from __propagate_weights() are
1169 * We can race with weight updates during calculation and get it
1170 * wrong. However, hweight_gen would have changed and a future
1171 * reader will recalculate and we're guaranteed to discard the
1172 * wrong result soon.
1176 hwa = hwi = WEIGHT_ONE;
1177 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1178 struct ioc_gq *parent = iocg->ancestors[lvl];
1179 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1180 u64 active_sum = READ_ONCE(parent->child_active_sum);
1181 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1182 u32 active = READ_ONCE(child->active);
1183 u32 inuse = READ_ONCE(child->inuse);
1185 /* we can race with deactivations and either may read as zero */
1186 if (!active_sum || !inuse_sum)
1189 active_sum = max_t(u64, active, active_sum);
1190 hwa = div64_u64((u64)hwa * active, active_sum);
1192 inuse_sum = max_t(u64, inuse, inuse_sum);
1193 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1196 iocg->hweight_active = max_t(u32, hwa, 1);
1197 iocg->hweight_inuse = max_t(u32, hwi, 1);
1198 iocg->hweight_gen = ioc_gen;
1201 *hw_activep = iocg->hweight_active;
1203 *hw_inusep = iocg->hweight_inuse;
1207 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1208 * other weights stay unchanged.
1210 static u32 current_hweight_max(struct ioc_gq *iocg)
1212 u32 hwm = WEIGHT_ONE;
1213 u32 inuse = iocg->active;
1214 u64 child_inuse_sum;
1217 lockdep_assert_held(&iocg->ioc->lock);
1219 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1220 struct ioc_gq *parent = iocg->ancestors[lvl];
1221 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1223 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1224 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1225 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1226 parent->child_active_sum);
1229 return max_t(u32, hwm, 1);
1232 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1234 struct ioc *ioc = iocg->ioc;
1235 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1236 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1239 lockdep_assert_held(&ioc->lock);
1241 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1242 if (weight != iocg->weight && iocg->active)
1243 propagate_weights(iocg, weight, iocg->inuse, true, now);
1244 iocg->weight = weight;
1247 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1249 struct ioc *ioc = iocg->ioc;
1250 u64 last_period, cur_period;
1255 * If we seem to be already active, just update the stamp to tell the
1256 * timer that we're still active. We don't mind occasional races.
1258 if (!list_empty(&iocg->active_list)) {
1260 cur_period = atomic64_read(&ioc->cur_period);
1261 if (atomic64_read(&iocg->active_period) != cur_period)
1262 atomic64_set(&iocg->active_period, cur_period);
1266 /* racy check on internal node IOs, treat as root level IOs */
1267 if (iocg->child_active_sum)
1270 spin_lock_irq(&ioc->lock);
1275 cur_period = atomic64_read(&ioc->cur_period);
1276 last_period = atomic64_read(&iocg->active_period);
1277 atomic64_set(&iocg->active_period, cur_period);
1279 /* already activated or breaking leaf-only constraint? */
1280 if (!list_empty(&iocg->active_list))
1281 goto succeed_unlock;
1282 for (i = iocg->level - 1; i > 0; i--)
1283 if (!list_empty(&iocg->ancestors[i]->active_list))
1286 if (iocg->child_active_sum)
1290 * Always start with the target budget. On deactivation, we throw away
1291 * anything above it.
1293 vtarget = now->vnow - ioc->margins.target;
1294 vtime = atomic64_read(&iocg->vtime);
1296 atomic64_add(vtarget - vtime, &iocg->vtime);
1297 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1301 * Activate, propagate weight and start period timer if not
1302 * running. Reset hweight_gen to avoid accidental match from
1305 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1306 list_add(&iocg->active_list, &ioc->active_iocgs);
1308 propagate_weights(iocg, iocg->weight,
1309 iocg->last_inuse ?: iocg->weight, true, now);
1311 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1312 last_period, cur_period, vtime);
1314 iocg->activated_at = now->now;
1316 if (ioc->running == IOC_IDLE) {
1317 ioc->running = IOC_RUNNING;
1318 ioc->dfgv_period_at = now->now;
1319 ioc->dfgv_period_rem = 0;
1320 ioc_start_period(ioc, now);
1324 spin_unlock_irq(&ioc->lock);
1328 spin_unlock_irq(&ioc->lock);
1332 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1334 struct ioc *ioc = iocg->ioc;
1335 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1336 u64 tdelta, delay, new_delay;
1337 s64 vover, vover_pct;
1340 lockdep_assert_held(&iocg->waitq.lock);
1342 /* calculate the current delay in effect - 1/2 every second */
1343 tdelta = now->now - iocg->delay_at;
1345 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1349 /* calculate the new delay from the debt amount */
1350 current_hweight(iocg, &hwa, NULL);
1351 vover = atomic64_read(&iocg->vtime) +
1352 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1353 vover_pct = div64_s64(100 * vover,
1354 ioc->period_us * ioc->vtime_base_rate);
1356 if (vover_pct <= MIN_DELAY_THR_PCT)
1358 else if (vover_pct >= MAX_DELAY_THR_PCT)
1359 new_delay = MAX_DELAY;
1361 new_delay = MIN_DELAY +
1362 div_u64((MAX_DELAY - MIN_DELAY) *
1363 (vover_pct - MIN_DELAY_THR_PCT),
1364 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1366 /* pick the higher one and apply */
1367 if (new_delay > delay) {
1368 iocg->delay = new_delay;
1369 iocg->delay_at = now->now;
1373 if (delay >= MIN_DELAY) {
1374 if (!iocg->indelay_since)
1375 iocg->indelay_since = now->now;
1376 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1379 if (iocg->indelay_since) {
1380 iocg->stat.indelay_us += now->now - iocg->indelay_since;
1381 iocg->indelay_since = 0;
1384 blkcg_clear_delay(blkg);
1389 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1390 struct ioc_now *now)
1392 struct iocg_pcpu_stat *gcs;
1394 lockdep_assert_held(&iocg->ioc->lock);
1395 lockdep_assert_held(&iocg->waitq.lock);
1396 WARN_ON_ONCE(list_empty(&iocg->active_list));
1399 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1400 * inuse, donating all of its share to others until its debt is paid off.
1402 if (!iocg->abs_vdebt && abs_cost) {
1403 iocg->indebt_since = now->now;
1404 propagate_weights(iocg, iocg->active, 0, false, now);
1407 iocg->abs_vdebt += abs_cost;
1409 gcs = get_cpu_ptr(iocg->pcpu_stat);
1410 local64_add(abs_cost, &gcs->abs_vusage);
1414 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1415 struct ioc_now *now)
1417 lockdep_assert_held(&iocg->ioc->lock);
1418 lockdep_assert_held(&iocg->waitq.lock);
1420 /* make sure that nobody messed with @iocg */
1421 WARN_ON_ONCE(list_empty(&iocg->active_list));
1422 WARN_ON_ONCE(iocg->inuse > 1);
1424 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1426 /* if debt is paid in full, restore inuse */
1427 if (!iocg->abs_vdebt) {
1428 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1429 iocg->indebt_since = 0;
1431 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1436 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1437 int flags, void *key)
1439 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1440 struct iocg_wake_ctx *ctx = key;
1441 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1443 ctx->vbudget -= cost;
1445 if (ctx->vbudget < 0)
1448 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1449 wait->committed = true;
1452 * autoremove_wake_function() removes the wait entry only when it
1453 * actually changed the task state. We want the wait always removed.
1454 * Remove explicitly and use default_wake_function(). Note that the
1455 * order of operations is important as finish_wait() tests whether
1456 * @wq_entry is removed without grabbing the lock.
1458 default_wake_function(wq_entry, mode, flags, key);
1459 list_del_init_careful(&wq_entry->entry);
1464 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1465 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1466 * addition to iocg->waitq.lock.
1468 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1469 struct ioc_now *now)
1471 struct ioc *ioc = iocg->ioc;
1472 struct iocg_wake_ctx ctx = { .iocg = iocg };
1473 u64 vshortage, expires, oexpires;
1477 lockdep_assert_held(&iocg->waitq.lock);
1479 current_hweight(iocg, &hwa, NULL);
1480 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1483 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1484 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1485 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1486 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1488 lockdep_assert_held(&ioc->lock);
1490 atomic64_add(vpay, &iocg->vtime);
1491 atomic64_add(vpay, &iocg->done_vtime);
1492 iocg_pay_debt(iocg, abs_vpay, now);
1496 if (iocg->abs_vdebt || iocg->delay)
1497 iocg_kick_delay(iocg, now);
1500 * Debt can still be outstanding if we haven't paid all yet or the
1501 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1502 * under debt. Make sure @vbudget reflects the outstanding amount and is
1505 if (iocg->abs_vdebt) {
1506 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1507 vbudget = min_t(s64, 0, vbudget - vdebt);
1511 * Wake up the ones which are due and see how much vtime we'll need for
1512 * the next one. As paying off debt restores hw_inuse, it must be read
1513 * after the above debt payment.
1515 ctx.vbudget = vbudget;
1516 current_hweight(iocg, NULL, &ctx.hw_inuse);
1518 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1520 if (!waitqueue_active(&iocg->waitq)) {
1521 if (iocg->wait_since) {
1522 iocg->stat.wait_us += now->now - iocg->wait_since;
1523 iocg->wait_since = 0;
1528 if (!iocg->wait_since)
1529 iocg->wait_since = now->now;
1531 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1534 /* determine next wakeup, add a timer margin to guarantee chunking */
1535 vshortage = -ctx.vbudget;
1536 expires = now->now_ns +
1537 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1539 expires += ioc->timer_slack_ns;
1541 /* if already active and close enough, don't bother */
1542 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1543 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1544 abs(oexpires - expires) <= ioc->timer_slack_ns)
1547 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1548 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1551 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1553 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1554 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1556 unsigned long flags;
1558 ioc_now(iocg->ioc, &now);
1560 iocg_lock(iocg, pay_debt, &flags);
1561 iocg_kick_waitq(iocg, pay_debt, &now);
1562 iocg_unlock(iocg, pay_debt, &flags);
1564 return HRTIMER_NORESTART;
1567 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1569 u32 nr_met[2] = { };
1570 u32 nr_missed[2] = { };
1574 for_each_online_cpu(cpu) {
1575 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1576 u64 this_rq_wait_ns;
1578 for (rw = READ; rw <= WRITE; rw++) {
1579 u32 this_met = local_read(&stat->missed[rw].nr_met);
1580 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1582 nr_met[rw] += this_met - stat->missed[rw].last_met;
1583 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1584 stat->missed[rw].last_met = this_met;
1585 stat->missed[rw].last_missed = this_missed;
1588 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1589 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1590 stat->last_rq_wait_ns = this_rq_wait_ns;
1593 for (rw = READ; rw <= WRITE; rw++) {
1594 if (nr_met[rw] + nr_missed[rw])
1596 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1597 nr_met[rw] + nr_missed[rw]);
1599 missed_ppm_ar[rw] = 0;
1602 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1603 ioc->period_us * NSEC_PER_USEC);
1606 /* was iocg idle this period? */
1607 static bool iocg_is_idle(struct ioc_gq *iocg)
1609 struct ioc *ioc = iocg->ioc;
1611 /* did something get issued this period? */
1612 if (atomic64_read(&iocg->active_period) ==
1613 atomic64_read(&ioc->cur_period))
1616 /* is something in flight? */
1617 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1624 * Call this function on the target leaf @iocg's to build pre-order traversal
1625 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1626 * ->walk_list and the caller is responsible for dissolving the list after use.
1628 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1629 struct list_head *inner_walk)
1633 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1635 /* find the first ancestor which hasn't been visited yet */
1636 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1637 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1641 /* walk down and visit the inner nodes to get pre-order traversal */
1642 while (++lvl <= iocg->level - 1) {
1643 struct ioc_gq *inner = iocg->ancestors[lvl];
1645 /* record traversal order */
1646 list_add_tail(&inner->walk_list, inner_walk);
1650 /* propagate the deltas to the parent */
1651 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1653 if (iocg->level > 0) {
1654 struct iocg_stat *parent_stat =
1655 &iocg->ancestors[iocg->level - 1]->stat;
1657 parent_stat->usage_us +=
1658 iocg->stat.usage_us - iocg->last_stat.usage_us;
1659 parent_stat->wait_us +=
1660 iocg->stat.wait_us - iocg->last_stat.wait_us;
1661 parent_stat->indebt_us +=
1662 iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1663 parent_stat->indelay_us +=
1664 iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1667 iocg->last_stat = iocg->stat;
1670 /* collect per-cpu counters and propagate the deltas to the parent */
1671 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1673 struct ioc *ioc = iocg->ioc;
1678 lockdep_assert_held(&iocg->ioc->lock);
1680 /* collect per-cpu counters */
1681 for_each_possible_cpu(cpu) {
1682 abs_vusage += local64_read(
1683 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1685 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1686 iocg->last_stat_abs_vusage = abs_vusage;
1688 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1689 iocg->stat.usage_us += iocg->usage_delta_us;
1691 iocg_flush_stat_upward(iocg);
1694 /* get stat counters ready for reading on all active iocgs */
1695 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1697 LIST_HEAD(inner_walk);
1698 struct ioc_gq *iocg, *tiocg;
1700 /* flush leaves and build inner node walk list */
1701 list_for_each_entry(iocg, target_iocgs, active_list) {
1702 iocg_flush_stat_leaf(iocg, now);
1703 iocg_build_inner_walk(iocg, &inner_walk);
1706 /* keep flushing upwards by walking the inner list backwards */
1707 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1708 iocg_flush_stat_upward(iocg);
1709 list_del_init(&iocg->walk_list);
1714 * Determine what @iocg's hweight_inuse should be after donating unused
1715 * capacity. @hwm is the upper bound and used to signal no donation. This
1716 * function also throws away @iocg's excess budget.
1718 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1719 u32 usage, struct ioc_now *now)
1721 struct ioc *ioc = iocg->ioc;
1722 u64 vtime = atomic64_read(&iocg->vtime);
1723 s64 excess, delta, target, new_hwi;
1725 /* debt handling owns inuse for debtors */
1726 if (iocg->abs_vdebt)
1729 /* see whether minimum margin requirement is met */
1730 if (waitqueue_active(&iocg->waitq) ||
1731 time_after64(vtime, now->vnow - ioc->margins.min))
1734 /* throw away excess above target */
1735 excess = now->vnow - vtime - ioc->margins.target;
1737 atomic64_add(excess, &iocg->vtime);
1738 atomic64_add(excess, &iocg->done_vtime);
1740 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1744 * Let delta be the distance between the iocg's and the device's vtimes
1745 * as a fraction of the period duration. Assuming that the iocg will
1746 * consume the usage determined above, we want to determine new_hwi so
1747 * that delta equals MARGIN_TARGET at the end of the next period.
1749 * We need to execute usage worth of IOs while spending the sum of the
1750 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1753 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1755 * Therefore, the new_hwi is:
1757 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
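 *
 * For example (MARGIN_TARGET being 50%), if the iocg used 30% of the
 * device capacity (usage = 0.3) and its vtime currently sits 20% of a
 * period behind the device vtime (delta = 0.2), then
 * new_hwi = 0.3 / (1 - 0.5 + 0.2) ~= 0.43, i.e. a target hweight_inuse
 * of about 43%.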
1759 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1760 now->vnow - ioc->period_at_vtime);
1761 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1762 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1764 return clamp_t(s64, new_hwi, 1, hwm);
1768 * For work-conservation, an iocg which isn't using all of its share should
1769 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1770 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1772 * #1 is mathematically simpler but has the drawback of requiring synchronous
1773 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1774 * change due to donation snapbacks as it has the possibility of grossly
1775 * overshooting what's allowed by the model and vrate.
1777 * #2 is inherently safe with local operations. The donating iocg can easily
1778 * snap back to higher weights when needed without worrying about impacts on
1779 * other nodes as the impacts will be inherently correct. This also makes idle
1780 * iocg activations safe. The only effect activations have is decreasing
1781 * hweight_inuse of others, the right solution to which is for those iocgs to
1782 * snap back to higher weights.
1784 * So, we go with #2. The challenge is calculating how each donating iocg's
1785 * inuse should be adjusted to achieve the target donation amounts. This is done
1786 * using Andy's method described in the following pdf.
1788 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1790 * Given the weights and target after-donation hweight_inuse values, Andy's
1791 * method determines how the proportional distribution should look like at each
1792 * sibling level to maintain the relative relationship between all non-donating
1793 * pairs. To roughly summarize, it divides the tree into donating and
1794 * non-donating parts, calculates global donation rate which is used to
1795 * determine the target hweight_inuse for each node, and then derives per-level
1798 * The following pdf shows that global distribution calculated this way can be
1799 * achieved by scaling inuse weights of donating leaves and propagating the
1800 * adjustments upwards proportionally.
1802 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1804 * Combining the above two, we can determine how each leaf iocg's inuse should
1805 * be adjusted to achieve the target donation.
1807 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1809 * The inline comments use symbols from the last pdf.
1811 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1812 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1813 * t is the sum of the absolute budgets of donating nodes in the subtree.
1814 * w is the weight of the node. w = w_f + w_t
1815 * w_f is the non-donating portion of w. w_f = w * f / b
1816 * w_t is the donating portion of w. w_t = w * t / b
1817 * s is the sum of all sibling weights. s = Sum(w) for siblings
1818 * s_f and s_t are the non-donating and donating portions of s.
1820 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1821 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1822 * after adjustments. Subscript r denotes the root node's values.
1824 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1826 LIST_HEAD(over_hwa);
1827 LIST_HEAD(inner_walk);
1828 struct ioc_gq *iocg, *tiocg, *root_iocg;
1829 u32 after_sum, over_sum, over_target, gamma;
1832 * It's pretty unlikely but possible for the total sum of
1833 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1834 * confuse the following calculations. If such a condition is detected,
1835 * scale down everyone over its full share equally to keep the sum below
1840 list_for_each_entry(iocg, surpluses, surplus_list) {
1843 current_hweight(iocg, &hwa, NULL);
1844 after_sum += iocg->hweight_after_donation;
1846 if (iocg->hweight_after_donation > hwa) {
1847 over_sum += iocg->hweight_after_donation;
1848 list_add(&iocg->walk_list, &over_hwa);
1852 if (after_sum >= WEIGHT_ONE) {
1854 * The delta should be deducted from the over_sum; calculate the
1855 * target over_sum value.
1857 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1858 WARN_ON_ONCE(over_sum <= over_delta);
1859 over_target = over_sum - over_delta;
1864 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1866 iocg->hweight_after_donation =
1867 div_u64((u64)iocg->hweight_after_donation *
1868 over_target, over_sum);
1869 list_del_init(&iocg->walk_list);
1873 * Build pre-order inner node walk list and prepare for donation
1874 * adjustment calculations.
1876 list_for_each_entry(iocg, surpluses, surplus_list) {
1877 iocg_build_inner_walk(iocg, &inner_walk);
1880 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1881 WARN_ON_ONCE(root_iocg->level > 0);
1883 list_for_each_entry(iocg, &inner_walk, walk_list) {
1884 iocg->child_adjusted_sum = 0;
1885 iocg->hweight_donating = 0;
1886 iocg->hweight_after_donation = 0;
1890 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1893 list_for_each_entry(iocg, surpluses, surplus_list) {
1894 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1896 parent->hweight_donating += iocg->hweight_donating;
1897 parent->hweight_after_donation += iocg->hweight_after_donation;
1900 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1901 if (iocg->level > 0) {
1902 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1904 parent->hweight_donating += iocg->hweight_donating;
1905 parent->hweight_after_donation += iocg->hweight_after_donation;
1910 * Calculate inner hwa's (b) and make sure the donation values are
1911 * within the accepted ranges as we're doing low res calculations with
1914 list_for_each_entry(iocg, &inner_walk, walk_list) {
1916 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1918 iocg->hweight_active = DIV64_U64_ROUND_UP(
1919 (u64)parent->hweight_active * iocg->active,
1920 parent->child_active_sum);
1924 iocg->hweight_donating = min(iocg->hweight_donating,
1925 iocg->hweight_active);
1926 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1927 iocg->hweight_donating - 1);
1928 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1929 iocg->hweight_donating <= 1 ||
1930 iocg->hweight_after_donation == 0)) {
1931 pr_warn("iocg: invalid donation weights in ");
1932 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1933 pr_cont(": active=%u donating=%u after=%u\n",
1934 iocg->hweight_active, iocg->hweight_donating,
1935 iocg->hweight_after_donation);
1940 * Calculate the global donation rate (gamma) - the rate to adjust
1941 * non-donating budgets by.
1943 * No need to use 64bit multiplication here as the first operand is
1944 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1946 * We know that there are beneficiary nodes and the sum of the donating
1947 * hweights can't be whole; however, due to the round-ups during hweight
1948 * calculations, root_iocg->hweight_donating might still end up equal to
1949 * or greater than whole. Limit the range when calculating the divider.
1951 * gamma = (1 - t_r') / (1 - t_r)
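 *
 * For example, if the donating nodes hold 40% of the root's hweight
 * before donation (t_r = 0.4) and will keep 10% after (t_r' = 0.1),
 * gamma = (1 - 0.1) / (1 - 0.4) = 1.5, i.e. every non-donating budget
 * is scaled up by 50% so that the total still adds up to WEIGHT_ONE.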
1953 gamma = DIV_ROUND_UP(
1954 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1955 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1958 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1961 list_for_each_entry(iocg, &inner_walk, walk_list) {
1962 struct ioc_gq *parent;
1963 u32 inuse, wpt, wptp;
1966 if (iocg->level == 0) {
1967 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1968 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1969 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1970 WEIGHT_ONE - iocg->hweight_after_donation);
1974 parent = iocg->ancestors[iocg->level - 1];
1976 /* b' = gamma * b_f + b_t' */
1977 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1978 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1979 WEIGHT_ONE) + iocg->hweight_after_donation;
1981 /* w' = s' * b' / b'_p */
1982 inuse = DIV64_U64_ROUND_UP(
1983 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1984 parent->hweight_inuse);
1986 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1987 st = DIV64_U64_ROUND_UP(
1988 iocg->child_active_sum * iocg->hweight_donating,
1989 iocg->hweight_active);
1990 sf = iocg->child_active_sum - st;
1991 wpt = DIV64_U64_ROUND_UP(
1992 (u64)iocg->active * iocg->hweight_donating,
1993 iocg->hweight_active);
1994 wptp = DIV64_U64_ROUND_UP(
1995 (u64)inuse * iocg->hweight_after_donation,
1996 iocg->hweight_inuse);
1998 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
2002 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
2003 * we can finally determine leaf adjustments.
2005 list_for_each_entry(iocg, surpluses, surplus_list) {
2006 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2010 * In-debt iocgs participated in the donation calculation with
2011 * the minimum target hweight_inuse. Configuring inuse
2012 * accordingly would work fine but debt handling expects
2013 * @iocg->inuse to stay at the minimum and we don't want to interfere.
2016 if (iocg->abs_vdebt) {
2017 WARN_ON_ONCE(iocg->inuse > 1);
2021 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2022 inuse = DIV64_U64_ROUND_UP(
2023 parent->child_adjusted_sum * iocg->hweight_after_donation,
2024 parent->hweight_inuse);
2026 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2028 iocg->hweight_inuse,
2029 iocg->hweight_after_donation);
2031 __propagate_weights(iocg, iocg->active, inuse, true, now);
2034 /* walk list should be dissolved after use */
2035 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2036 list_del_init(&iocg->walk_list);
2040 * A low weight iocg can amass a large amount of debt, for example, when
2041 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2042 * memory paired with a slow IO device, the debt can span multiple seconds or
2043 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2044 * up blocked paying its debt while the IO device is idle.
2046 * The following protects against such cases. If the device has been
2047 * sufficiently idle for a while, the debts are halved and delays are recalculated.
2050 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2051 struct ioc_now *now)
2053 struct ioc_gq *iocg;
2054 u64 dur, usage_pct, nr_cycles;
2056 /* if no debtor, reset the cycle */
2058 ioc->dfgv_period_at = now->now;
2059 ioc->dfgv_period_rem = 0;
2060 ioc->dfgv_usage_us_sum = 0;
2065 * Debtors can pass through a lot of writes choking the device and we
2066 * don't want to be forgiving debts while the device is struggling from
2067 * write bursts. If we're missing latency targets, consider the device fully utilized.
2070 if (ioc->busy_level > 0)
2071 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2073 ioc->dfgv_usage_us_sum += usage_us_sum;
2074 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2078 * At least DFGV_PERIOD has passed since the last period. Calculate the
2079 * average usage and reset the period counters.
2081 dur = now->now - ioc->dfgv_period_at;
2082 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
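/*
 * usage_pct is roughly the percentage of the elapsed window the device
 * spent serving IO, summed across the active iocgs (and, when busy_level is
 * positive, padded above so each timer run counts as at least one full
 * period of usage). It's compared against DFGV_USAGE_PCT below.
 */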
2084 ioc->dfgv_period_at = now->now;
2085 ioc->dfgv_usage_us_sum = 0;
2087 /* if the device was too busy, reset everything */
2088 if (usage_pct > DFGV_USAGE_PCT) {
2089 ioc->dfgv_period_rem = 0;
2094 * Usage is lower than threshold. Let's forgive some debts. Debt
2095 * forgiveness runs off of the usual ioc timer but its period usually
2096 * doesn't match ioc's. Compensate the difference by performing the
2097 * reduction as many times as would fit in the duration since the last
2098 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2099 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2100 * reductions is doubled.
2102 nr_cycles = dur + ioc->dfgv_period_rem;
2103 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
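/*
 * For example, if the elapsed time plus the carried-over remainder amounts
 * to 2.5 DFGV_PERIODs, nr_cycles becomes 2 and half a period is carried
 * over in dfgv_period_rem; each debtor's debt and delay below are then
 * shifted right by 2, i.e. quartered.
 */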
2105 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2106 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2108 if (!iocg->abs_vdebt && !iocg->delay)
2111 spin_lock(&iocg->waitq.lock);
2113 old_debt = iocg->abs_vdebt;
2114 old_delay = iocg->delay;
2116 if (iocg->abs_vdebt)
2117 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2119 iocg->delay = iocg->delay >> nr_cycles ?: 1;
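/*
 * Note that "x >> n ?: 1" never drops the value all the way to zero here,
 * so forgiveness alone doesn't clear a debtor; the final clearing and its
 * state transitions go through the regular paths such as iocg_kick_waitq()
 * right below.
 */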
2121 iocg_kick_waitq(iocg, true, now);
2123 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2124 old_debt, iocg->abs_vdebt,
2125 old_delay, iocg->delay);
2127 spin_unlock(&iocg->waitq.lock);
2132 * Check the active iocgs' state to avoid oversleeping and deactivate idle iocgs.
2135 * Since waiters determine the sleep durations based on the vrate
2136 * they saw at the time of sleep, if vrate has increased, some
2137 * waiters could be sleeping for too long. Wake up tardy waiters
2138 * which should have woken up in the last period and expire idle iocgs.
2141 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2144 struct ioc_gq *iocg, *tiocg;
2146 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2147 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2148 !iocg->delay && !iocg_is_idle(iocg))
2151 spin_lock(&iocg->waitq.lock);
2153 /* flush wait and indebt stat deltas */
2154 if (iocg->wait_since) {
2155 iocg->stat.wait_us += now->now - iocg->wait_since;
2156 iocg->wait_since = now->now;
2158 if (iocg->indebt_since) {
2159 iocg->stat.indebt_us +=
2160 now->now - iocg->indebt_since;
2161 iocg->indebt_since = now->now;
2163 if (iocg->indelay_since) {
2164 iocg->stat.indelay_us +=
2165 now->now - iocg->indelay_since;
2166 iocg->indelay_since = now->now;
2169 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2171 /* might be oversleeping vtime / hweight changes, kick */
2172 iocg_kick_waitq(iocg, true, now);
2173 if (iocg->abs_vdebt || iocg->delay)
2175 } else if (iocg_is_idle(iocg)) {
2176 /* no waiter and idle, deactivate */
2177 u64 vtime = atomic64_read(&iocg->vtime);
2181 * @iocg has been inactive for a full duration and will
2182 * have a high budget. Account anything above target as
2183 * error and throw away. On reactivation, it'll start
2184 * with the target budget.
2186 excess = now->vnow - vtime - ioc->margins.target;
2190 current_hweight(iocg, NULL, &old_hwi);
2191 ioc->vtime_err -= div64_u64(excess * old_hwi,
2195 TRACE_IOCG_PATH(iocg_idle, iocg, now,
2196 atomic64_read(&iocg->active_period),
2197 atomic64_read(&ioc->cur_period), vtime);
2198 __propagate_weights(iocg, 0, 0, false, now);
2199 list_del_init(&iocg->active_list);
2202 spin_unlock(&iocg->waitq.lock);
2205 commit_weights(ioc);
2209 static void ioc_timer_fn(struct timer_list *timer)
2211 struct ioc *ioc = container_of(timer, struct ioc, timer);
2212 struct ioc_gq *iocg, *tiocg;
2214 LIST_HEAD(surpluses);
2215 int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2216 u64 usage_us_sum = 0;
2219 u32 missed_ppm[2], rq_wait_pct;
2221 int prev_busy_level;
2223 /* how were the latencies during the period? */
2224 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2226 /* take care of active iocgs */
2227 spin_lock_irq(&ioc->lock);
2229 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2230 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
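/*
 * For example, rpct=95.00 is stored as QOS_RPPM = 950000 ppm, so ppm_rthr
 * becomes 50000: up to 5% of reads may miss the rlat target before QoS is
 * considered violated below.
 */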
2233 period_vtime = now.vnow - ioc->period_at_vtime;
2234 if (WARN_ON_ONCE(!period_vtime)) {
2235 spin_unlock_irq(&ioc->lock);
2239 nr_debtors = ioc_check_iocgs(ioc, &now);
2242 * Wait and indebt stats are flushed above and the donation calculation
2243 * below needs updated usage stats. Let's bring the stats up-to-date.
2245 iocg_flush_stat(&ioc->active_iocgs, &now);
2247 /* calc usage and see whether some weights need to be moved around */
2248 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2249 u64 vdone, vtime, usage_us;
2250 u32 hw_active, hw_inuse;
2253 * Collect unused and wind vtime closer to vnow to prevent
2254 * iocgs from accumulating a large amount of budget.
2256 vdone = atomic64_read(&iocg->done_vtime);
2257 vtime = atomic64_read(&iocg->vtime);
2258 current_hweight(iocg, &hw_active, &hw_inuse);
2261 * Latency QoS detection doesn't account for IOs which are
2262 * in-flight for longer than a period. Detect them by
2263 * comparing vdone against period start. If lagging behind
2264 * IOs from past periods, don't increase vrate.
2266 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2267 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2268 time_after64(vtime, vdone) &&
2269 time_after64(vtime, now.vnow -
2270 MAX_LAGGING_PERIODS * period_vtime) &&
2271 time_before64(vdone, now.vnow - period_vtime))
2275 * Determine absolute usage factoring in in-flight IOs to avoid
2276 * high-latency completions appearing as idle.
2278 usage_us = iocg->usage_delta_us;
2279 usage_us_sum += usage_us;
2281 /* see whether there's surplus vtime */
2282 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2283 if (hw_inuse < hw_active ||
2284 (!waitqueue_active(&iocg->waitq) &&
2285 time_before64(vtime, now.vnow - ioc->margins.low))) {
2286 u32 hwa, old_hwi, hwm, new_hwi, usage;
2289 if (vdone != vtime) {
2290 u64 inflight_us = DIV64_U64_ROUND_UP(
2291 cost_to_abs_cost(vtime - vdone, hw_inuse),
2292 ioc->vtime_base_rate);
2294 usage_us = max(usage_us, inflight_us);
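/*
 * inflight_us converts the not-yet-completed vtime (vtime - vdone) back
 * into absolute device time at the current in-use hweight and base vrate,
 * so IOs still in flight don't make the iocg look idle.
 */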
2297 /* convert to hweight based usage ratio */
2298 if (time_after64(iocg->activated_at, ioc->period_at))
2299 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2301 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2303 usage = clamp_t(u32,
2304 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2309 * Already donating or accumulated enough to start.
2310 * Determine the donation amount.
2312 current_hweight(iocg, &hwa, &old_hwi);
2313 hwm = current_hweight_max(iocg);
2314 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2317 * Donation calculation assumes hweight_after_donation
2318 * to be positive, a condition that a donor w/ hwa < 2
2319 * can't meet. Don't bother with donation if hwa is
2320 * below 2. It's not going to make a meaningful difference anyway.
2323 if (new_hwi < hwm && hwa >= 2) {
2324 iocg->hweight_donating = hwa;
2325 iocg->hweight_after_donation = new_hwi;
2326 list_add(&iocg->surplus_list, &surpluses);
2327 } else if (!iocg->abs_vdebt) {
2329 * @iocg doesn't have enough to donate. Reset
2330 * its inuse to active.
2332 * Don't reset debtors as their inuses are
2333 * owned by debt handling. This shouldn't affect
2334 * donation calculation in any meaningful way
2335 * as @iocg doesn't have a meaningful amount of share anyway.
2338 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2339 iocg->inuse, iocg->active,
2340 iocg->hweight_inuse, new_hwi);
2342 __propagate_weights(iocg, iocg->active,
2343 iocg->active, true, &now);
2347 /* genuinely short on vtime */
2352 if (!list_empty(&surpluses) && nr_shortages)
2353 transfer_surpluses(&surpluses, &now);
2355 commit_weights(ioc);
2357 /* surplus list should be dissolved after use */
2358 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2359 list_del_init(&iocg->surplus_list);
2362 * If q is getting clogged or we're missing too many latency targets,
2363 * we're issuing too much IO and should lower the vtime rate. If we're
2364 * meeting latency targets but experiencing shortages without surpluses, we're too stingy
2365 * and should increase vtime rate.
2367 prev_busy_level = ioc->busy_level;
2368 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2369 missed_ppm[READ] > ppm_rthr ||
2370 missed_ppm[WRITE] > ppm_wthr) {
2371 /* clearly missing QoS targets, slow down vrate */
2372 ioc->busy_level = max(ioc->busy_level, 0);
2374 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2375 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2376 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2377 /* QoS targets are being met with >25% margin */
2380 * We're throttling while the device has spare
2381 * capacity. If vrate was being slowed down, stop.
2383 ioc->busy_level = min(ioc->busy_level, 0);
2386 * If there are IOs spanning multiple periods, wait
2387 * them out before pushing the device harder.
2393 * Nobody is being throttled and the users aren't
2394 * issuing enough IOs to saturate the device. We
2395 * simply don't know how close the device is to
2396 * saturation. Coast.
2398 ioc->busy_level = 0;
2401 /* inside the hysteresis margin, we're good */
2402 ioc->busy_level = 0;
2405 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2407 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2408 prev_busy_level, missed_ppm);
2410 ioc_refresh_params(ioc, false);
2412 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2415 * This period is done. Move on to the next one. If nothing's
2416 * going on with the device, stop the timer.
2418 atomic64_inc(&ioc->cur_period);
2420 if (ioc->running != IOC_STOP) {
2421 if (!list_empty(&ioc->active_iocgs)) {
2422 ioc_start_period(ioc, &now);
2424 ioc->busy_level = 0;
2426 ioc->running = IOC_IDLE;
2429 ioc_refresh_vrate(ioc, &now);
2432 spin_unlock_irq(&ioc->lock);
2435 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2436 u64 abs_cost, struct ioc_now *now)
2438 struct ioc *ioc = iocg->ioc;
2439 struct ioc_margins *margins = &ioc->margins;
2440 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2443 u64 cost, new_inuse;
2445 current_hweight(iocg, NULL, &hwi);
2447 cost = abs_cost_to_cost(abs_cost, hwi);
2448 margin = now->vnow - vtime - cost;
2450 /* debt handling owns inuse for debtors */
2451 if (iocg->abs_vdebt)
2455 * We only increase inuse during the period and do so only if the margin has
2456 * deteriorated since the previous adjustment.
2458 if (margin >= iocg->saved_margin || margin >= margins->low ||
2459 iocg->inuse == iocg->active)
2462 spin_lock_irq(&ioc->lock);
2464 /* we own inuse only when @iocg is in the normal active state */
2465 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2466 spin_unlock_irq(&ioc->lock);
2471 * Bump up inuse till @abs_cost fits in the existing budget.
2472 * adj_step must be determined after acquiring ioc->lock - we might
2473 * have raced and lost to another thread for activation and could
2474 * be reading 0 iocg->active before ioc->lock which will lead to an infinite loop.
2477 new_inuse = iocg->inuse;
2478 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2480 new_inuse = new_inuse + adj_step;
2481 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2482 current_hweight(iocg, NULL, &hwi);
2483 cost = abs_cost_to_cost(abs_cost, hwi);
2484 } while (time_after64(vtime + cost, now->vnow) &&
2485 iocg->inuse != iocg->active);
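/*
 * Each pass above raises inuse by adj_step (a fixed percentage of the
 * active weight), which raises hweight_inuse and therefore shrinks the
 * cost charged for @abs_cost, until the IO fits within vnow or inuse has
 * been raised all the way to active.
 */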
2487 spin_unlock_irq(&ioc->lock);
2489 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2490 old_inuse, iocg->inuse, old_hwi, hwi);
2495 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2496 bool is_merge, u64 *costp)
2498 struct ioc *ioc = iocg->ioc;
2499 u64 coef_seqio, coef_randio, coef_page;
2500 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2504 switch (bio_op(bio)) {
2506 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2507 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2508 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2511 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2512 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2513 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2520 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2521 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2525 if (seek_pages > LCOEF_RANDIO_PAGES) {
2526 cost += coef_randio;
2531 cost += pages * coef_page;
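/*
 * Rough illustrative example, assuming 4KiB pages: a 64KiB bio spans 16
 * pages, so a random IO is charged about coef_randio + 16 * coef_page
 * vtime units while a sequential one costs about coef_seqio + 16 * coef_page.
 */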
2536 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2540 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2544 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2547 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2549 switch (req_op(rq)) {
2551 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2554 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2561 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2565 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2569 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2571 struct blkcg_gq *blkg = bio->bi_blkg;
2572 struct ioc *ioc = rqos_to_ioc(rqos);
2573 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2575 struct iocg_wait wait;
2576 u64 abs_cost, cost, vtime;
2577 bool use_debt, ioc_locked;
2578 unsigned long flags;
2580 /* bypass IOs if disabled, still initializing, or for root cgroup */
2581 if (!ioc->enabled || !iocg || !iocg->level)
2584 /* calculate the absolute vtime cost */
2585 abs_cost = calc_vtime_cost(bio, iocg, false);
2589 if (!iocg_activate(iocg, &now))
2592 iocg->cursor = bio_end_sector(bio);
2593 vtime = atomic64_read(&iocg->vtime);
2594 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2597 * If no one's waiting and within budget, issue right away. The
2598 * tests are racy but the races aren't systemic - we only miss once
2599 * in a while which is fine.
2601 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2602 time_before_eq64(vtime + cost, now.vnow)) {
2603 iocg_commit_bio(iocg, bio, abs_cost, cost);
2608 * We're over budget. This can be handled in two ways. IOs which may
2609 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2610 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2611 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2612 * whether debt handling is needed and acquire locks accordingly.
2614 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2615 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2617 iocg_lock(iocg, ioc_locked, &flags);
2620 * @iocg must stay activated for debt and waitq handling. Deactivation
2621 * is synchronized against both ioc->lock and waitq.lock and we won't
2622 * get deactivated as long as we're waiting or have debt, so we're good
2623 * if we're activated here. In the unlikely case that we aren't, just issue the IO.
2626 if (unlikely(list_empty(&iocg->active_list))) {
2627 iocg_unlock(iocg, ioc_locked, &flags);
2628 iocg_commit_bio(iocg, bio, abs_cost, cost);
2633 * We're over budget. If @bio has to be issued regardless, remember
2634 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2635 * off the debt before waking more IOs.
2637 * This way, the debt is continuously paid off each period with the
2638 * actual budget available to the cgroup. If we just wound vtime, we
2639 * would incorrectly use the current hw_inuse for the entire amount
2640 * which, for example, can lead to the cgroup staying blocked for a
2641 * long time even with substantially raised hw_inuse.
2643 * An iocg with vdebt should stay online so that the timer can keep
2644 * deducting its vdebt and [de]activating the use_delay mechanism
2645 * accordingly. We don't want to race against the timer trying to
2646 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2647 * penalizing the cgroup and its descendants.
2650 iocg_incur_debt(iocg, abs_cost, &now);
2651 if (iocg_kick_delay(iocg, &now))
2652 blkcg_schedule_throttle(rqos->disk,
2653 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2654 iocg_unlock(iocg, ioc_locked, &flags);
2658 /* guarantee that iocgs w/ waiters have maximum inuse */
2659 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2661 iocg_unlock(iocg, false, &flags);
2665 propagate_weights(iocg, iocg->active, iocg->active, true,
2670 * Append self to the waitq and schedule the wakeup timer if we're
2671 * the first waiter. The timer duration is calculated based on the
2672 * current vrate. vtime and hweight changes can make it too short
2673 * or too long. Each wait entry records the absolute cost it's
2674 * waiting for to allow re-evaluation using a custom wait entry.
2676 * If too short, the timer simply reschedules itself. If too long,
2677 * the period timer will notice and trigger wakeups.
2679 * All waiters are on iocg->waitq and the wait states are
2680 * synchronized using waitq.lock.
2682 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2683 wait.wait.private = current;
2685 wait.abs_cost = abs_cost;
2686 wait.committed = false; /* will be set true by waker */
2688 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2689 iocg_kick_waitq(iocg, ioc_locked, &now);
2691 iocg_unlock(iocg, ioc_locked, &flags);
2694 set_current_state(TASK_UNINTERRUPTIBLE);
2700 /* waker already committed us, proceed */
2701 finish_wait(&iocg->waitq, &wait.wait);
2704 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2707 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2708 struct ioc *ioc = rqos_to_ioc(rqos);
2709 sector_t bio_end = bio_end_sector(bio);
2711 u64 vtime, abs_cost, cost;
2712 unsigned long flags;
2714 /* bypass if disabled, still initializing, or for root cgroup */
2715 if (!ioc->enabled || !iocg || !iocg->level)
2718 abs_cost = calc_vtime_cost(bio, iocg, true);
2724 vtime = atomic64_read(&iocg->vtime);
2725 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2727 /* update cursor if backmerging into the request at the cursor */
2728 if (blk_rq_pos(rq) < bio_end &&
2729 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2730 iocg->cursor = bio_end;
2733 * Charge if there's enough vtime budget and the existing request has cost assigned.
2736 if (rq->bio && rq->bio->bi_iocost_cost &&
2737 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2738 iocg_commit_bio(iocg, bio, abs_cost, cost);
2743 * Otherwise, account it as debt if @iocg is online, which it should
2744 * be for the vast majority of cases. See debt handling in
2745 * ioc_rqos_throttle() for details.
2747 spin_lock_irqsave(&ioc->lock, flags);
2748 spin_lock(&iocg->waitq.lock);
2750 if (likely(!list_empty(&iocg->active_list))) {
2751 iocg_incur_debt(iocg, abs_cost, &now);
2752 if (iocg_kick_delay(iocg, &now))
2753 blkcg_schedule_throttle(rqos->disk,
2754 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2756 iocg_commit_bio(iocg, bio, abs_cost, cost);
2759 spin_unlock(&iocg->waitq.lock);
2760 spin_unlock_irqrestore(&ioc->lock, flags);
2763 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2765 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2767 if (iocg && bio->bi_iocost_cost)
2768 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2771 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2773 struct ioc *ioc = rqos_to_ioc(rqos);
2774 struct ioc_pcpu_stat *ccs;
2775 u64 on_q_ns, rq_wait_ns, size_nsec;
2778 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2781 switch (req_op(rq)) {
2794 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2795 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2796 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
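/*
 * size_nsec is the on-device time the size-proportional part of the cost
 * model attributes to this request; it's subtracted from the queued time
 * below so that large transfers aren't counted as latency misses merely
 * because moving the data takes longer.
 */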
2798 ccs = get_cpu_ptr(ioc->pcpu_stat);
2800 if (on_q_ns <= size_nsec ||
2801 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2802 local_inc(&ccs->missed[rw].nr_met);
2804 local_inc(&ccs->missed[rw].nr_missed);
2806 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2811 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2813 struct ioc *ioc = rqos_to_ioc(rqos);
2815 spin_lock_irq(&ioc->lock);
2816 ioc_refresh_params(ioc, false);
2817 spin_unlock_irq(&ioc->lock);
2820 static void ioc_rqos_exit(struct rq_qos *rqos)
2822 struct ioc *ioc = rqos_to_ioc(rqos);
2824 blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iocost);
2826 spin_lock_irq(&ioc->lock);
2827 ioc->running = IOC_STOP;
2828 spin_unlock_irq(&ioc->lock);
2830 timer_shutdown_sync(&ioc->timer);
2831 free_percpu(ioc->pcpu_stat);
2835 static const struct rq_qos_ops ioc_rqos_ops = {
2836 .throttle = ioc_rqos_throttle,
2837 .merge = ioc_rqos_merge,
2838 .done_bio = ioc_rqos_done_bio,
2839 .done = ioc_rqos_done,
2840 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2841 .exit = ioc_rqos_exit,
2844 static int blk_iocost_init(struct gendisk *disk)
2849 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2853 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2854 if (!ioc->pcpu_stat) {
2859 for_each_possible_cpu(cpu) {
2860 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2862 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2863 local_set(&ccs->missed[i].nr_met, 0);
2864 local_set(&ccs->missed[i].nr_missed, 0);
2866 local64_set(&ccs->rq_wait_ns, 0);
2869 spin_lock_init(&ioc->lock);
2870 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2871 INIT_LIST_HEAD(&ioc->active_iocgs);
2873 ioc->running = IOC_IDLE;
2874 ioc->vtime_base_rate = VTIME_PER_USEC;
2875 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2876 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2877 ioc->period_at = ktime_to_us(ktime_get());
2878 atomic64_set(&ioc->cur_period, 0);
2879 atomic_set(&ioc->hweight_gen, 0);
2881 spin_lock_irq(&ioc->lock);
2882 ioc->autop_idx = AUTOP_INVALID;
2883 ioc_refresh_params(ioc, true);
2884 spin_unlock_irq(&ioc->lock);
2887 * rqos must be added before activation to allow ioc_pd_init() to
2888 * look up the ioc from q. This means that the rqos methods may get
2889 * called before policy activation completes, so they can't assume that the
2890 * target bio has an iocg associated and need to test for a NULL iocg.
2892 ret = rq_qos_add(&ioc->rqos, disk, RQ_QOS_COST, &ioc_rqos_ops);
2896 ret = blkcg_activate_policy(disk, &blkcg_policy_iocost);
2902 rq_qos_del(&ioc->rqos);
2904 free_percpu(ioc->pcpu_stat);
2909 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2911 struct ioc_cgrp *iocc;
2913 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2917 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2921 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2923 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2926 static struct blkg_policy_data *ioc_pd_alloc(struct gendisk *disk,
2927 struct blkcg *blkcg, gfp_t gfp)
2929 int levels = blkcg->css.cgroup->level + 1;
2930 struct ioc_gq *iocg;
2932 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp,
2937 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2938 if (!iocg->pcpu_stat) {
2946 static void ioc_pd_init(struct blkg_policy_data *pd)
2948 struct ioc_gq *iocg = pd_to_iocg(pd);
2949 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2950 struct ioc *ioc = q_to_ioc(blkg->q);
2952 struct blkcg_gq *tblkg;
2953 unsigned long flags;
2958 atomic64_set(&iocg->vtime, now.vnow);
2959 atomic64_set(&iocg->done_vtime, now.vnow);
2960 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2961 INIT_LIST_HEAD(&iocg->active_list);
2962 INIT_LIST_HEAD(&iocg->walk_list);
2963 INIT_LIST_HEAD(&iocg->surplus_list);
2964 iocg->hweight_active = WEIGHT_ONE;
2965 iocg->hweight_inuse = WEIGHT_ONE;
2967 init_waitqueue_head(&iocg->waitq);
2968 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2969 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2971 iocg->level = blkg->blkcg->css.cgroup->level;
2973 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2974 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2975 iocg->ancestors[tiocg->level] = tiocg;
2978 spin_lock_irqsave(&ioc->lock, flags);
2979 weight_updated(iocg, &now);
2980 spin_unlock_irqrestore(&ioc->lock, flags);
2983 static void ioc_pd_free(struct blkg_policy_data *pd)
2985 struct ioc_gq *iocg = pd_to_iocg(pd);
2986 struct ioc *ioc = iocg->ioc;
2987 unsigned long flags;
2990 spin_lock_irqsave(&ioc->lock, flags);
2992 if (!list_empty(&iocg->active_list)) {
2996 propagate_weights(iocg, 0, 0, false, &now);
2997 list_del_init(&iocg->active_list);
3000 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
3001 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3003 spin_unlock_irqrestore(&ioc->lock, flags);
3005 hrtimer_cancel(&iocg->waitq_timer);
3007 free_percpu(iocg->pcpu_stat);
3011 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3013 struct ioc_gq *iocg = pd_to_iocg(pd);
3014 struct ioc *ioc = iocg->ioc;
3019 if (iocg->level == 0) {
3020 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3021 ioc->vtime_base_rate * 10000,
3023 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
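/*
 * cost.vrate is reported as a percentage of the device's nominal speed
 * with two decimals; e.g. with vtime_base_rate at its initial
 * VTIME_PER_USEC (see blk_iocost_init()), vp10k would presumably be 10000
 * and this would print "cost.vrate=100.00".
 */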
3026 seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3028 if (blkcg_debug_stats)
3029 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3030 iocg->last_stat.wait_us,
3031 iocg->last_stat.indebt_us,
3032 iocg->last_stat.indelay_us);
3035 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3038 const char *dname = blkg_dev_name(pd->blkg);
3039 struct ioc_gq *iocg = pd_to_iocg(pd);
3041 if (dname && iocg->cfg_weight)
3042 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3047 static int ioc_weight_show(struct seq_file *sf, void *v)
3049 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3050 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3052 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
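/*
 * Weights are stored scaled by WEIGHT_ONE for the fixed-point hweight
 * math, so dividing here yields the user-visible value, e.g. the default
 * maps back to CGROUP_WEIGHT_DFL (100).
 */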
3053 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3054 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3058 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3059 size_t nbytes, loff_t off)
3061 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3062 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3063 struct blkg_conf_ctx ctx;
3065 struct ioc_gq *iocg;
3069 if (!strchr(buf, ':')) {
3070 struct blkcg_gq *blkg;
3072 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3075 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3078 spin_lock_irq(&blkcg->lock);
3079 iocc->dfl_weight = v * WEIGHT_ONE;
3080 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3081 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3084 spin_lock(&iocg->ioc->lock);
3085 ioc_now(iocg->ioc, &now);
3086 weight_updated(iocg, &now);
3087 spin_unlock(&iocg->ioc->lock);
3090 spin_unlock_irq(&blkcg->lock);
3095 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3099 iocg = blkg_to_iocg(ctx.blkg);
3101 if (!strncmp(ctx.body, "default", 7)) {
3104 if (!sscanf(ctx.body, "%u", &v))
3106 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3110 spin_lock(&iocg->ioc->lock);
3111 iocg->cfg_weight = v * WEIGHT_ONE;
3112 ioc_now(iocg->ioc, &now);
3113 weight_updated(iocg, &now);
3114 spin_unlock(&iocg->ioc->lock);
3116 blkg_conf_finish(&ctx);
3120 blkg_conf_finish(&ctx);
3124 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3127 const char *dname = blkg_dev_name(pd->blkg);
3128 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3133 spin_lock_irq(&ioc->lock);
3134 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3135 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3136 ioc->params.qos[QOS_RPPM] / 10000,
3137 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3138 ioc->params.qos[QOS_RLAT],
3139 ioc->params.qos[QOS_WPPM] / 10000,
3140 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3141 ioc->params.qos[QOS_WLAT],
3142 ioc->params.qos[QOS_MIN] / 10000,
3143 ioc->params.qos[QOS_MIN] % 10000 / 100,
3144 ioc->params.qos[QOS_MAX] / 10000,
3145 ioc->params.qos[QOS_MAX] % 10000 / 100);
3146 spin_unlock_irq(&ioc->lock);
3150 static int ioc_qos_show(struct seq_file *sf, void *v)
3152 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3154 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3155 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3159 static const match_table_t qos_ctrl_tokens = {
3160 { QOS_ENABLE, "enable=%u" },
3161 { QOS_CTRL, "ctrl=%s" },
3162 { NR_QOS_CTRL_PARAMS, NULL },
3165 static const match_table_t qos_tokens = {
3166 { QOS_RPPM, "rpct=%s" },
3167 { QOS_RLAT, "rlat=%u" },
3168 { QOS_WPPM, "wpct=%s" },
3169 { QOS_WLAT, "wlat=%u" },
3170 { QOS_MIN, "min=%s" },
3171 { QOS_MAX, "max=%s" },
3172 { NR_QOS_PARAMS, NULL },
3175 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3176 size_t nbytes, loff_t off)
3178 struct block_device *bdev;
3179 struct gendisk *disk;
3181 u32 qos[NR_QOS_PARAMS];
3186 bdev = blkcg_conf_open_bdev(&input);
3188 return PTR_ERR(bdev);
3190 disk = bdev->bd_disk;
3191 if (!queue_is_mq(disk->queue)) {
3196 ioc = q_to_ioc(disk->queue);
3198 ret = blk_iocost_init(disk);
3201 ioc = q_to_ioc(disk->queue);
3204 blk_mq_freeze_queue(disk->queue);
3205 blk_mq_quiesce_queue(disk->queue);
3207 spin_lock_irq(&ioc->lock);
3208 memcpy(qos, ioc->params.qos, sizeof(qos));
3209 enable = ioc->enabled;
3210 user = ioc->user_qos_params;
3212 while ((p = strsep(&input, " \t\n"))) {
3213 substring_t args[MAX_OPT_ARGS];
3221 switch (match_token(p, qos_ctrl_tokens, args)) {
3223 if (match_u64(&args[0], &v))
3228 match_strlcpy(buf, &args[0], sizeof(buf));
3229 if (!strcmp(buf, "auto"))
3231 else if (!strcmp(buf, "user"))
3238 tok = match_token(p, qos_tokens, args);
3242 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3245 if (cgroup_parse_float(buf, 2, &v))
3247 if (v < 0 || v > 10000)
3253 if (match_u64(&args[0], &v))
3259 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3262 if (cgroup_parse_float(buf, 2, &v))
3266 qos[tok] = clamp_t(s64, v * 100,
3267 VRATE_MIN_PPM, VRATE_MAX_PPM);
3275 if (qos[QOS_MIN] > qos[QOS_MAX])
3279 blk_stat_enable_accounting(disk->queue);
3280 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3281 ioc->enabled = true;
3282 wbt_disable_default(disk);
3284 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3285 ioc->enabled = false;
3286 wbt_enable_default(disk);
3290 memcpy(ioc->params.qos, qos, sizeof(qos));
3291 ioc->user_qos_params = true;
3293 ioc->user_qos_params = false;
3296 ioc_refresh_params(ioc, true);
3297 spin_unlock_irq(&ioc->lock);
3299 blk_mq_unquiesce_queue(disk->queue);
3300 blk_mq_unfreeze_queue(disk->queue);
3302 blkdev_put_no_open(bdev);
3305 spin_unlock_irq(&ioc->lock);
3307 blk_mq_unquiesce_queue(disk->queue);
3308 blk_mq_unfreeze_queue(disk->queue);
3312 blkdev_put_no_open(bdev);
3316 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3317 struct blkg_policy_data *pd, int off)
3319 const char *dname = blkg_dev_name(pd->blkg);
3320 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3321 u64 *u = ioc->params.i_lcoefs;
3326 spin_lock_irq(&ioc->lock);
3327 seq_printf(sf, "%s ctrl=%s model=linear "
3328 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3329 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3330 dname, ioc->user_cost_model ? "user" : "auto",
3331 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3332 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3333 spin_unlock_irq(&ioc->lock);
3337 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3339 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3341 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3342 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3346 static const match_table_t cost_ctrl_tokens = {
3347 { COST_CTRL, "ctrl=%s" },
3348 { COST_MODEL, "model=%s" },
3349 { NR_COST_CTRL_PARAMS, NULL },
3352 static const match_table_t i_lcoef_tokens = {
3353 { I_LCOEF_RBPS, "rbps=%u" },
3354 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3355 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3356 { I_LCOEF_WBPS, "wbps=%u" },
3357 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3358 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3359 { NR_I_LCOEFS, NULL },
3362 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3363 size_t nbytes, loff_t off)
3365 struct block_device *bdev;
3366 struct request_queue *q;
3373 bdev = blkcg_conf_open_bdev(&input);
3375 return PTR_ERR(bdev);
3377 q = bdev_get_queue(bdev);
3378 if (!queue_is_mq(q)) {
3385 ret = blk_iocost_init(bdev->bd_disk);
3391 blk_mq_freeze_queue(q);
3392 blk_mq_quiesce_queue(q);
3394 spin_lock_irq(&ioc->lock);
3395 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3396 user = ioc->user_cost_model;
3398 while ((p = strsep(&input, " \t\n"))) {
3399 substring_t args[MAX_OPT_ARGS];
3407 switch (match_token(p, cost_ctrl_tokens, args)) {
3409 match_strlcpy(buf, &args[0], sizeof(buf));
3410 if (!strcmp(buf, "auto"))
3412 else if (!strcmp(buf, "user"))
3418 match_strlcpy(buf, &args[0], sizeof(buf));
3419 if (strcmp(buf, "linear"))
3424 tok = match_token(p, i_lcoef_tokens, args);
3425 if (tok == NR_I_LCOEFS)
3427 if (match_u64(&args[0], &v))
3434 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3435 ioc->user_cost_model = true;
3437 ioc->user_cost_model = false;
3439 ioc_refresh_params(ioc, true);
3440 spin_unlock_irq(&ioc->lock);
3442 blk_mq_unquiesce_queue(q);
3443 blk_mq_unfreeze_queue(q);
3445 blkdev_put_no_open(bdev);
3449 spin_unlock_irq(&ioc->lock);
3451 blk_mq_unquiesce_queue(q);
3452 blk_mq_unfreeze_queue(q);
3456 blkdev_put_no_open(bdev);
3460 static struct cftype ioc_files[] = {
3463 .flags = CFTYPE_NOT_ON_ROOT,
3464 .seq_show = ioc_weight_show,
3465 .write = ioc_weight_write,
3469 .flags = CFTYPE_ONLY_ON_ROOT,
3470 .seq_show = ioc_qos_show,
3471 .write = ioc_qos_write,
3474 .name = "cost.model",
3475 .flags = CFTYPE_ONLY_ON_ROOT,
3476 .seq_show = ioc_cost_model_show,
3477 .write = ioc_cost_model_write,
3482 static struct blkcg_policy blkcg_policy_iocost = {
3483 .dfl_cftypes = ioc_files,
3484 .cpd_alloc_fn = ioc_cpd_alloc,
3485 .cpd_free_fn = ioc_cpd_free,
3486 .pd_alloc_fn = ioc_pd_alloc,
3487 .pd_init_fn = ioc_pd_init,
3488 .pd_free_fn = ioc_pd_free,
3489 .pd_stat_fn = ioc_pd_stat,
3492 static int __init ioc_init(void)
3494 return blkcg_policy_register(&blkcg_policy_iocost);
3497 static void __exit ioc_exit(void)
3499 blkcg_policy_unregister(&blkcg_policy_iocost);
3502 module_init(ioc_init);
3503 module_exit(ioc_exit);