1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
9 * One challenge of controlling IO resources is the lack of trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide variety of devices well enough. Default
42 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
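 *
 * As a rough illustration (the authoritative syntax lives in
 * Documentation/admin-guide/cgroup-v2.rst), a linear model written to
 * io.cost.model looks along the lines of the following, here using the
 * default HDD coefficients from the autop table further below:
 *
 *  8:16 ctrl=user model=linear rbps=174019176 rseqiops=41708 rrandiops=370
 *       wbps=178075866 wseqiops=42705 wrandiops=378
 *
 * where 8:16 is the MAJ:MIN of the target device and the keys mirror the
 * I_LCOEF_* parameters.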
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
54 * 2-1. Vtime Distribution
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
64 * A0 (w:100) A1 (w:100)
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
71 * up to 1 (WEIGHT_ONE).
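 *
 * As a sketch of how the flattened shares come about, an iocg's hweight
 * is the product of its weight fractions at each level of the hierarchy.
 * In the example above, A0's hweight is (100 / (100 + 300)) *
 * (100 / (100 + 100)) = 25% * 50% = 12.5%.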
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO if doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
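 *
 * As a rough sketch (simplified; the real check also considers waiters
 * and accumulated debt), issuing boils down to:
 *
 *	if (iocg_vtime + cost <= device_vnow)
 *		issue and charge cost to iocg_vtime;
 *	else
 *		wait until device_vnow catches up;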
84 * 2-2. Vrate Adjustment
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should speed up.
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
102 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default busy signal.
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the greater the bandwidth loss. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
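 *
 * For illustration (see Documentation/admin-guide/cgroup-v2.rst for the
 * authoritative syntax), a QoS configuration written to io.cost.qos
 * looks along the lines of:
 *
 *  8:16 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00 wlat=5000
 *       min=50.00 max=150.00
 *
 * which treats the device as saturated when the 95th percentile read
 * completion latency exceeds 10ms (writes 5ms) and bounds vrate between
 * 50% and 150%.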
125 * 2-3. Work Conservation
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay for IO control.
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, determining who can
149 * donate and who should take back how much requires hweight propagation
150 * anyway, making it easier to implement and understand as a separate mechanism.
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The output looks like the following.
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - delay : Deferred issuer delay induction level and duration
172 * - usages : Usage history
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
186 #include "blk-cgroup.h"
188 #ifdef CONFIG_TRACEPOINTS
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
208 #else /* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
210 #endif /* CONFIG_TRACEPOINTS */
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
226 MARGIN_TARGET_PCT = 50,
228 INUSE_ADJ_STEP_PCT = 25,
230 /* Have some play in timer operations */
233 /* 1/64k is granular enough and can easily be handled w/ u32 */
234 WEIGHT_ONE = 1 << 16,
237 * As vtime is used to calculate the cost of each IO, it needs to
238 * be fairly high precision. For example, it should be able to
239 * represent the cost of a single page worth of discard with
240 * sufficient accuracy. At the same time, it should be able to
241 * represent reasonably long enough durations to be useful and
242 * convenient during operation.
244 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
245 * granularity and days of wrap-around time even at extreme vrates.
247 VTIME_PER_SEC_SHIFT = 37,
248 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
249 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
250 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
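/*
 * For a sense of scale (illustrative arithmetic, not used by the code):
 * VTIME_PER_USEC is 2^37 / 10^6 ~= 137k, so a 10ms IO at 100% vrate
 * consumes ~1.37e9 vtime units and a u64 vtime clock wraps only after
 * several years of device time at the base rate.
 */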
252 /* bound vrate adjustments within two orders of magnitude */
253 VRATE_MIN_PPM = 10000, /* 1% */
254 VRATE_MAX_PPM = 100000000, /* 10000% */
256 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
257 VRATE_CLAMP_ADJ_PCT = 4,
259 /* if IOs end up waiting for requests, issue less */
260 RQ_WAIT_BUSY_PCT = 5,
262 /* unbusy hysteresis */
266 * The effect of delay is indirect and non-linear and a huge amount of
267 * future debt can accumulate abruptly while unthrottled. Linearly scale
268 * up delay as debt is going up and then let it decay exponentially.
269 * This gives us quick ramp ups while delay is accumulating and long
270 * tails which can help reduce the frequency of debt explosions on
271 * unthrottle. The parameters are experimentally determined.
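 *
 * As a rough illustration of the resulting behavior: the delay ramps up
 * linearly from MIN_DELAY once the overage reaches 5x a period's worth
 * of vtime (MIN_DELAY_THR_PCT) to MAX_DELAY at 250x (MAX_DELAY_THR_PCT),
 * and once the debt stops growing the delay is halved every second.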
273 * The delay mechanism provides adequate protection and behavior in many
274 * cases. However, this is far from ideal and falls short on both
275 * fronts. The debtors are often throttled too harshly, costing a
276 * significant amount of fairness and possibly total work, while the
277 * protection against their impact on the system can be choppy and unreliable.
280 * The shortcoming primarily stems from the fact that, unlike for page
281 * cache, the kernel doesn't have well-defined back-pressure propagation
282 * mechanism and policies for anonymous memory. Fully addressing this
283 * issue will likely require substantial improvements in the area.
285 MIN_DELAY_THR_PCT = 500,
286 MAX_DELAY_THR_PCT = 25000,
288 MAX_DELAY = 250 * USEC_PER_MSEC,
290 /* halve debts if avg usage over 100ms is under 50% */
292 DFGV_PERIOD = 100 * USEC_PER_MSEC,
294 /* don't let cmds which take a very long time pin lagging for too long */
295 MAX_LAGGING_PERIODS = 10,
297 /* switch iff the conditions are met for longer than this */
298 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
301 * Count IO size in 4k pages. The 12-bit shift helps keep the
302 * size-proportional components of the cost calculation within a
303 * similar number of digits as the per-IO cost components.
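 *
 * For example (illustrative), a 64k write is 16 pages, so its size
 * component is 16 * LCOEF_WPAGE on top of the LCOEF_WSEQIO or
 * LCOEF_WRANDIO base cost, keeping both terms in comparable ranges.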
306 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
307 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
309 /* if IOs are further than 16M apart, consider them random for the linear model */
310 LCOEF_RANDIO_PAGES = 4096,
319 /* io.cost.qos controls including per-dev enable of the whole controller */
326 /* io.cost.qos params */
337 /* io.cost.model controls */
344 /* builtin linear cost model coefficients */
374 u32 qos[NR_QOS_PARAMS];
375 u64 i_lcoefs[NR_I_LCOEFS];
376 u64 lcoefs[NR_LCOEFS];
377 u32 too_fast_vrate_pct;
378 u32 too_slow_vrate_pct;
394 struct ioc_pcpu_stat {
395 struct ioc_missed missed[2];
397 local64_t rq_wait_ns;
407 struct ioc_params params;
408 struct ioc_margins margins;
415 struct timer_list timer;
416 struct list_head active_iocgs; /* active cgroups */
417 struct ioc_pcpu_stat __percpu *pcpu_stat;
419 enum ioc_running running;
420 atomic64_t vtime_rate;
424 seqcount_spinlock_t period_seqcount;
425 u64 period_at; /* wallclock starttime */
426 u64 period_at_vtime; /* vtime starttime */
428 atomic64_t cur_period; /* inc'd each period */
429 int busy_level; /* saturation history */
431 bool weights_updated;
432 atomic_t hweight_gen; /* for lazy hweights */
434 /* debt forgiveness */
437 u64 dfgv_usage_us_sum;
439 u64 autop_too_fast_at;
440 u64 autop_too_slow_at;
442 bool user_qos_params:1;
443 bool user_cost_model:1;
446 struct iocg_pcpu_stat {
447 local64_t abs_vusage;
457 /* per device-cgroup pair */
459 struct blkg_policy_data pd;
463 * An iocg can get its weight from two sources - an explicit
464 * per-device-cgroup configuration or the default weight of the
465 * cgroup. `cfg_weight` is the explicit per-device-cgroup
466 * configuration. `weight` is the effective weight considering both sources.
469 * When an idle cgroup becomes active its `active` goes from 0 to
470 * `weight`. `inuse` is the surplus adjusted active weight.
471 * `active` and `inuse` are used to calculate `hweight_active` and
474 * `last_inuse` remembers `inuse` while an iocg is idle to persist
475 * surplus adjustments.
477 * `inuse` may be adjusted dynamically during the period. `saved_*` are used
478 * to determine and track adjustments.
488 sector_t cursor; /* to detect randio */
491 * `vtime` is this iocg's vtime cursor which progresses as IOs are
492 * issued. If lagging behind device vtime, the delta represents
493 * the currently available IO budget. If running ahead, the overage.
496 * `vtime_done` is the same but progressed on completion rather
497 * than issue. The delta behind `vtime` represents the cost of
498 * currently in-flight IOs.
501 atomic64_t done_vtime;
504 /* current delay in effect and when it started */
509 * The period this iocg was last active in. Used for deactivation
510 * and invalidating `vtime`.
512 atomic64_t active_period;
513 struct list_head active_list;
515 /* see __propagate_weights() and current_hweight() for details */
516 u64 child_active_sum;
518 u64 child_adjusted_sum;
522 u32 hweight_donating;
523 u32 hweight_after_donation;
525 struct list_head walk_list;
526 struct list_head surplus_list;
528 struct wait_queue_head waitq;
529 struct hrtimer waitq_timer;
531 /* timestamp at the latest activation */
535 struct iocg_pcpu_stat __percpu *pcpu_stat;
536 struct iocg_stat stat;
537 struct iocg_stat last_stat;
538 u64 last_stat_abs_vusage;
544 /* this iocg's depth in the hierarchy and ancestors including self */
546 struct ioc_gq *ancestors[];
551 struct blkcg_policy_data cpd;
552 unsigned int dfl_weight;
562 struct wait_queue_entry wait;
568 struct iocg_wake_ctx {
574 static const struct ioc_params autop[] = {
577 [QOS_RLAT] = 250000, /* 250ms */
579 [QOS_MIN] = VRATE_MIN_PPM,
580 [QOS_MAX] = VRATE_MAX_PPM,
583 [I_LCOEF_RBPS] = 174019176,
584 [I_LCOEF_RSEQIOPS] = 41708,
585 [I_LCOEF_RRANDIOPS] = 370,
586 [I_LCOEF_WBPS] = 178075866,
587 [I_LCOEF_WSEQIOPS] = 42705,
588 [I_LCOEF_WRANDIOPS] = 378,
593 [QOS_RLAT] = 25000, /* 25ms */
595 [QOS_MIN] = VRATE_MIN_PPM,
596 [QOS_MAX] = VRATE_MAX_PPM,
599 [I_LCOEF_RBPS] = 245855193,
600 [I_LCOEF_RSEQIOPS] = 61575,
601 [I_LCOEF_RRANDIOPS] = 6946,
602 [I_LCOEF_WBPS] = 141365009,
603 [I_LCOEF_WSEQIOPS] = 33716,
604 [I_LCOEF_WRANDIOPS] = 26796,
609 [QOS_RLAT] = 25000, /* 25ms */
611 [QOS_MIN] = VRATE_MIN_PPM,
612 [QOS_MAX] = VRATE_MAX_PPM,
615 [I_LCOEF_RBPS] = 488636629,
616 [I_LCOEF_RSEQIOPS] = 8932,
617 [I_LCOEF_RRANDIOPS] = 8518,
618 [I_LCOEF_WBPS] = 427891549,
619 [I_LCOEF_WSEQIOPS] = 28755,
620 [I_LCOEF_WRANDIOPS] = 21940,
622 .too_fast_vrate_pct = 500,
626 [QOS_RLAT] = 5000, /* 5ms */
628 [QOS_MIN] = VRATE_MIN_PPM,
629 [QOS_MAX] = VRATE_MAX_PPM,
632 [I_LCOEF_RBPS] = 3102524156LLU,
633 [I_LCOEF_RSEQIOPS] = 724816,
634 [I_LCOEF_RRANDIOPS] = 778122,
635 [I_LCOEF_WBPS] = 1742780862LLU,
636 [I_LCOEF_WSEQIOPS] = 425702,
637 [I_LCOEF_WRANDIOPS] = 443193,
639 .too_slow_vrate_pct = 10,
644 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
645 * vtime credit shortage and down on device saturation.
647 static u32 vrate_adj_pct[] =
649 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
650 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
651 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
653 static struct blkcg_policy blkcg_policy_iocost;
655 /* accessors and helpers */
656 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
658 return container_of(rqos, struct ioc, rqos);
661 static struct ioc *q_to_ioc(struct request_queue *q)
663 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
666 static const char __maybe_unused *ioc_name(struct ioc *ioc)
668 struct gendisk *disk = ioc->rqos.q->disk;
672 return disk->disk_name;
675 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
677 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
680 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
682 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
685 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
687 return pd_to_blkg(&iocg->pd);
690 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
692 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
693 struct ioc_cgrp, cpd);
697 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
698 * weight, the more expensive each IO. Must round up.
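 *
 * For example (illustrative numbers): with hw_inuse at WEIGHT_ONE / 8,
 * i.e. a 12.5% hierarchical share, an absolute cost of 100 scales to a
 * cost of 800 charged against the iocg's vtime.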
700 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
702 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
706 * The inverse of abs_cost_to_cost(). Must round up.
708 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
710 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
713 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
714 u64 abs_cost, u64 cost)
716 struct iocg_pcpu_stat *gcs;
718 bio->bi_iocost_cost = cost;
719 atomic64_add(cost, &iocg->vtime);
721 gcs = get_cpu_ptr(iocg->pcpu_stat);
722 local64_add(abs_cost, &gcs->abs_vusage);
726 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
729 spin_lock_irqsave(&iocg->ioc->lock, *flags);
730 spin_lock(&iocg->waitq.lock);
732 spin_lock_irqsave(&iocg->waitq.lock, *flags);
736 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
739 spin_unlock(&iocg->waitq.lock);
740 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
742 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
746 #define CREATE_TRACE_POINTS
747 #include <trace/events/iocost.h>
749 static void ioc_refresh_margins(struct ioc *ioc)
751 struct ioc_margins *margins = &ioc->margins;
752 u32 period_us = ioc->period_us;
753 u64 vrate = ioc->vtime_base_rate;
755 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
756 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
757 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
760 /* latency Qos params changed, update period_us and all the dependent params */
761 static void ioc_refresh_period_us(struct ioc *ioc)
763 u32 ppm, lat, multi, period_us;
765 lockdep_assert_held(&ioc->lock);
767 /* pick the higher latency target */
768 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
769 ppm = ioc->params.qos[QOS_RPPM];
770 lat = ioc->params.qos[QOS_RLAT];
772 ppm = ioc->params.qos[QOS_WPPM];
773 lat = ioc->params.qos[QOS_WLAT];
777 * We want the period to be long enough to contain a healthy number
778 * of IOs while short enough for granular control. Define it as a
779 * multiple of the latency target. Ideally, the multiplier should
780 * be scaled according to the percentile so that it would nominally
781 * contain a certain number of requests. Let's be simpler and
782 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
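 *
 * For example (illustrative): rpct=95.00 gives ppm = 950000 and, with
 * rlat=10000us, multi = max((1000000 - 950000) / 50000, 2) = 2, so the
 * period becomes 20ms, well within [MIN_PERIOD, MAX_PERIOD].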
785 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
788 period_us = multi * lat;
789 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
791 /* calculate dependent params */
792 ioc->period_us = period_us;
793 ioc->timer_slack_ns = div64_u64(
794 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
796 ioc_refresh_margins(ioc);
799 static int ioc_autop_idx(struct ioc *ioc)
801 int idx = ioc->autop_idx;
802 const struct ioc_params *p = &autop[idx];
807 if (!blk_queue_nonrot(ioc->rqos.q))
810 /* handle SATA SSDs w/ broken NCQ */
811 if (blk_queue_depth(ioc->rqos.q) == 1)
812 return AUTOP_SSD_QD1;
814 /* use one of the normal ssd sets */
815 if (idx < AUTOP_SSD_DFL)
816 return AUTOP_SSD_DFL;
818 /* if user is overriding anything, maintain what was there */
819 if (ioc->user_qos_params || ioc->user_cost_model)
822 /* step up/down based on the vrate */
823 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
824 now_ns = ktime_get_ns();
826 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
827 if (!ioc->autop_too_fast_at)
828 ioc->autop_too_fast_at = now_ns;
829 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
832 ioc->autop_too_fast_at = 0;
835 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
836 if (!ioc->autop_too_slow_at)
837 ioc->autop_too_slow_at = now_ns;
838 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
841 ioc->autop_too_slow_at = 0;
848 * Take the following as input
850 * @bps maximum sequential throughput
851 * @seqiops maximum sequential 4k iops
852 * @randiops maximum random 4k iops
854 * and calculate the linear model cost coefficients.
856 * *@page per-page cost 1s / (@bps / 4096)
857 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
858 * *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
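 *
 * For example (illustrative round numbers): with @bps = 400MiB/s the
 * device moves ~100k 4k pages per second, so *@page ~= VTIME_PER_SEC /
 * 100000; with @randiops = 10000, *@randio ~= VTIME_PER_SEC / 10000
 * minus the per-page cost, clamped at 0.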
860 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
861 u64 *page, u64 *seqio, u64 *randio)
865 *page = *seqio = *randio = 0;
868 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
869 DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
872 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
878 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
884 static void ioc_refresh_lcoefs(struct ioc *ioc)
886 u64 *u = ioc->params.i_lcoefs;
887 u64 *c = ioc->params.lcoefs;
889 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
890 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
891 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
892 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
895 static bool ioc_refresh_params(struct ioc *ioc, bool force)
897 const struct ioc_params *p;
900 lockdep_assert_held(&ioc->lock);
902 idx = ioc_autop_idx(ioc);
905 if (idx == ioc->autop_idx && !force)
908 if (idx != ioc->autop_idx) {
909 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
910 ioc->vtime_base_rate = VTIME_PER_USEC;
913 ioc->autop_idx = idx;
914 ioc->autop_too_fast_at = 0;
915 ioc->autop_too_slow_at = 0;
917 if (!ioc->user_qos_params)
918 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
919 if (!ioc->user_cost_model)
920 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
922 ioc_refresh_period_us(ioc);
923 ioc_refresh_lcoefs(ioc);
925 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
926 VTIME_PER_USEC, MILLION);
927 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
928 VTIME_PER_USEC, MILLION);
934 * When an iocg accumulates too much vtime or gets deactivated, we throw away
935 * some vtime, which lowers the overall device utilization. As the exact amount
936 * which is being thrown away is known, we can compensate by accelerating the
937 * vrate accordingly so that the extra vtime generated in the current period
938 * matches what got lost.
940 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
942 s64 pleft = ioc->period_at + ioc->period_us - now->now;
943 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
944 s64 vcomp, vcomp_min, vcomp_max;
946 lockdep_assert_held(&ioc->lock);
948 /* we need some time left in this period */
953 * Calculate how much vrate should be adjusted to offset the error.
954 * Limit the amount of adjustment and deduct the adjusted amount from the error.
957 vcomp = -div64_s64(ioc->vtime_err, pleft);
958 vcomp_min = -(ioc->vtime_base_rate >> 1);
959 vcomp_max = ioc->vtime_base_rate;
960 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
962 ioc->vtime_err += vcomp * pleft;
964 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
966 /* bound how much error can accumulate */
967 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
970 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
971 int nr_lagging, int nr_shortages,
972 int prev_busy_level, u32 *missed_ppm)
974 u64 vrate = ioc->vtime_base_rate;
975 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
977 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
978 if (ioc->busy_level != prev_busy_level || nr_lagging)
979 trace_iocost_ioc_vrate_adj(ioc, vrate,
980 missed_ppm, rq_wait_pct,
981 nr_lagging, nr_shortages);
987 * If vrate is out of bounds, apply clamp gradually as the
988 * bounds can change abruptly. Otherwise, apply busy_level based adjustment.
991 if (vrate < vrate_min) {
992 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
993 vrate = min(vrate, vrate_min);
994 } else if (vrate > vrate_max) {
995 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
996 vrate = max(vrate, vrate_max);
998 int idx = min_t(int, abs(ioc->busy_level),
999 ARRAY_SIZE(vrate_adj_pct) - 1);
1000 u32 adj_pct = vrate_adj_pct[idx];
1002 if (ioc->busy_level > 0)
1003 adj_pct = 100 - adj_pct;
1005 adj_pct = 100 + adj_pct;
1007 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1008 vrate_min, vrate_max);
1011 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1012 nr_lagging, nr_shortages);
1014 ioc->vtime_base_rate = vrate;
1015 ioc_refresh_margins(ioc);
1018 /* take a snapshot of the current [v]time and vrate */
1019 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1024 now->now_ns = ktime_get();
1025 now->now = ktime_to_us(now->now_ns);
1026 vrate = atomic64_read(&ioc->vtime_rate);
1029 * The current vtime is
1031 * vtime at period start + (wallclock time since the start) * vrate
1033 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1034 * needed, they're seqcount protected.
1037 seq = read_seqcount_begin(&ioc->period_seqcount);
1038 now->vnow = ioc->period_at_vtime +
1039 (now->now - ioc->period_at) * vrate;
1040 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1043 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1045 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1047 write_seqcount_begin(&ioc->period_seqcount);
1048 ioc->period_at = now->now;
1049 ioc->period_at_vtime = now->vnow;
1050 write_seqcount_end(&ioc->period_seqcount);
1052 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1053 add_timer(&ioc->timer);
1057 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1058 * weight sums and propagate upwards accordingly. If @save, the current margin
1059 * is saved to be used as reference for later inuse in-period adjustments.
1061 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1062 bool save, struct ioc_now *now)
1064 struct ioc *ioc = iocg->ioc;
1067 lockdep_assert_held(&ioc->lock);
1070 * For an active leaf node, its inuse shouldn't be zero or exceed
1071 * @active. An active internal node's inuse is solely determined by the
1072 * inuse to active ratio of its children regardless of @inuse.
1074 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1075 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1076 iocg->child_active_sum);
1078 inuse = clamp_t(u32, inuse, 1, active);
1081 iocg->last_inuse = iocg->inuse;
1083 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1085 if (active == iocg->active && inuse == iocg->inuse)
1088 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1089 struct ioc_gq *parent = iocg->ancestors[lvl];
1090 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1091 u32 parent_active = 0, parent_inuse = 0;
1093 /* update the level sums */
1094 parent->child_active_sum += (s32)(active - child->active);
1095 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1096 /* apply the updates */
1097 child->active = active;
1098 child->inuse = inuse;
1101 * The delta between inuse and active sums indicates that
1102 * much of weight is being given away. Parent's inuse
1103 * and active should reflect the ratio.
1105 if (parent->child_active_sum) {
1106 parent_active = parent->weight;
1107 parent_inuse = DIV64_U64_ROUND_UP(
1108 parent_active * parent->child_inuse_sum,
1109 parent->child_active_sum);
1112 /* do we need to keep walking up? */
1113 if (parent_active == parent->active &&
1114 parent_inuse == parent->inuse)
1117 active = parent_active;
1118 inuse = parent_inuse;
1121 ioc->weights_updated = true;
1124 static void commit_weights(struct ioc *ioc)
1126 lockdep_assert_held(&ioc->lock);
1128 if (ioc->weights_updated) {
1129 /* paired with rmb in current_hweight(), see there */
1131 atomic_inc(&ioc->hweight_gen);
1132 ioc->weights_updated = false;
1136 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1137 bool save, struct ioc_now *now)
1139 __propagate_weights(iocg, active, inuse, save, now);
1140 commit_weights(iocg->ioc);
1143 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1145 struct ioc *ioc = iocg->ioc;
1150 /* hot path - if uptodate, use cached */
1151 ioc_gen = atomic_read(&ioc->hweight_gen);
1152 if (ioc_gen == iocg->hweight_gen)
1156 * Paired with wmb in commit_weights(). If we saw the updated
1157 * hweight_gen, all the weight updates from __propagate_weights() are visible.
1160 * We can race with weight updates during calculation and get it
1161 * wrong. However, hweight_gen would have changed and a future
1162 * reader will recalculate and we're guaranteed to discard the
1163 * wrong result soon.
1167 hwa = hwi = WEIGHT_ONE;
1168 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1169 struct ioc_gq *parent = iocg->ancestors[lvl];
1170 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1171 u64 active_sum = READ_ONCE(parent->child_active_sum);
1172 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1173 u32 active = READ_ONCE(child->active);
1174 u32 inuse = READ_ONCE(child->inuse);
1176 /* we can race with deactivations and either may read as zero */
1177 if (!active_sum || !inuse_sum)
1180 active_sum = max_t(u64, active, active_sum);
1181 hwa = div64_u64((u64)hwa * active, active_sum);
1183 inuse_sum = max_t(u64, inuse, inuse_sum);
1184 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1187 iocg->hweight_active = max_t(u32, hwa, 1);
1188 iocg->hweight_inuse = max_t(u32, hwi, 1);
1189 iocg->hweight_gen = ioc_gen;
1192 *hw_activep = iocg->hweight_active;
1194 *hw_inusep = iocg->hweight_inuse;
1198 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1199 * other weights stay unchanged.
1201 static u32 current_hweight_max(struct ioc_gq *iocg)
1203 u32 hwm = WEIGHT_ONE;
1204 u32 inuse = iocg->active;
1205 u64 child_inuse_sum;
1208 lockdep_assert_held(&iocg->ioc->lock);
1210 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1211 struct ioc_gq *parent = iocg->ancestors[lvl];
1212 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1214 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1215 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1216 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1217 parent->child_active_sum);
1220 return max_t(u32, hwm, 1);
1223 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1225 struct ioc *ioc = iocg->ioc;
1226 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1227 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1230 lockdep_assert_held(&ioc->lock);
1232 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1233 if (weight != iocg->weight && iocg->active)
1234 propagate_weights(iocg, weight, iocg->inuse, true, now);
1235 iocg->weight = weight;
1238 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1240 struct ioc *ioc = iocg->ioc;
1241 u64 last_period, cur_period;
1246 * If we seem to be already active, just update the stamp to tell the
1247 * timer that we're still active. We don't mind occasional races.
1249 if (!list_empty(&iocg->active_list)) {
1251 cur_period = atomic64_read(&ioc->cur_period);
1252 if (atomic64_read(&iocg->active_period) != cur_period)
1253 atomic64_set(&iocg->active_period, cur_period);
1257 /* racy check on internal node IOs, treat as root level IOs */
1258 if (iocg->child_active_sum)
1261 spin_lock_irq(&ioc->lock);
1266 cur_period = atomic64_read(&ioc->cur_period);
1267 last_period = atomic64_read(&iocg->active_period);
1268 atomic64_set(&iocg->active_period, cur_period);
1270 /* already activated or breaking leaf-only constraint? */
1271 if (!list_empty(&iocg->active_list))
1272 goto succeed_unlock;
1273 for (i = iocg->level - 1; i > 0; i--)
1274 if (!list_empty(&iocg->ancestors[i]->active_list))
1277 if (iocg->child_active_sum)
1281 * Always start with the target budget. On deactivation, we throw away
1282 * anything above it.
1284 vtarget = now->vnow - ioc->margins.target;
1285 vtime = atomic64_read(&iocg->vtime);
1287 atomic64_add(vtarget - vtime, &iocg->vtime);
1288 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1292 * Activate, propagate weight and start period timer if not
1293 * running. Reset hweight_gen to avoid accidental match from
1296 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1297 list_add(&iocg->active_list, &ioc->active_iocgs);
1299 propagate_weights(iocg, iocg->weight,
1300 iocg->last_inuse ?: iocg->weight, true, now);
1302 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1303 last_period, cur_period, vtime);
1305 iocg->activated_at = now->now;
1307 if (ioc->running == IOC_IDLE) {
1308 ioc->running = IOC_RUNNING;
1309 ioc->dfgv_period_at = now->now;
1310 ioc->dfgv_period_rem = 0;
1311 ioc_start_period(ioc, now);
1315 spin_unlock_irq(&ioc->lock);
1319 spin_unlock_irq(&ioc->lock);
1323 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1325 struct ioc *ioc = iocg->ioc;
1326 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1327 u64 tdelta, delay, new_delay;
1328 s64 vover, vover_pct;
1331 lockdep_assert_held(&iocg->waitq.lock);
1333 /* calculate the current delay in effect - 1/2 every second */
1334 tdelta = now->now - iocg->delay_at;
1336 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1340 /* calculate the new delay from the debt amount */
1341 current_hweight(iocg, &hwa, NULL);
1342 vover = atomic64_read(&iocg->vtime) +
1343 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1344 vover_pct = div64_s64(100 * vover,
1345 ioc->period_us * ioc->vtime_base_rate);
1347 if (vover_pct <= MIN_DELAY_THR_PCT)
1349 else if (vover_pct >= MAX_DELAY_THR_PCT)
1350 new_delay = MAX_DELAY;
1352 new_delay = MIN_DELAY +
1353 div_u64((MAX_DELAY - MIN_DELAY) *
1354 (vover_pct - MIN_DELAY_THR_PCT),
1355 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1357 /* pick the higher one and apply */
1358 if (new_delay > delay) {
1359 iocg->delay = new_delay;
1360 iocg->delay_at = now->now;
1364 if (delay >= MIN_DELAY) {
1365 if (!iocg->indelay_since)
1366 iocg->indelay_since = now->now;
1367 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1370 if (iocg->indelay_since) {
1371 iocg->stat.indelay_us += now->now - iocg->indelay_since;
1372 iocg->indelay_since = 0;
1375 blkcg_clear_delay(blkg);
1380 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1381 struct ioc_now *now)
1383 struct iocg_pcpu_stat *gcs;
1385 lockdep_assert_held(&iocg->ioc->lock);
1386 lockdep_assert_held(&iocg->waitq.lock);
1387 WARN_ON_ONCE(list_empty(&iocg->active_list));
1390 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1391 * inuse, donating all of its share to others until its debt is paid off.
1393 if (!iocg->abs_vdebt && abs_cost) {
1394 iocg->indebt_since = now->now;
1395 propagate_weights(iocg, iocg->active, 0, false, now);
1398 iocg->abs_vdebt += abs_cost;
1400 gcs = get_cpu_ptr(iocg->pcpu_stat);
1401 local64_add(abs_cost, &gcs->abs_vusage);
1405 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1406 struct ioc_now *now)
1408 lockdep_assert_held(&iocg->ioc->lock);
1409 lockdep_assert_held(&iocg->waitq.lock);
1411 /* make sure that nobody messed with @iocg */
1412 WARN_ON_ONCE(list_empty(&iocg->active_list));
1413 WARN_ON_ONCE(iocg->inuse > 1);
1415 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1417 /* if debt is paid in full, restore inuse */
1418 if (!iocg->abs_vdebt) {
1419 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1420 iocg->indebt_since = 0;
1422 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1427 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1428 int flags, void *key)
1430 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1431 struct iocg_wake_ctx *ctx = key;
1432 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1434 ctx->vbudget -= cost;
1436 if (ctx->vbudget < 0)
1439 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1440 wait->committed = true;
1443 * autoremove_wake_function() removes the wait entry only when it
1444 * actually changed the task state. We want the wait always removed.
1445 * Remove explicitly and use default_wake_function(). Note that the
1446 * order of operations is important as finish_wait() tests whether
1447 * @wq_entry is removed without grabbing the lock.
1449 default_wake_function(wq_entry, mode, flags, key);
1450 list_del_init_careful(&wq_entry->entry);
1455 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1456 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1457 * addition to iocg->waitq.lock.
1459 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1460 struct ioc_now *now)
1462 struct ioc *ioc = iocg->ioc;
1463 struct iocg_wake_ctx ctx = { .iocg = iocg };
1464 u64 vshortage, expires, oexpires;
1468 lockdep_assert_held(&iocg->waitq.lock);
1470 current_hweight(iocg, &hwa, NULL);
1471 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1474 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1475 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1476 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1477 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1479 lockdep_assert_held(&ioc->lock);
1481 atomic64_add(vpay, &iocg->vtime);
1482 atomic64_add(vpay, &iocg->done_vtime);
1483 iocg_pay_debt(iocg, abs_vpay, now);
1487 if (iocg->abs_vdebt || iocg->delay)
1488 iocg_kick_delay(iocg, now);
1491 * Debt can still be outstanding if we haven't paid all yet or the
1492 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1493 * under debt. Make sure @vbudget reflects the outstanding amount and is not positive.
1496 if (iocg->abs_vdebt) {
1497 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1498 vbudget = min_t(s64, 0, vbudget - vdebt);
1502 * Wake up the ones which are due and see how much vtime we'll need for
1503 * the next one. As paying off debt restores hw_inuse, it must be read
1504 * after the above debt payment.
1506 ctx.vbudget = vbudget;
1507 current_hweight(iocg, NULL, &ctx.hw_inuse);
1509 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1511 if (!waitqueue_active(&iocg->waitq)) {
1512 if (iocg->wait_since) {
1513 iocg->stat.wait_us += now->now - iocg->wait_since;
1514 iocg->wait_since = 0;
1519 if (!iocg->wait_since)
1520 iocg->wait_since = now->now;
1522 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1525 /* determine next wakeup, add a timer margin to guarantee chunking */
1526 vshortage = -ctx.vbudget;
1527 expires = now->now_ns +
1528 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1530 expires += ioc->timer_slack_ns;
1532 /* if already active and close enough, don't bother */
1533 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1534 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1535 abs(oexpires - expires) <= ioc->timer_slack_ns)
1538 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1539 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1542 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1544 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1545 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1547 unsigned long flags;
1549 ioc_now(iocg->ioc, &now);
1551 iocg_lock(iocg, pay_debt, &flags);
1552 iocg_kick_waitq(iocg, pay_debt, &now);
1553 iocg_unlock(iocg, pay_debt, &flags);
1555 return HRTIMER_NORESTART;
1558 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1560 u32 nr_met[2] = { };
1561 u32 nr_missed[2] = { };
1565 for_each_online_cpu(cpu) {
1566 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1567 u64 this_rq_wait_ns;
1569 for (rw = READ; rw <= WRITE; rw++) {
1570 u32 this_met = local_read(&stat->missed[rw].nr_met);
1571 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1573 nr_met[rw] += this_met - stat->missed[rw].last_met;
1574 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1575 stat->missed[rw].last_met = this_met;
1576 stat->missed[rw].last_missed = this_missed;
1579 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1580 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1581 stat->last_rq_wait_ns = this_rq_wait_ns;
1584 for (rw = READ; rw <= WRITE; rw++) {
1585 if (nr_met[rw] + nr_missed[rw])
1587 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1588 nr_met[rw] + nr_missed[rw]);
1590 missed_ppm_ar[rw] = 0;
1593 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1594 ioc->period_us * NSEC_PER_USEC);
1597 /* was iocg idle this period? */
1598 static bool iocg_is_idle(struct ioc_gq *iocg)
1600 struct ioc *ioc = iocg->ioc;
1602 /* did something get issued this period? */
1603 if (atomic64_read(&iocg->active_period) ==
1604 atomic64_read(&ioc->cur_period))
1607 /* is something in flight? */
1608 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1615 * Call this function on the target leaf @iocg's to build pre-order traversal
1616 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1617 * ->walk_list and the caller is responsible for dissolving the list after use.
1619 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1620 struct list_head *inner_walk)
1624 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1626 /* find the first ancestor which hasn't been visited yet */
1627 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1628 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1632 /* walk down and visit the inner nodes to get pre-order traversal */
1633 while (++lvl <= iocg->level - 1) {
1634 struct ioc_gq *inner = iocg->ancestors[lvl];
1636 /* record traversal order */
1637 list_add_tail(&inner->walk_list, inner_walk);
1641 /* propagate the deltas to the parent */
1642 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1644 if (iocg->level > 0) {
1645 struct iocg_stat *parent_stat =
1646 &iocg->ancestors[iocg->level - 1]->stat;
1648 parent_stat->usage_us +=
1649 iocg->stat.usage_us - iocg->last_stat.usage_us;
1650 parent_stat->wait_us +=
1651 iocg->stat.wait_us - iocg->last_stat.wait_us;
1652 parent_stat->indebt_us +=
1653 iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1654 parent_stat->indelay_us +=
1655 iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1658 iocg->last_stat = iocg->stat;
1661 /* collect per-cpu counters and propagate the deltas to the parent */
1662 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1664 struct ioc *ioc = iocg->ioc;
1669 lockdep_assert_held(&iocg->ioc->lock);
1671 /* collect per-cpu counters */
1672 for_each_possible_cpu(cpu) {
1673 abs_vusage += local64_read(
1674 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1676 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1677 iocg->last_stat_abs_vusage = abs_vusage;
1679 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1680 iocg->stat.usage_us += iocg->usage_delta_us;
1682 iocg_flush_stat_upward(iocg);
1685 /* get stat counters ready for reading on all active iocgs */
1686 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1688 LIST_HEAD(inner_walk);
1689 struct ioc_gq *iocg, *tiocg;
1691 /* flush leaves and build inner node walk list */
1692 list_for_each_entry(iocg, target_iocgs, active_list) {
1693 iocg_flush_stat_leaf(iocg, now);
1694 iocg_build_inner_walk(iocg, &inner_walk);
1697 /* keep flushing upwards by walking the inner list backwards */
1698 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1699 iocg_flush_stat_upward(iocg);
1700 list_del_init(&iocg->walk_list);
1705 * Determine what @iocg's hweight_inuse should be after donating unused
1706 * capacity. @hwm is the upper bound and used to signal no donation. This
1707 * function also throws away @iocg's excess budget.
1709 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1710 u32 usage, struct ioc_now *now)
1712 struct ioc *ioc = iocg->ioc;
1713 u64 vtime = atomic64_read(&iocg->vtime);
1714 s64 excess, delta, target, new_hwi;
1716 /* debt handling owns inuse for debtors */
1717 if (iocg->abs_vdebt)
1720 /* see whether minimum margin requirement is met */
1721 if (waitqueue_active(&iocg->waitq) ||
1722 time_after64(vtime, now->vnow - ioc->margins.min))
1725 /* throw away excess above target */
1726 excess = now->vnow - vtime - ioc->margins.target;
1728 atomic64_add(excess, &iocg->vtime);
1729 atomic64_add(excess, &iocg->done_vtime);
1731 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1735 * Let's say the distance between iocg's and device's vtimes as a
1736 * fraction of period duration is delta. Assuming that the iocg will
1737 * consume the usage determined above, we want to determine new_hwi so
1738 * that delta equals MARGIN_TARGET at the end of the next period.
1740 * We need to execute usage worth of IOs while spending the sum of the
1741 * new budget (1 - MARGIN_TARGET) and the leftover from the last period (delta):
1744 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1746 * Therefore, the new_hwi is:
1748 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
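 *
 * For example (illustrative): if the iocg used 40% of the device over
 * the period (usage = 0.4 * WEIGHT_ONE) and its vtime trails the device
 * vtime by a full period (delta = 1), new_hwi = 0.4 / (1 - 0.5 + 1)
 * ~= 0.27, i.e. roughly 27% of WEIGHT_ONE.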
1750 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1751 now->vnow - ioc->period_at_vtime);
1752 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1753 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1755 return clamp_t(s64, new_hwi, 1, hwm);
1759 * For work-conservation, an iocg which isn't using all of its share should
1760 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1761 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1763 * #1 is mathematically simpler but has the drawback of requiring synchronous
1764 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1765 * change due to donation snapbacks as it has the possibility of grossly
1766 * overshooting what's allowed by the model and vrate.
1768 * #2 is inherently safe with local operations. The donating iocg can easily
1769 * snap back to higher weights when needed without worrying about impacts on
1770 * other nodes as the impacts will be inherently correct. This also makes idle
1771 * iocg activations safe. The only effect activations have is decreasing
1772 * hweight_inuse of others, the right solution to which is for those iocgs to
1773 * snap back to higher weights.
1775 * So, we go with #2. The challenge is calculating how each donating iocg's
1776 * inuse should be adjusted to achieve the target donation amounts. This is done
1777 * using Andy's method described in the following pdf.
1779 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1781 * Given the weights and target after-donation hweight_inuse values, Andy's
1782 * method determines how the proportional distribution should look like at each
1783 * sibling level to maintain the relative relationship between all non-donating
1784 * pairs. To roughly summarize, it divides the tree into donating and
1785 * non-donating parts, calculates global donation rate which is used to
1786 * determine the target hweight_inuse for each node, and then derives per-level proportions.
1789 * The following pdf shows that global distribution calculated this way can be
1790 * achieved by scaling inuse weights of donating leaves and propagating the
1791 * adjustments upwards proportionally.
1793 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1795 * Combining the above two, we can determine how each leaf iocg's inuse should
1796 * be adjusted to achieve the target donation.
1798 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1800 * The inline comments use symbols from the last pdf.
1802 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1803 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1804 * t is the sum of the absolute budgets of donating nodes in the subtree.
1805 * w is the weight of the node. w = w_f + w_t
1806 * w_f is the non-donating portion of w. w_f = w * f / b
1807 * w_t is the donating portion of w. w_t = w * t / b
1808 * s is the sum of all sibling weights. s = Sum(w) for siblings
1809 * s_f and s_t are the non-donating and donating portions of s.
1811 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1812 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1813 * after adjustments. Subscript r denotes the root node's values.
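 *
 * For example (illustrative): if donating nodes hold 40% of the device
 * (t_r = 0.4) and will retain 10% after donation (t_r' = 0.1), then
 * gamma = (1 - 0.1) / (1 - 0.4) = 1.5, i.e. every non-donating budget
 * is scaled up by 50% to absorb the donated capacity.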
1815 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1817 LIST_HEAD(over_hwa);
1818 LIST_HEAD(inner_walk);
1819 struct ioc_gq *iocg, *tiocg, *root_iocg;
1820 u32 after_sum, over_sum, over_target, gamma;
1823 * It's pretty unlikely but possible for the total sum of
1824 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1825 * confuse the following calculations. If such a condition is detected,
1826 * scale down everyone over its full share equally to keep the sum below WEIGHT_ONE.
1831 list_for_each_entry(iocg, surpluses, surplus_list) {
1834 current_hweight(iocg, &hwa, NULL);
1835 after_sum += iocg->hweight_after_donation;
1837 if (iocg->hweight_after_donation > hwa) {
1838 over_sum += iocg->hweight_after_donation;
1839 list_add(&iocg->walk_list, &over_hwa);
1843 if (after_sum >= WEIGHT_ONE) {
1845 * The delta should be deducted from the over_sum, calculate
1846 * target over_sum value.
1848 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1849 WARN_ON_ONCE(over_sum <= over_delta);
1850 over_target = over_sum - over_delta;
1855 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1857 iocg->hweight_after_donation =
1858 div_u64((u64)iocg->hweight_after_donation *
1859 over_target, over_sum);
1860 list_del_init(&iocg->walk_list);
1864 * Build pre-order inner node walk list and prepare for donation
1865 * adjustment calculations.
1867 list_for_each_entry(iocg, surpluses, surplus_list) {
1868 iocg_build_inner_walk(iocg, &inner_walk);
1871 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1872 WARN_ON_ONCE(root_iocg->level > 0);
1874 list_for_each_entry(iocg, &inner_walk, walk_list) {
1875 iocg->child_adjusted_sum = 0;
1876 iocg->hweight_donating = 0;
1877 iocg->hweight_after_donation = 0;
1881 * Propagate the donating budget (b_t) and after donation budget (b'_t) up the hierarchy.
1884 list_for_each_entry(iocg, surpluses, surplus_list) {
1885 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1887 parent->hweight_donating += iocg->hweight_donating;
1888 parent->hweight_after_donation += iocg->hweight_after_donation;
1891 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1892 if (iocg->level > 0) {
1893 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1895 parent->hweight_donating += iocg->hweight_donating;
1896 parent->hweight_after_donation += iocg->hweight_after_donation;
1901 * Calculate inner hwa's (b) and make sure the donation values are
1902 * within the accepted ranges as we're doing low res calculations with roundups.
1905 list_for_each_entry(iocg, &inner_walk, walk_list) {
1907 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1909 iocg->hweight_active = DIV64_U64_ROUND_UP(
1910 (u64)parent->hweight_active * iocg->active,
1911 parent->child_active_sum);
1915 iocg->hweight_donating = min(iocg->hweight_donating,
1916 iocg->hweight_active);
1917 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1918 iocg->hweight_donating - 1);
1919 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1920 iocg->hweight_donating <= 1 ||
1921 iocg->hweight_after_donation == 0)) {
1922 pr_warn("iocg: invalid donation weights in ");
1923 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1924 pr_cont(": active=%u donating=%u after=%u\n",
1925 iocg->hweight_active, iocg->hweight_donating,
1926 iocg->hweight_after_donation);
1931 * Calculate the global donation rate (gamma) - the rate to adjust
1932 * non-donating budgets by.
1934 * No need to use 64bit multiplication here as the first operand is
1935 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1937 * We know that there are beneficiary nodes and the sum of the donating
1938 * hweights can't be whole; however, due to the round-ups during hweight
1939 * calculations, root_iocg->hweight_donating might still end up equal to
1940 * or greater than whole. Limit the range when calculating the divider.
1942 * gamma = (1 - t_r') / (1 - t_r)
1944 gamma = DIV_ROUND_UP(
1945 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1946 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1949 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner nodes.
1952 list_for_each_entry(iocg, &inner_walk, walk_list) {
1953 struct ioc_gq *parent;
1954 u32 inuse, wpt, wptp;
1957 if (iocg->level == 0) {
1958 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1959 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1960 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1961 WEIGHT_ONE - iocg->hweight_after_donation);
1965 parent = iocg->ancestors[iocg->level - 1];
1967 /* b' = gamma * b_f + b_t' */
1968 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1969 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1970 WEIGHT_ONE) + iocg->hweight_after_donation;
1972 /* w' = s' * b' / b'_p */
1973 inuse = DIV64_U64_ROUND_UP(
1974 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1975 parent->hweight_inuse);
1977 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1978 st = DIV64_U64_ROUND_UP(
1979 iocg->child_active_sum * iocg->hweight_donating,
1980 iocg->hweight_active);
1981 sf = iocg->child_active_sum - st;
1982 wpt = DIV64_U64_ROUND_UP(
1983 (u64)iocg->active * iocg->hweight_donating,
1984 iocg->hweight_active);
1985 wptp = DIV64_U64_ROUND_UP(
1986 (u64)inuse * iocg->hweight_after_donation,
1987 iocg->hweight_inuse);
1989 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1993 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1994 * we can finally determine leaf adjustments.
1996 list_for_each_entry(iocg, surpluses, surplus_list) {
1997 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2001 * In-debt iocgs participated in the donation calculation with
2002 * the minimum target hweight_inuse. Configuring inuse
2003 * accordingly would work fine but debt handling expects
2004 * @iocg->inuse to stay at the minimum and we don't wanna interfere.
2007 if (iocg->abs_vdebt) {
2008 WARN_ON_ONCE(iocg->inuse > 1);
2012 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2013 inuse = DIV64_U64_ROUND_UP(
2014 parent->child_adjusted_sum * iocg->hweight_after_donation,
2015 parent->hweight_inuse);
2017 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2019 iocg->hweight_inuse,
2020 iocg->hweight_after_donation);
2022 __propagate_weights(iocg, iocg->active, inuse, true, now);
2025 /* walk list should be dissolved after use */
2026 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2027 list_del_init(&iocg->walk_list);
2031 * A low weight iocg can amass a large amount of debt, for example, when
2032 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2033 * memory paired with a slow IO device, the debt can span multiple seconds or
2034 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2035 * up blocked paying its debt while the IO device is idle.
2037 * The following protects against such cases. If the device has been
2038 * sufficiently idle for a while, the debts are halved and delays are recalculated.
2041 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2042 struct ioc_now *now)
2044 struct ioc_gq *iocg;
2045 u64 dur, usage_pct, nr_cycles;
2047 /* if no debtor, reset the cycle */
2049 ioc->dfgv_period_at = now->now;
2050 ioc->dfgv_period_rem = 0;
2051 ioc->dfgv_usage_us_sum = 0;
2056 * Debtors can pass through a lot of writes choking the device and we
2057 * don't want to be forgiving debts while the device is struggling from
2058 * write bursts. If we're missing latency targets, consider the device fully utilized.
2061 if (ioc->busy_level > 0)
2062 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2064 ioc->dfgv_usage_us_sum += usage_us_sum;
2065 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2069 * At least DFGV_PERIOD has passed since the last period. Calculate the
2070 * average usage and reset the period counters.
2072 dur = now->now - ioc->dfgv_period_at;
2073 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2075 ioc->dfgv_period_at = now->now;
2076 ioc->dfgv_usage_us_sum = 0;
2078 /* if it was too busy, reset everything */
2079 if (usage_pct > DFGV_USAGE_PCT) {
2080 ioc->dfgv_period_rem = 0;
2085 * Usage is lower than threshold. Let's forgive some debts. Debt
2086 * forgiveness runs off of the usual ioc timer but its period usually
2087 * doesn't match ioc's. Compensate the difference by performing the
2088 * reduction as many times as would fit in the duration since the last
2089 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2090 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2091 * reductions is doubled.
2093 nr_cycles = dur + ioc->dfgv_period_rem;
2094 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
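/*
 * Illustration of the compensation above, assuming a 100ms DFGV_PERIOD
 * (the actual value is defined elsewhere in this file): if forgiveness
 * runs 150ms apart, the first run does one halving and carries 50ms over
 * in dfgv_period_rem, and the next run sees 150ms + 50ms and does two,
 * so three halvings happen per 300ms as intended.  do_div() leaves the
 * quotient in nr_cycles and returns the remainder.
 */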
2096 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2097 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2099 if (!iocg->abs_vdebt && !iocg->delay)
2102 spin_lock(&iocg->waitq.lock);
2104 old_debt = iocg->abs_vdebt;
2105 old_delay = iocg->delay;
2107 if (iocg->abs_vdebt)
2108 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2109 if (iocg->delay)
2110 iocg->delay = iocg->delay >> nr_cycles ?: 1;
2112 iocg_kick_waitq(iocg, true, now);
2114 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2115 old_debt, iocg->abs_vdebt,
2116 old_delay, iocg->delay);
2118 spin_unlock(&iocg->waitq.lock);
2123 * Check the active iocgs' state to avoid oversleeping and deactivate idle iocgs.
2126 * Since waiters determine the sleep durations based on the vrate
2127 * they saw at the time of sleep, if vrate has increased, some
2128 * waiters could be sleeping for too long. Wake up tardy waiters
2129 * which should have woken up in the last period and expire idle iocgs.
2132 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2135 struct ioc_gq *iocg, *tiocg;
2137 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2138 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2139 !iocg->delay && !iocg_is_idle(iocg))
2142 spin_lock(&iocg->waitq.lock);
2144 /* flush wait and indebt stat deltas */
2145 if (iocg->wait_since) {
2146 iocg->stat.wait_us += now->now - iocg->wait_since;
2147 iocg->wait_since = now->now;
2149 if (iocg->indebt_since) {
2150 iocg->stat.indebt_us +=
2151 now->now - iocg->indebt_since;
2152 iocg->indebt_since = now->now;
2154 if (iocg->indelay_since) {
2155 iocg->stat.indelay_us +=
2156 now->now - iocg->indelay_since;
2157 iocg->indelay_since = now->now;
2160 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2162 /* might be oversleeping vtime / hweight changes, kick */
2163 iocg_kick_waitq(iocg, true, now);
2164 if (iocg->abs_vdebt || iocg->delay)
2166 } else if (iocg_is_idle(iocg)) {
2167 /* no waiter and idle, deactivate */
2168 u64 vtime = atomic64_read(&iocg->vtime);
2172 * @iocg has been inactive for a full duration and will
2173 * have a high budget. Account anything above target as
2174 * error and throw away. On reactivation, it'll start
2175 * with the target budget.
2177 excess = now->vnow - vtime - ioc->margins.target;
2181 current_hweight(iocg, NULL, &old_hwi);
2182 ioc->vtime_err -= div64_u64(excess * old_hwi,
2186 TRACE_IOCG_PATH(iocg_idle, iocg, now,
2187 atomic64_read(&iocg->active_period),
2188 atomic64_read(&ioc->cur_period), vtime);
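/*
 * Rough illustration of the error accounting above (numbers are made up):
 * if the idle iocg's vtime trails vnow by 300ms worth of vtime and
 * margins.target is 50ms worth, excess is 250ms worth; with old_hwi at
 * 25% of WEIGHT_ONE, 62.5ms worth is subtracted from ioc->vtime_err,
 * i.e. the excess budget is written off in proportion to the hweight the
 * iocg had rather than carried forward.
 */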
2189 __propagate_weights(iocg, 0, 0, false, now);
2190 list_del_init(&iocg->active_list);
2193 spin_unlock(&iocg->waitq.lock);
2196 commit_weights(ioc);
2200 static void ioc_timer_fn(struct timer_list *timer)
2202 struct ioc *ioc = container_of(timer, struct ioc, timer);
2203 struct ioc_gq *iocg, *tiocg;
2205 LIST_HEAD(surpluses);
2206 int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2207 u64 usage_us_sum = 0;
2210 u32 missed_ppm[2], rq_wait_pct;
2212 int prev_busy_level;
2214 /* how were the latencies during the period? */
2215 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2217 /* take care of active iocgs */
2218 spin_lock_irq(&ioc->lock);
2220 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2221 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2224 period_vtime = now.vnow - ioc->period_at_vtime;
2225 if (WARN_ON_ONCE(!period_vtime)) {
2226 spin_unlock_irq(&ioc->lock);
2230 nr_debtors = ioc_check_iocgs(ioc, &now);
2233 * Wait and indebt stat are flushed above and the donation calculation
2234 * below needs updated usage stat. Let's bring stat up-to-date.
2236 iocg_flush_stat(&ioc->active_iocgs, &now);
2238 /* calc usage and see whether some weights need to be moved around */
2239 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2240 u64 vdone, vtime, usage_us;
2241 u32 hw_active, hw_inuse;
2244 * Collect unused and wind vtime closer to vnow to prevent
2245 * iocgs from accumulating a large amount of budget.
2247 vdone = atomic64_read(&iocg->done_vtime);
2248 vtime = atomic64_read(&iocg->vtime);
2249 current_hweight(iocg, &hw_active, &hw_inuse);
2252 * Latency QoS detection doesn't account for IOs which are
2253 * in-flight for longer than a period. Detect them by
2254 * comparing vdone against period start. If lagging behind
2255 * IOs from past periods, don't increase vrate.
2257 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2258 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2259 time_after64(vtime, vdone) &&
2260 time_after64(vtime, now.vnow -
2261 MAX_LAGGING_PERIODS * period_vtime) &&
2262 time_before64(vdone, now.vnow - period_vtime))
2266 * Determine absolute usage factoring in in-flight IOs to avoid
2267 * high-latency completions appearing as idle.
2269 usage_us = iocg->usage_delta_us;
2270 usage_us_sum += usage_us;
2272 /* see whether there's surplus vtime */
2273 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2274 if (hw_inuse < hw_active ||
2275 (!waitqueue_active(&iocg->waitq) &&
2276 time_before64(vtime, now.vnow - ioc->margins.low))) {
2277 u32 hwa, old_hwi, hwm, new_hwi, usage;
2280 if (vdone != vtime) {
2281 u64 inflight_us = DIV64_U64_ROUND_UP(
2282 cost_to_abs_cost(vtime - vdone, hw_inuse),
2283 ioc->vtime_base_rate);
2285 usage_us = max(usage_us, inflight_us);
2288 /* convert to hweight based usage ratio */
2289 if (time_after64(iocg->activated_at, ioc->period_at))
2290 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2292 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2294 usage = clamp_t(u32,
2295 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2300 * Already donating or accumulated enough to start.
2301 * Determine the donation amount.
2303 current_hweight(iocg, &hwa, &old_hwi);
2304 hwm = current_hweight_max(iocg);
2305 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2308 * Donation calculation assumes hweight_after_donation
2309 * to be positive, a condition that a donor w/ hwa < 2
2310 * can't meet. Don't bother with donation if hwa is
2311 * below 2. It's not gonna make a meaningful difference anyway.
2314 if (new_hwi < hwm && hwa >= 2) {
2315 iocg->hweight_donating = hwa;
2316 iocg->hweight_after_donation = new_hwi;
2317 list_add(&iocg->surplus_list, &surpluses);
2318 } else if (!iocg->abs_vdebt) {
2320 * @iocg doesn't have enough to donate. Reset
2321 * its inuse to active.
2323 * Don't reset debtors as their inuse's are
2324 * owned by debt handling. This shouldn't affect
2325 * donation calculation in any meaningful way
2326 * as @iocg doesn't have a meaningful amount of share anyway.
2329 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2330 iocg->inuse, iocg->active,
2331 iocg->hweight_inuse, new_hwi);
2333 __propagate_weights(iocg, iocg->active,
2334 iocg->active, true, &now);
2338 /* genuinely short on vtime */
2343 if (!list_empty(&surpluses) && nr_shortages)
2344 transfer_surpluses(&surpluses, &now);
2346 commit_weights(ioc);
2348 /* surplus list should be dissolved after use */
2349 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2350 list_del_init(&iocg->surplus_list);
2353 * If q is getting clogged or we're missing too much, we're issuing
2354 * too much IO and should lower vtime rate. If we're not missing
2355 * and experiencing shortages but not surpluses, we're too stingy
2356 * and should increase vtime rate.
2358 prev_busy_level = ioc->busy_level;
2359 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2360 missed_ppm[READ] > ppm_rthr ||
2361 missed_ppm[WRITE] > ppm_wthr) {
2362 /* clearly missing QoS targets, slow down vrate */
2363 ioc->busy_level = max(ioc->busy_level, 0);
2365 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2366 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2367 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2368 /* QoS targets are being met with >25% margin */
2371 * We're throttling while the device has spare
2372 * capacity. If vrate was being slowed down, stop.
2374 ioc->busy_level = min(ioc->busy_level, 0);
2377 * If there are IOs spanning multiple periods, wait
2378 * them out before pushing the device harder.
2384 * Nobody is being throttled and the users aren't
2385 * issuing enough IOs to saturate the device. We
2386 * simply don't know how close the device is to
2387 * saturation. Coast.
2389 ioc->busy_level = 0;
2392 /* inside the hysteresis margin, we're good */
2393 ioc->busy_level = 0;
2396 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2398 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2399 prev_busy_level, missed_ppm);
2401 ioc_refresh_params(ioc, false);
2403 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2406 * This period is done. Move on to the next one. If nothing's
2407 * going on with the device, stop the timer.
2409 atomic64_inc(&ioc->cur_period);
2411 if (ioc->running != IOC_STOP) {
2412 if (!list_empty(&ioc->active_iocgs)) {
2413 ioc_start_period(ioc, &now);
2415 ioc->busy_level = 0;
2417 ioc->running = IOC_IDLE;
2420 ioc_refresh_vrate(ioc, &now);
2423 spin_unlock_irq(&ioc->lock);
2426 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2427 u64 abs_cost, struct ioc_now *now)
2429 struct ioc *ioc = iocg->ioc;
2430 struct ioc_margins *margins = &ioc->margins;
2431 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2434 u64 cost, new_inuse;
2436 current_hweight(iocg, NULL, &hwi);
2438 cost = abs_cost_to_cost(abs_cost, hwi);
2439 margin = now->vnow - vtime - cost;
2441 /* debt handling owns inuse for debtors */
2442 if (iocg->abs_vdebt)
2446 * We only increase inuse during period and do so if the margin has
2447 * deteriorated since the previous adjustment.
2449 if (margin >= iocg->saved_margin || margin >= margins->low ||
2450 iocg->inuse == iocg->active)
2453 spin_lock_irq(&ioc->lock);
2455 /* we own inuse only when @iocg is in the normal active state */
2456 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2457 spin_unlock_irq(&ioc->lock);
2462 * Bump up inuse till @abs_cost fits in the existing budget.
2463 * adj_step must be determined after acquiring ioc->lock - we might
2464 * have raced and lost to another thread for activation and could
2465 * be reading 0 iocg->active before ioc->lock which will lead to an infinite loop.
2468 new_inuse = iocg->inuse;
2469 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2471 new_inuse = new_inuse + adj_step;
2472 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2473 current_hweight(iocg, NULL, &hwi);
2474 cost = abs_cost_to_cost(abs_cost, hwi);
2475 } while (time_after64(vtime + cost, now->vnow) &&
2476 iocg->inuse != iocg->active);
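/*
 * Sketch of how the loop above behaves, assuming an adjustment step of
 * 25% of ->active (the actual INUSE_ADJ_STEP_PCT is defined elsewhere in
 * this file): with active = 10000, inuse grows by 2500 per iteration;
 * each step propagates the new inuse, re-reads the resulting
 * hweight_inuse and recomputes cost, and the loop stops once vtime + cost
 * fits within vnow or inuse reaches active, the hard ceiling.
 */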
2478 spin_unlock_irq(&ioc->lock);
2480 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2481 old_inuse, iocg->inuse, old_hwi, hwi);
2486 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2487 bool is_merge, u64 *costp)
2489 struct ioc *ioc = iocg->ioc;
2490 u64 coef_seqio, coef_randio, coef_page;
2491 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2495 switch (bio_op(bio)) {
2497 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2498 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2499 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2502 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2503 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2504 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2511 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2512 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2516 if (seek_pages > LCOEF_RANDIO_PAGES) {
2517 cost += coef_randio;
2522 cost += pages * coef_page;
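/*
 * Illustration of the linear model, assuming 4KiB pages: a 64KiB read
 * (16 pages) that continues from the previous IO's cursor is charged
 * coef_seqio + 16 * coef_page, while the same read landing beyond the
 * random threshold is charged coef_randio + 16 * coef_page.  The
 * coefficients come from ioc->params.lcoefs, derived from the
 * rbps/rseqiops/rrandiops style parameters of the cost model.
 */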
2527 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2531 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2535 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2538 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2540 switch (req_op(rq)) {
2542 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2545 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2552 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2556 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2560 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2562 struct blkcg_gq *blkg = bio->bi_blkg;
2563 struct ioc *ioc = rqos_to_ioc(rqos);
2564 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2566 struct iocg_wait wait;
2567 u64 abs_cost, cost, vtime;
2568 bool use_debt, ioc_locked;
2569 unsigned long flags;
2571 /* bypass IOs if disabled, still initializing, or for root cgroup */
2572 if (!ioc->enabled || !iocg || !iocg->level)
2575 /* calculate the absolute vtime cost */
2576 abs_cost = calc_vtime_cost(bio, iocg, false);
2580 if (!iocg_activate(iocg, &now))
2583 iocg->cursor = bio_end_sector(bio);
2584 vtime = atomic64_read(&iocg->vtime);
2585 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2588 * If no one's waiting and within budget, issue right away. The
2589 * tests are racy but the races aren't systemic - we only miss once
2590 * in a while which is fine.
2592 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2593 time_before_eq64(vtime + cost, now.vnow)) {
2594 iocg_commit_bio(iocg, bio, abs_cost, cost);
2599 * We're over budget. This can be handled in two ways. IOs which may
2600 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2601 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2602 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2603 * whether debt handling is needed and acquire locks accordingly.
2605 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2606 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2608 iocg_lock(iocg, ioc_locked, &flags);
2611 * @iocg must stay activated for debt and waitq handling. Deactivation
2612 * is synchronized against both ioc->lock and waitq.lock and we won't
2613 * get deactivated as long as we're waiting or have debt, so we're good
2614 * if we're activated here. In the unlikely cases that we aren't, just issue the IO.
2617 if (unlikely(list_empty(&iocg->active_list))) {
2618 iocg_unlock(iocg, ioc_locked, &flags);
2619 iocg_commit_bio(iocg, bio, abs_cost, cost);
2624 * We're over budget. If @bio has to be issued regardless, remember
2625 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2626 * off the debt before waking more IOs.
2628 * This way, the debt is continuously paid off each period with the
2629 * actual budget available to the cgroup. If we just wound vtime, we
2630 * would incorrectly use the current hw_inuse for the entire amount
2631 * which, for example, can lead to the cgroup staying blocked for a
2632 * long time even with substantially raised hw_inuse.
2634 * An iocg with vdebt should stay online so that the timer can keep
2635 * deducting its vdebt and [de]activate use_delay mechanism
2636 * accordingly. We don't want to race against the timer trying to
2637 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2638 * penalizing the cgroup and its descendants.
2641 iocg_incur_debt(iocg, abs_cost, &now);
2642 if (iocg_kick_delay(iocg, &now))
2643 blkcg_schedule_throttle(rqos->q->disk,
2644 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2645 iocg_unlock(iocg, ioc_locked, &flags);
2649 /* guarantee that iocgs w/ waiters have maximum inuse */
2650 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2652 iocg_unlock(iocg, false, &flags);
2656 propagate_weights(iocg, iocg->active, iocg->active, true,
2661 * Append self to the waitq and schedule the wakeup timer if we're
2662 * the first waiter. The timer duration is calculated based on the
2663 * current vrate. vtime and hweight changes can make it too short
2664 * or too long. Each wait entry records the absolute cost it's
2665 * waiting for to allow re-evaluation using a custom wait entry.
2667 * If too short, the timer simply reschedules itself. If too long,
2668 * the period timer will notice and trigger wakeups.
2670 * All waiters are on iocg->waitq and the wait states are
2671 * synchronized using waitq.lock.
2673 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2674 wait.wait.private = current;
2676 wait.abs_cost = abs_cost;
2677 wait.committed = false; /* will be set true by waker */
2679 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2680 iocg_kick_waitq(iocg, ioc_locked, &now);
2682 iocg_unlock(iocg, ioc_locked, &flags);
2685 set_current_state(TASK_UNINTERRUPTIBLE);
2691 /* waker already committed us, proceed */
2692 finish_wait(&iocg->waitq, &wait.wait);
2695 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2698 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2699 struct ioc *ioc = rqos_to_ioc(rqos);
2700 sector_t bio_end = bio_end_sector(bio);
2702 u64 vtime, abs_cost, cost;
2703 unsigned long flags;
2705 /* bypass if disabled, still initializing, or for root cgroup */
2706 if (!ioc->enabled || !iocg || !iocg->level)
2709 abs_cost = calc_vtime_cost(bio, iocg, true);
2715 vtime = atomic64_read(&iocg->vtime);
2716 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2718 /* update cursor if backmerging into the request at the cursor */
2719 if (blk_rq_pos(rq) < bio_end &&
2720 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2721 iocg->cursor = bio_end;
2724 * Charge if there's enough vtime budget and the existing request has its cost charged via bi_iocost_cost.
2727 if (rq->bio && rq->bio->bi_iocost_cost &&
2728 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2729 iocg_commit_bio(iocg, bio, abs_cost, cost);
2734 * Otherwise, account it as debt if @iocg is online, which it should
2735 * be for the vast majority of cases. See debt handling in
2736 * ioc_rqos_throttle() for details.
2738 spin_lock_irqsave(&ioc->lock, flags);
2739 spin_lock(&iocg->waitq.lock);
2741 if (likely(!list_empty(&iocg->active_list))) {
2742 iocg_incur_debt(iocg, abs_cost, &now);
2743 if (iocg_kick_delay(iocg, &now))
2744 blkcg_schedule_throttle(rqos->q->disk,
2745 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2747 iocg_commit_bio(iocg, bio, abs_cost, cost);
2750 spin_unlock(&iocg->waitq.lock);
2751 spin_unlock_irqrestore(&ioc->lock, flags);
2754 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2756 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2758 if (iocg && bio->bi_iocost_cost)
2759 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2762 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2764 struct ioc *ioc = rqos_to_ioc(rqos);
2765 struct ioc_pcpu_stat *ccs;
2766 u64 on_q_ns, rq_wait_ns, size_nsec;
2769 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2772 switch (req_op(rq)) {
2785 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2786 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2787 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2789 ccs = get_cpu_ptr(ioc->pcpu_stat);
2791 if (on_q_ns <= size_nsec ||
2792 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2793 local_inc(&ccs->missed[rw].nr_met);
2795 local_inc(&ccs->missed[rw].nr_missed);
2797 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
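/*
 * Illustration of the met/missed test above (numbers are made up): if a
 * write spent 8ms between allocation and completion and its size cost
 * translates to 2ms of expected transfer time, the remaining 6ms is
 * compared against the configured write latency target in microseconds;
 * 6ms within the target counts as met, otherwise missed.  Subtracting
 * size_nsec keeps large IOs from registering as latency misses purely
 * because of their transfer time.
 */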
2802 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2804 struct ioc *ioc = rqos_to_ioc(rqos);
2806 spin_lock_irq(&ioc->lock);
2807 ioc_refresh_params(ioc, false);
2808 spin_unlock_irq(&ioc->lock);
2811 static void ioc_rqos_exit(struct rq_qos *rqos)
2813 struct ioc *ioc = rqos_to_ioc(rqos);
2815 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2817 spin_lock_irq(&ioc->lock);
2818 ioc->running = IOC_STOP;
2819 spin_unlock_irq(&ioc->lock);
2821 del_timer_sync(&ioc->timer);
2822 free_percpu(ioc->pcpu_stat);
2826 static struct rq_qos_ops ioc_rqos_ops = {
2827 .throttle = ioc_rqos_throttle,
2828 .merge = ioc_rqos_merge,
2829 .done_bio = ioc_rqos_done_bio,
2830 .done = ioc_rqos_done,
2831 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2832 .exit = ioc_rqos_exit,
2835 static int blk_iocost_init(struct gendisk *disk)
2837 struct request_queue *q = disk->queue;
2839 struct rq_qos *rqos;
2842 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2846 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2847 if (!ioc->pcpu_stat) {
2852 for_each_possible_cpu(cpu) {
2853 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2855 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2856 local_set(&ccs->missed[i].nr_met, 0);
2857 local_set(&ccs->missed[i].nr_missed, 0);
2859 local64_set(&ccs->rq_wait_ns, 0);
2863 rqos->id = RQ_QOS_COST;
2864 rqos->ops = &ioc_rqos_ops;
2867 spin_lock_init(&ioc->lock);
2868 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2869 INIT_LIST_HEAD(&ioc->active_iocgs);
2871 ioc->running = IOC_IDLE;
2872 ioc->vtime_base_rate = VTIME_PER_USEC;
2873 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2874 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2875 ioc->period_at = ktime_to_us(ktime_get());
2876 atomic64_set(&ioc->cur_period, 0);
2877 atomic_set(&ioc->hweight_gen, 0);
2879 spin_lock_irq(&ioc->lock);
2880 ioc->autop_idx = AUTOP_INVALID;
2881 ioc_refresh_params(ioc, true);
2882 spin_unlock_irq(&ioc->lock);
2885 * rqos must be added before activation to allow ioc_pd_init() to
2886 * look up the ioc from q. This means that the rqos methods may get
2887 * called before policy activation completes, so they can't assume that
2888 * the target bio has an iocg associated and need to test for a NULL iocg.
2890 ret = rq_qos_add(q, rqos);
2894 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2900 rq_qos_del(q, rqos);
2902 free_percpu(ioc->pcpu_stat);
2907 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2909 struct ioc_cgrp *iocc;
2911 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2915 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
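/*
 * Weights are kept in WEIGHT_ONE (1 << 16) fixed point, so the default
 * cgroup weight of 100 is stored as 100 * 65536 here and divided back
 * out by WEIGHT_ONE when presented through io.weight below.
 */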
2919 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2921 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2924 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2925 struct blkcg *blkcg)
2927 int levels = blkcg->css.cgroup->level + 1;
2928 struct ioc_gq *iocg;
2930 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2934 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2935 if (!iocg->pcpu_stat) {
2943 static void ioc_pd_init(struct blkg_policy_data *pd)
2945 struct ioc_gq *iocg = pd_to_iocg(pd);
2946 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2947 struct ioc *ioc = q_to_ioc(blkg->q);
2949 struct blkcg_gq *tblkg;
2950 unsigned long flags;
2955 atomic64_set(&iocg->vtime, now.vnow);
2956 atomic64_set(&iocg->done_vtime, now.vnow);
2957 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2958 INIT_LIST_HEAD(&iocg->active_list);
2959 INIT_LIST_HEAD(&iocg->walk_list);
2960 INIT_LIST_HEAD(&iocg->surplus_list);
2961 iocg->hweight_active = WEIGHT_ONE;
2962 iocg->hweight_inuse = WEIGHT_ONE;
2964 init_waitqueue_head(&iocg->waitq);
2965 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2966 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2968 iocg->level = blkg->blkcg->css.cgroup->level;
2970 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2971 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2972 iocg->ancestors[tiocg->level] = tiocg;
2975 spin_lock_irqsave(&ioc->lock, flags);
2976 weight_updated(iocg, &now);
2977 spin_unlock_irqrestore(&ioc->lock, flags);
2980 static void ioc_pd_free(struct blkg_policy_data *pd)
2982 struct ioc_gq *iocg = pd_to_iocg(pd);
2983 struct ioc *ioc = iocg->ioc;
2984 unsigned long flags;
2987 spin_lock_irqsave(&ioc->lock, flags);
2989 if (!list_empty(&iocg->active_list)) {
2993 propagate_weights(iocg, 0, 0, false, &now);
2994 list_del_init(&iocg->active_list);
2997 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2998 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3000 spin_unlock_irqrestore(&ioc->lock, flags);
3002 hrtimer_cancel(&iocg->waitq_timer);
3004 free_percpu(iocg->pcpu_stat);
3008 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3010 struct ioc_gq *iocg = pd_to_iocg(pd);
3011 struct ioc *ioc = iocg->ioc;
3016 if (iocg->level == 0) {
3017 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3018 ioc->vtime_base_rate * 10000,
3020 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3023 seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3025 if (blkcg_debug_stats)
3026 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3027 iocg->last_stat.wait_us,
3028 iocg->last_stat.indebt_us,
3029 iocg->last_stat.indelay_us);
3032 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3035 const char *dname = blkg_dev_name(pd->blkg);
3036 struct ioc_gq *iocg = pd_to_iocg(pd);
3038 if (dname && iocg->cfg_weight)
3039 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3044 static int ioc_weight_show(struct seq_file *sf, void *v)
3046 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3047 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3049 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3050 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3051 &blkcg_policy_iocost, seq_cft(sf)->private, false);
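/*
 * For illustration, reading io.weight from a cgroup with the default
 * weight plus one per-device override would produce something like
 *
 *   default 100
 *   8:16 200
 *
 * where "8:16" stands in for a real device's major:minor numbers.
 */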
3055 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3056 size_t nbytes, loff_t off)
3058 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3059 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3060 struct blkg_conf_ctx ctx;
3062 struct ioc_gq *iocg;
3066 if (!strchr(buf, ':')) {
3067 struct blkcg_gq *blkg;
3069 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3072 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3075 spin_lock_irq(&blkcg->lock);
3076 iocc->dfl_weight = v * WEIGHT_ONE;
3077 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3078 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3081 spin_lock(&iocg->ioc->lock);
3082 ioc_now(iocg->ioc, &now);
3083 weight_updated(iocg, &now);
3084 spin_unlock(&iocg->ioc->lock);
3087 spin_unlock_irq(&blkcg->lock);
3092 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3096 iocg = blkg_to_iocg(ctx.blkg);
3098 if (!strncmp(ctx.body, "default", 7)) {
3101 if (!sscanf(ctx.body, "%u", &v))
3103 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3107 spin_lock(&iocg->ioc->lock);
3108 iocg->cfg_weight = v * WEIGHT_ONE;
3109 ioc_now(iocg->ioc, &now);
3110 weight_updated(iocg, &now);
3111 spin_unlock(&iocg->ioc->lock);
3113 blkg_conf_finish(&ctx);
3117 blkg_conf_finish(&ctx);
3121 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3124 const char *dname = blkg_dev_name(pd->blkg);
3125 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3130 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3131 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3132 ioc->params.qos[QOS_RPPM] / 10000,
3133 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3134 ioc->params.qos[QOS_RLAT],
3135 ioc->params.qos[QOS_WPPM] / 10000,
3136 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3137 ioc->params.qos[QOS_WLAT],
3138 ioc->params.qos[QOS_MIN] / 10000,
3139 ioc->params.qos[QOS_MIN] % 10000 / 100,
3140 ioc->params.qos[QOS_MAX] / 10000,
3141 ioc->params.qos[QOS_MAX] % 10000 / 100);
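/*
 * For illustration, the line printed above looks something like
 *
 *   8:16 enable=1 ctrl=auto rpct=95.00 rlat=5000 wpct=95.00 wlat=5000 min=50.00 max=150.00
 *
 * (hypothetical values).  rpct/wpct and min/max are stored per-million
 * internally and shown as percentages with two decimals; rlat/wlat are
 * in microseconds.
 */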
3145 static int ioc_qos_show(struct seq_file *sf, void *v)
3147 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3149 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3150 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3154 static const match_table_t qos_ctrl_tokens = {
3155 { QOS_ENABLE, "enable=%u" },
3156 { QOS_CTRL, "ctrl=%s" },
3157 { NR_QOS_CTRL_PARAMS, NULL },
3160 static const match_table_t qos_tokens = {
3161 { QOS_RPPM, "rpct=%s" },
3162 { QOS_RLAT, "rlat=%u" },
3163 { QOS_WPPM, "wpct=%s" },
3164 { QOS_WLAT, "wlat=%u" },
3165 { QOS_MIN, "min=%s" },
3166 { QOS_MAX, "max=%s" },
3167 { NR_QOS_PARAMS, NULL },
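/*
 * For illustration, a configuration write accepted by the parser below
 * looks something like (device and values hypothetical):
 *
 *   echo "8:16 enable=1 ctrl=auto rpct=95.00 rlat=5000 wlat=5000 min=50 max=150" \
 *       > /sys/fs/cgroup/io.cost.qos
 *
 * Tokens can be given in any combination; omitted ones keep their current
 * values because parsing starts from a copy of ioc->params.qos.
 */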
3170 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3171 size_t nbytes, loff_t off)
3173 struct block_device *bdev;
3174 struct gendisk *disk;
3176 u32 qos[NR_QOS_PARAMS];
3181 bdev = blkcg_conf_open_bdev(&input);
3183 return PTR_ERR(bdev);
3185 disk = bdev->bd_disk;
3186 ioc = q_to_ioc(disk->queue);
3188 ret = blk_iocost_init(disk);
3191 ioc = q_to_ioc(disk->queue);
3194 blk_mq_freeze_queue(disk->queue);
3195 blk_mq_quiesce_queue(disk->queue);
3197 spin_lock_irq(&ioc->lock);
3198 memcpy(qos, ioc->params.qos, sizeof(qos));
3199 enable = ioc->enabled;
3200 user = ioc->user_qos_params;
3202 while ((p = strsep(&input, " \t\n"))) {
3203 substring_t args[MAX_OPT_ARGS];
3211 switch (match_token(p, qos_ctrl_tokens, args)) {
3213 match_u64(&args[0], &v);
3217 match_strlcpy(buf, &args[0], sizeof(buf));
3218 if (!strcmp(buf, "auto"))
3220 else if (!strcmp(buf, "user"))
3227 tok = match_token(p, qos_tokens, args);
3231 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3234 if (cgroup_parse_float(buf, 2, &v))
3236 if (v < 0 || v > 10000)
3242 if (match_u64(&args[0], &v))
3248 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3251 if (cgroup_parse_float(buf, 2, &v))
3255 qos[tok] = clamp_t(s64, v * 100,
3256 VRATE_MIN_PPM, VRATE_MAX_PPM);
3264 if (qos[QOS_MIN] > qos[QOS_MAX])
3268 blk_stat_enable_accounting(disk->queue);
3269 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3270 ioc->enabled = true;
3271 wbt_disable_default(disk->queue);
3273 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3274 ioc->enabled = false;
3275 wbt_enable_default(disk->queue);
3279 memcpy(ioc->params.qos, qos, sizeof(qos));
3280 ioc->user_qos_params = true;
3282 ioc->user_qos_params = false;
3285 ioc_refresh_params(ioc, true);
3286 spin_unlock_irq(&ioc->lock);
3288 blk_mq_unquiesce_queue(disk->queue);
3289 blk_mq_unfreeze_queue(disk->queue);
3291 blkdev_put_no_open(bdev);
3294 spin_unlock_irq(&ioc->lock);
3296 blk_mq_unquiesce_queue(disk->queue);
3297 blk_mq_unfreeze_queue(disk->queue);
3301 blkdev_put_no_open(bdev);
3305 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3306 struct blkg_policy_data *pd, int off)
3308 const char *dname = blkg_dev_name(pd->blkg);
3309 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3310 u64 *u = ioc->params.i_lcoefs;
3315 seq_printf(sf, "%s ctrl=%s model=linear "
3316 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3317 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3318 dname, ioc->user_cost_model ? "user" : "auto",
3319 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3320 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
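/*
 * For illustration, the line printed above looks something like
 *
 *   8:16 ctrl=auto model=linear rbps=500000000 rseqiops=10000 rrandiops=9000 wbps=400000000 wseqiops=9000 wrandiops=8000
 *
 * (hypothetical numbers).  The same keys are accepted when writing to
 * io.cost.model, as parsed with i_lcoef_tokens below.
 */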
3324 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3326 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3328 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3329 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3333 static const match_table_t cost_ctrl_tokens = {
3334 { COST_CTRL, "ctrl=%s" },
3335 { COST_MODEL, "model=%s" },
3336 { NR_COST_CTRL_PARAMS, NULL },
3339 static const match_table_t i_lcoef_tokens = {
3340 { I_LCOEF_RBPS, "rbps=%u" },
3341 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3342 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3343 { I_LCOEF_WBPS, "wbps=%u" },
3344 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3345 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3346 { NR_I_LCOEFS, NULL },
3349 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3350 size_t nbytes, loff_t off)
3352 struct block_device *bdev;
3353 struct request_queue *q;
3360 bdev = blkcg_conf_open_bdev(&input);
3362 return PTR_ERR(bdev);
3364 q = bdev_get_queue(bdev);
3367 ret = blk_iocost_init(bdev->bd_disk);
3373 blk_mq_freeze_queue(q);
3374 blk_mq_quiesce_queue(q);
3376 spin_lock_irq(&ioc->lock);
3377 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3378 user = ioc->user_cost_model;
3380 while ((p = strsep(&input, " \t\n"))) {
3381 substring_t args[MAX_OPT_ARGS];
3389 switch (match_token(p, cost_ctrl_tokens, args)) {
3391 match_strlcpy(buf, &args[0], sizeof(buf));
3392 if (!strcmp(buf, "auto"))
3394 else if (!strcmp(buf, "user"))
3400 match_strlcpy(buf, &args[0], sizeof(buf));
3401 if (strcmp(buf, "linear"))
3406 tok = match_token(p, i_lcoef_tokens, args);
3407 if (tok == NR_I_LCOEFS)
3409 if (match_u64(&args[0], &v))
3416 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3417 ioc->user_cost_model = true;
3419 ioc->user_cost_model = false;
3421 ioc_refresh_params(ioc, true);
3422 spin_unlock_irq(&ioc->lock);
3424 blk_mq_unquiesce_queue(q);
3425 blk_mq_unfreeze_queue(q);
3427 blkdev_put_no_open(bdev);
3431 spin_unlock_irq(&ioc->lock);
3433 blk_mq_unquiesce_queue(q);
3434 blk_mq_unfreeze_queue(q);
3438 blkdev_put_no_open(bdev);
3442 static struct cftype ioc_files[] = {
3445 .flags = CFTYPE_NOT_ON_ROOT,
3446 .seq_show = ioc_weight_show,
3447 .write = ioc_weight_write,
3451 .flags = CFTYPE_ONLY_ON_ROOT,
3452 .seq_show = ioc_qos_show,
3453 .write = ioc_qos_write,
3456 .name = "cost.model",
3457 .flags = CFTYPE_ONLY_ON_ROOT,
3458 .seq_show = ioc_cost_model_show,
3459 .write = ioc_cost_model_write,
3464 static struct blkcg_policy blkcg_policy_iocost = {
3465 .dfl_cftypes = ioc_files,
3466 .cpd_alloc_fn = ioc_cpd_alloc,
3467 .cpd_free_fn = ioc_cpd_free,
3468 .pd_alloc_fn = ioc_pd_alloc,
3469 .pd_init_fn = ioc_pd_init,
3470 .pd_free_fn = ioc_pd_free,
3471 .pd_stat_fn = ioc_pd_stat,
3474 static int __init ioc_init(void)
3476 return blkcg_policy_register(&blkcg_policy_iocost);
3479 static void __exit ioc_exit(void)
3481 blkcg_policy_unregister(&blkcg_policy_iocost);
3484 module_init(ioc_init);
3485 module_exit(ioc_exit);