1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
9 * One challenge of controlling IO resources is the lack of a trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough cost metrics.
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide variety of devices well enough. Default
42 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
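 *
 * For intuition, the linear model reduces the per-IO charge to roughly
 * (identifier names here are illustrative, the real coefficients are the
 * lcoefs derived further below):
 *
 *	abs_cost = (is_seq ? seqio_base : randio_base) + nr_4k_pages * page_cost;
 *
 * e.g. with a made-up random base cost of 10us and a per-page cost of
 * 1us, a 64k random read would be charged about 10 + 16 * 1 = 26us worth
 * of device time.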
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
54 * 2-1. Vtime Distribution
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
64 * A0 (w:100) A1 (w:100)
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
71 * up to 1 (WEIGHT_ONE).
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's vtime runs 8 times (100/12.5) slower
75 * than the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO if doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
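 *
 * As a rough sketch (pseudo-code, not the actual helpers used below), the
 * per-bio admission test boils down to:
 *
 *	cost = abs_cost * WEIGHT_ONE / hweight_inuse;
 *	if (iocg_vtime + cost <= device_vnow)
 *		issue now and advance iocg_vtime by cost;
 *	else
 *		wait (or take on debt) until device_vnow catches up;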
84 * 2-2. Vrate Adjustment
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should issue more.
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
102 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default busy signal.
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth lossage. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
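 *
 * For example, a configuration along the lines of (device number and
 * values purely illustrative)
 *
 *	8:16 enable=1 ctrl=user rpct=95.00 rlat=75000 wpct=95.00 wlat=150000 min=50.00 max=150.00
 *
 * would consider the device saturated when the 95th percentile read
 * completion latency exceeds 75ms or the write latency exceeds 150ms,
 * with vrate clamped between 50% and 150%.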
125 * 2-3. Work Conservation
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, squaring away who can
149 * donate and who should take back how much requires hweight propagations
150 * anyway, making it easier to implement and understand as a separate mechanism.
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The output looks like the following.
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - del_ms : Deferred issuer delay induction level and duration
172 * - usages : Usage history
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
186 #include "blk-cgroup.h"
188 #ifdef CONFIG_TRACEPOINTS
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
208 #else /* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
210 #endif /* CONFIG_TRACEPOINTS */
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
226 MARGIN_TARGET_PCT = 50,
228 INUSE_ADJ_STEP_PCT = 25,
230 /* Have some play in timer operations */
233 /* 1/64k is granular enough and can easily be handled w/ u32 */
234 WEIGHT_ONE = 1 << 16,
237 * As vtime is used to calculate the cost of each IO, it needs to
238 * be fairly high precision. For example, it should be able to
239 * represent the cost of a single page worth of discard with
240 * sufficient accuracy. At the same time, it should be able to
241 * represent reasonably long enough durations to be useful and
242 * convenient during operation.
244 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
245 * granularity and days of wrap-around time even at extreme vrates.
247 VTIME_PER_SEC_SHIFT = 37,
248 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
249 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
250 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
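	/*
	 * For scale: VTIME_PER_NSEC works out to ~137, so even sub-page
	 * costs stay well above one vtime unit, while a 64bit vtime only
	 * wraps after 2^63 / 2^37 = 2^26 seconds (~776 days) at 100% vrate
	 * and still after roughly a week at the 10000% vrate ceiling.
	 */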
252 /* bound vrate adjustments within two orders of magnitude */
253 VRATE_MIN_PPM = 10000, /* 1% */
254 VRATE_MAX_PPM = 100000000, /* 10000% */
256 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
257 VRATE_CLAMP_ADJ_PCT = 4,
259 /* if IOs end up waiting for requests, issue less */
260 RQ_WAIT_BUSY_PCT = 5,
262 /* unbusy hysteresis */
266 * The effect of delay is indirect and non-linear and a huge amount of
267 * future debt can accumulate abruptly while unthrottled. Linearly scale
268 * up delay as debt is going up and then let it decay exponentially.
269 * This gives us quick ramp ups while delay is accumulating and long
270 * tails which help reduce the frequency of debt explosions on
271 * unthrottle. The parameters are experimentally determined.
273 * The delay mechanism provides adequate protection and behavior in many
274 * cases. However, this is far from ideal and falls short on both
275 * fronts. The debtors are often throttled too harshly costing a
276 * significant level of fairness and possibly total work while the
277 * protection against their impacts on the system can be choppy and unreliable.
280 * The shortcoming primarily stems from the fact that, unlike for page
281 * cache, the kernel doesn't have well-defined back-pressure propagation
282 * mechanism and policies for anonymous memory. Fully addressing this
283 * issue will likely require substantial improvements in the area.
285 MIN_DELAY_THR_PCT = 500,
286 MAX_DELAY_THR_PCT = 25000,
288 MAX_DELAY = 250 * USEC_PER_MSEC,
290 /* halve debts if avg usage over 100ms is under 50% */
292 DFGV_PERIOD = 100 * USEC_PER_MSEC,
294 /* don't let cmds which take a very long time pin lagging for too long */
295 MAX_LAGGING_PERIODS = 10,
297 /* switch iff the conditions are met for longer than this */
298 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
301 * Count IO size in 4k pages. The 12 bit shift helps keep the
302 * size-proportional components of the cost calculation within a
303 * similar number of digits as the per-IO cost components.
306 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
307 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
309 /* if further apart than 16M, consider it randio for the linear model */
310 LCOEF_RANDIO_PAGES = 4096,
319 /* io.cost.qos controls including per-dev enable of the whole controller */
326 /* io.cost.qos params */
337 /* io.cost.model controls */
344 /* builtin linear cost model coefficients */
374 u32 qos[NR_QOS_PARAMS];
375 u64 i_lcoefs[NR_I_LCOEFS];
376 u64 lcoefs[NR_LCOEFS];
377 u32 too_fast_vrate_pct;
378 u32 too_slow_vrate_pct;
394 struct ioc_pcpu_stat {
395 struct ioc_missed missed[2];
397 local64_t rq_wait_ns;
407 struct ioc_params params;
408 struct ioc_margins margins;
415 struct timer_list timer;
416 struct list_head active_iocgs; /* active cgroups */
417 struct ioc_pcpu_stat __percpu *pcpu_stat;
419 enum ioc_running running;
420 atomic64_t vtime_rate;
424 seqcount_spinlock_t period_seqcount;
425 u64 period_at; /* wallclock starttime */
426 u64 period_at_vtime; /* vtime starttime */
428 atomic64_t cur_period; /* inc'd each period */
429 int busy_level; /* saturation history */
431 bool weights_updated;
432 atomic_t hweight_gen; /* for lazy hweights */
434 /* debt forgiveness */
437 u64 dfgv_usage_us_sum;
439 u64 autop_too_fast_at;
440 u64 autop_too_slow_at;
442 bool user_qos_params:1;
443 bool user_cost_model:1;
446 struct iocg_pcpu_stat {
447 local64_t abs_vusage;
457 /* per device-cgroup pair */
459 struct blkg_policy_data pd;
463 * An iocg can get its weight from two sources - an explicit
464 * per-device-cgroup configuration or the default weight of the
465 * cgroup. `cfg_weight` is the explicit per-device-cgroup
466 * configuration. `weight` is the effective considering both
469 * When an idle cgroup becomes active its `active` goes from 0 to
470 * `weight`. `inuse` is the surplus adjusted active weight.
471 * `active` and `inuse` are used to calculate `hweight_active` and
474 * `last_inuse` remembers `inuse` while an iocg is idle to persist
475 * surplus adjustments.
477 * `inuse` may be adjusted dynamically during period. `saved_*` are used
478 * to determine and track adjustments.
488 sector_t cursor; /* to detect randio */
491 * `vtime` is this iocg's vtime cursor which progresses as IOs are
492 * issued. If lagging behind device vtime, the delta represents
493 * the currently available IO budget. If running ahead, the
496 * `vtime_done` is the same but progressed on completion rather
497 * than issue. The delta behind `vtime` represents the cost of
498 * currently in-flight IOs.
501 atomic64_t done_vtime;
504 /* current delay in effect and when it started */
509 * The period this iocg was last active in. Used for deactivation
510 * and invalidating `vtime`.
512 atomic64_t active_period;
513 struct list_head active_list;
515 /* see __propagate_weights() and current_hweight() for details */
516 u64 child_active_sum;
518 u64 child_adjusted_sum;
522 u32 hweight_donating;
523 u32 hweight_after_donation;
525 struct list_head walk_list;
526 struct list_head surplus_list;
528 struct wait_queue_head waitq;
529 struct hrtimer waitq_timer;
531 /* timestamp at the latest activation */
535 struct iocg_pcpu_stat __percpu *pcpu_stat;
536 struct iocg_stat stat;
537 struct iocg_stat last_stat;
538 u64 last_stat_abs_vusage;
544 /* this iocg's depth in the hierarchy and ancestors including self */
546 struct ioc_gq *ancestors[];
551 struct blkcg_policy_data cpd;
552 unsigned int dfl_weight;
563 struct wait_queue_entry wait;
569 struct iocg_wake_ctx {
575 static const struct ioc_params autop[] = {
578 [QOS_RLAT] = 250000, /* 250ms */
580 [QOS_MIN] = VRATE_MIN_PPM,
581 [QOS_MAX] = VRATE_MAX_PPM,
584 [I_LCOEF_RBPS] = 174019176,
585 [I_LCOEF_RSEQIOPS] = 41708,
586 [I_LCOEF_RRANDIOPS] = 370,
587 [I_LCOEF_WBPS] = 178075866,
588 [I_LCOEF_WSEQIOPS] = 42705,
589 [I_LCOEF_WRANDIOPS] = 378,
594 [QOS_RLAT] = 25000, /* 25ms */
596 [QOS_MIN] = VRATE_MIN_PPM,
597 [QOS_MAX] = VRATE_MAX_PPM,
600 [I_LCOEF_RBPS] = 245855193,
601 [I_LCOEF_RSEQIOPS] = 61575,
602 [I_LCOEF_RRANDIOPS] = 6946,
603 [I_LCOEF_WBPS] = 141365009,
604 [I_LCOEF_WSEQIOPS] = 33716,
605 [I_LCOEF_WRANDIOPS] = 26796,
610 [QOS_RLAT] = 25000, /* 25ms */
612 [QOS_MIN] = VRATE_MIN_PPM,
613 [QOS_MAX] = VRATE_MAX_PPM,
616 [I_LCOEF_RBPS] = 488636629,
617 [I_LCOEF_RSEQIOPS] = 8932,
618 [I_LCOEF_RRANDIOPS] = 8518,
619 [I_LCOEF_WBPS] = 427891549,
620 [I_LCOEF_WSEQIOPS] = 28755,
621 [I_LCOEF_WRANDIOPS] = 21940,
623 .too_fast_vrate_pct = 500,
627 [QOS_RLAT] = 5000, /* 5ms */
629 [QOS_MIN] = VRATE_MIN_PPM,
630 [QOS_MAX] = VRATE_MAX_PPM,
633 [I_LCOEF_RBPS] = 3102524156LLU,
634 [I_LCOEF_RSEQIOPS] = 724816,
635 [I_LCOEF_RRANDIOPS] = 778122,
636 [I_LCOEF_WBPS] = 1742780862LLU,
637 [I_LCOEF_WSEQIOPS] = 425702,
638 [I_LCOEF_WRANDIOPS] = 443193,
640 .too_slow_vrate_pct = 10,
645 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
646 * vtime credit shortage and down on device saturation.
648 static u32 vrate_adj_pct[] =
650 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
651 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
652 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
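/*
 * For example, after three consecutive periods of vtime credit shortage
 * (busy_level == -3) the table yields 1 and vrate is nudged up by 1% per
 * period, while a device that has stayed saturated for 48+ periods gets
 * pulled down by 16% per period. ioc_adjust_base_vrate() below picks the
 * direction from the sign of busy_level.
 */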
654 static struct blkcg_policy blkcg_policy_iocost;
656 /* accessors and helpers */
657 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
659 return container_of(rqos, struct ioc, rqos);
662 static struct ioc *q_to_ioc(struct request_queue *q)
664 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
667 static const char __maybe_unused *ioc_name(struct ioc *ioc)
669 struct gendisk *disk = ioc->rqos.q->disk;
673 return disk->disk_name;
676 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
678 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
681 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
683 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
686 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
688 return pd_to_blkg(&iocg->pd);
691 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
693 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
694 struct ioc_cgrp, cpd);
698 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
699 * weight, the more expensive each IO. Must round up.
701 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
703 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
707 * The inverse of abs_cost_to_cost(). Must round up.
709 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
711 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
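/*
 * For example, with hw_inuse at a quarter of WEIGHT_ONE (a 25% hierarchical
 * share), abs_cost_to_cost() turns a 1ms absolute device cost into a 4ms
 * charge against the cgroup's vtime and cost_to_abs_cost() undoes the
 * scaling. Numbers are illustrative only.
 */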
714 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
715 u64 abs_cost, u64 cost)
717 struct iocg_pcpu_stat *gcs;
719 bio->bi_iocost_cost = cost;
720 atomic64_add(cost, &iocg->vtime);
722 gcs = get_cpu_ptr(iocg->pcpu_stat);
723 local64_add(abs_cost, &gcs->abs_vusage);
727 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
730 spin_lock_irqsave(&iocg->ioc->lock, *flags);
731 spin_lock(&iocg->waitq.lock);
733 spin_lock_irqsave(&iocg->waitq.lock, *flags);
737 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
740 spin_unlock(&iocg->waitq.lock);
741 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
743 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
747 #define CREATE_TRACE_POINTS
748 #include <trace/events/iocost.h>
750 static void ioc_refresh_margins(struct ioc *ioc)
752 struct ioc_margins *margins = &ioc->margins;
753 u32 period_us = ioc->period_us;
754 u64 vrate = ioc->vtime_base_rate;
756 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
757 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
758 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
761 /* latency QoS params changed, update period_us and all the dependent params */
762 static void ioc_refresh_period_us(struct ioc *ioc)
764 u32 ppm, lat, multi, period_us;
766 lockdep_assert_held(&ioc->lock);
768 /* pick the higher latency target */
769 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
770 ppm = ioc->params.qos[QOS_RPPM];
771 lat = ioc->params.qos[QOS_RLAT];
773 ppm = ioc->params.qos[QOS_WPPM];
774 lat = ioc->params.qos[QOS_WLAT];
778 * We want the period to be long enough to contain a healthy number
779 * of IOs while short enough for granular control. Define it as a
780 * multiple of the latency target. Ideally, the multiplier should
781 * be scaled according to the percentile so that it would nominally
782 * contain a certain number of requests. Let's be simpler and
783 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
786 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
789 period_us = multi * lat;
790 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
792 /* calculate dependent params */
793 ioc->period_us = period_us;
794 ioc->timer_slack_ns = div64_u64(
795 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
797 ioc_refresh_margins(ioc);
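/*
 * Worked example with illustrative values: with a 25000us read latency
 * target at the 95th percentile (ppm = 950000), multi becomes
 * max((1000000 - 950000) / 50000, 2) = 2 and the period ends up 50ms,
 * clamped between MIN_PERIOD and MAX_PERIOD.
 */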
800 static int ioc_autop_idx(struct ioc *ioc)
802 int idx = ioc->autop_idx;
803 const struct ioc_params *p = &autop[idx];
808 if (!blk_queue_nonrot(ioc->rqos.q))
811 /* handle SATA SSDs w/ broken NCQ */
812 if (blk_queue_depth(ioc->rqos.q) == 1)
813 return AUTOP_SSD_QD1;
815 /* use one of the normal ssd sets */
816 if (idx < AUTOP_SSD_DFL)
817 return AUTOP_SSD_DFL;
819 /* if user is overriding anything, maintain what was there */
820 if (ioc->user_qos_params || ioc->user_cost_model)
823 /* step up/down based on the vrate */
824 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
825 now_ns = ktime_get_ns();
827 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
828 if (!ioc->autop_too_fast_at)
829 ioc->autop_too_fast_at = now_ns;
830 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
833 ioc->autop_too_fast_at = 0;
836 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
837 if (!ioc->autop_too_slow_at)
838 ioc->autop_too_slow_at = now_ns;
839 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
842 ioc->autop_too_slow_at = 0;
849 * Take the following as input
851 * @bps maximum sequential throughput
852 * @seqiops maximum sequential 4k iops
853 * @randiops maximum random 4k iops
855 * and calculate the linear model cost coefficients.
857 * *@page per-page cost 1s / (@bps / 4096)
858 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
859 * *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
861 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
862 u64 *page, u64 *seqio, u64 *randio)
866 *page = *seqio = *randio = 0;
869 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
870 DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
873 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
879 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
885 static void ioc_refresh_lcoefs(struct ioc *ioc)
887 u64 *u = ioc->params.i_lcoefs;
888 u64 *c = ioc->params.lcoefs;
890 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
891 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
892 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
893 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
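/*
 * Worked example with made-up device parameters: rbps = 250MB/s,
 * rseqiops = 60000, rrandiops = 7000. The device moves ~61035 4k pages
 * per second, so LCOEF_RPAGE is ~16.4us worth of vtime per page. A
 * sequential 4k read takes ~16.7us in total, leaving a ~0.3us
 * LCOEF_RSEQIO base, while a random 4k read takes ~143us, leaving a
 * ~126us LCOEF_RRANDIO base.
 */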
896 static bool ioc_refresh_params(struct ioc *ioc, bool force)
898 const struct ioc_params *p;
901 lockdep_assert_held(&ioc->lock);
903 idx = ioc_autop_idx(ioc);
906 if (idx == ioc->autop_idx && !force)
909 if (idx != ioc->autop_idx)
910 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
912 ioc->autop_idx = idx;
913 ioc->autop_too_fast_at = 0;
914 ioc->autop_too_slow_at = 0;
916 if (!ioc->user_qos_params)
917 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
918 if (!ioc->user_cost_model)
919 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
921 ioc_refresh_period_us(ioc);
922 ioc_refresh_lcoefs(ioc);
924 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
925 VTIME_PER_USEC, MILLION);
926 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
927 VTIME_PER_USEC, MILLION);
933 * When an iocg accumulates too much vtime or gets deactivated, we throw away
934 * some vtime, which lowers the overall device utilization. As the exact amount
935 * which is being thrown away is known, we can compensate by accelerating the
936 * vrate accordingly so that the extra vtime generated in the current period
937 * matches what got lost.
939 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
941 s64 pleft = ioc->period_at + ioc->period_us - now->now;
942 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
943 s64 vcomp, vcomp_min, vcomp_max;
945 lockdep_assert_held(&ioc->lock);
947 /* we need some time left in this period */
952 * Calculate how much vrate should be adjusted to offset the error.
953 * Limit the amount of adjustment and deduct the adjusted amount from
956 vcomp = -div64_s64(ioc->vtime_err, pleft);
957 vcomp_min = -(ioc->vtime_base_rate >> 1);
958 vcomp_max = ioc->vtime_base_rate;
959 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
961 ioc->vtime_err += vcomp * pleft;
963 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
965 /* bound how much error can accumulate */
966 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
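/*
 * For example, if deactivations threw away 10ms worth of device vtime and
 * 100ms remain in the current period, vcomp works out to about +10% of
 * the base rate for the rest of the period, bounded to [-50%, +100%] of
 * vtime_base_rate, and the compensated amount is deducted from vtime_err
 * so it isn't applied twice. Numbers are illustrative.
 */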
969 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
970 int nr_lagging, int nr_shortages,
971 int prev_busy_level, u32 *missed_ppm)
973 u64 vrate = ioc->vtime_base_rate;
974 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
976 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
977 if (ioc->busy_level != prev_busy_level || nr_lagging)
978 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
979 missed_ppm, rq_wait_pct,
980 nr_lagging, nr_shortages);
986 * If vrate is out of bounds, apply clamp gradually as the
987 * bounds can change abruptly. Otherwise, apply busy_level
990 if (vrate < vrate_min) {
991 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
992 vrate = min(vrate, vrate_min);
993 } else if (vrate > vrate_max) {
994 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
995 vrate = max(vrate, vrate_max);
997 int idx = min_t(int, abs(ioc->busy_level),
998 ARRAY_SIZE(vrate_adj_pct) - 1);
999 u32 adj_pct = vrate_adj_pct[idx];
1001 if (ioc->busy_level > 0)
1002 adj_pct = 100 - adj_pct;
1004 adj_pct = 100 + adj_pct;
1006 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1007 vrate_min, vrate_max);
1010 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1011 nr_lagging, nr_shortages);
1013 ioc->vtime_base_rate = vrate;
1014 ioc_refresh_margins(ioc);
1017 /* take a snapshot of the current [v]time and vrate */
1018 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1022 now->now_ns = ktime_get();
1023 now->now = ktime_to_us(now->now_ns);
1024 now->vrate = atomic64_read(&ioc->vtime_rate);
1027 * The current vtime is
1029 * vtime at period start + (wallclock time since the start) * vrate
1031 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1032 * needed, they're seqcount protected.
1035 seq = read_seqcount_begin(&ioc->period_seqcount);
1036 now->vnow = ioc->period_at_vtime +
1037 (now->now - ioc->period_at) * now->vrate;
1038 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1041 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1043 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1045 write_seqcount_begin(&ioc->period_seqcount);
1046 ioc->period_at = now->now;
1047 ioc->period_at_vtime = now->vnow;
1048 write_seqcount_end(&ioc->period_seqcount);
1050 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1051 add_timer(&ioc->timer);
1055 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1056 * weight sums and propagate upwards accordingly. If @save, the current margin
1057 * is saved to be used as reference for later inuse in-period adjustments.
1059 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1060 bool save, struct ioc_now *now)
1062 struct ioc *ioc = iocg->ioc;
1065 lockdep_assert_held(&ioc->lock);
1068 * For an active leaf node, its inuse shouldn't be zero or exceed
1069 * @active. An active internal node's inuse is solely determined by the
1070 * inuse to active ratio of its children regardless of @inuse.
1072 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1073 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1074 iocg->child_active_sum);
1076 inuse = clamp_t(u32, inuse, 1, active);
1079 iocg->last_inuse = iocg->inuse;
1081 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1083 if (active == iocg->active && inuse == iocg->inuse)
1086 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1087 struct ioc_gq *parent = iocg->ancestors[lvl];
1088 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1089 u32 parent_active = 0, parent_inuse = 0;
1091 /* update the level sums */
1092 parent->child_active_sum += (s32)(active - child->active);
1093 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1094 /* apply the updates */
1095 child->active = active;
1096 child->inuse = inuse;
1099 * The delta between inuse and active sums indicates that
1100 * much of weight is being given away. Parent's inuse
1101 * and active should reflect the ratio.
1103 if (parent->child_active_sum) {
1104 parent_active = parent->weight;
1105 parent_inuse = DIV64_U64_ROUND_UP(
1106 parent_active * parent->child_inuse_sum,
1107 parent->child_active_sum);
1110 /* do we need to keep walking up? */
1111 if (parent_active == parent->active &&
1112 parent_inuse == parent->inuse)
1115 active = parent_active;
1116 inuse = parent_inuse;
1119 ioc->weights_updated = true;
1122 static void commit_weights(struct ioc *ioc)
1124 lockdep_assert_held(&ioc->lock);
1126 if (ioc->weights_updated) {
1127 /* paired with rmb in current_hweight(), see there */
1129 atomic_inc(&ioc->hweight_gen);
1130 ioc->weights_updated = false;
1134 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1135 bool save, struct ioc_now *now)
1137 __propagate_weights(iocg, active, inuse, save, now);
1138 commit_weights(iocg->ioc);
1141 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1143 struct ioc *ioc = iocg->ioc;
1148 /* hot path - if uptodate, use cached */
1149 ioc_gen = atomic_read(&ioc->hweight_gen);
1150 if (ioc_gen == iocg->hweight_gen)
1154 * Paired with wmb in commit_weights(). If we saw the updated
1155 * hweight_gen, all the weight updates from __propagate_weights() are
1158 * We can race with weight updates during calculation and get it
1159 * wrong. However, hweight_gen would have changed and a future
1160 * reader will recalculate and we're guaranteed to discard the
1161 * wrong result soon.
1165 hwa = hwi = WEIGHT_ONE;
1166 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1167 struct ioc_gq *parent = iocg->ancestors[lvl];
1168 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1169 u64 active_sum = READ_ONCE(parent->child_active_sum);
1170 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1171 u32 active = READ_ONCE(child->active);
1172 u32 inuse = READ_ONCE(child->inuse);
1174 /* we can race with deactivations and either may read as zero */
1175 if (!active_sum || !inuse_sum)
1178 active_sum = max_t(u64, active, active_sum);
1179 hwa = div64_u64((u64)hwa * active, active_sum);
1181 inuse_sum = max_t(u64, inuse, inuse_sum);
1182 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1185 iocg->hweight_active = max_t(u32, hwa, 1);
1186 iocg->hweight_inuse = max_t(u32, hwi, 1);
1187 iocg->hweight_gen = ioc_gen;
1190 *hw_activep = iocg->hweight_active;
1192 *hw_inusep = iocg->hweight_inuse;
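/*
 * Example walk using the hierarchy from the comment at the top of the
 * file: the root has children A (active 100) and B (active 300), and A
 * has leaves A0 and A1 (active 100 each). A0's hweight_active is then
 * WEIGHT_ONE * 100/400 * 100/200 = WEIGHT_ONE / 8, i.e. 12.5%.
 */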
1196 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1197 * other weights stay unchanged.
1199 static u32 current_hweight_max(struct ioc_gq *iocg)
1201 u32 hwm = WEIGHT_ONE;
1202 u32 inuse = iocg->active;
1203 u64 child_inuse_sum;
1206 lockdep_assert_held(&iocg->ioc->lock);
1208 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1209 struct ioc_gq *parent = iocg->ancestors[lvl];
1210 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1212 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1213 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1214 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1215 parent->child_active_sum);
1218 return max_t(u32, hwm, 1);
1221 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1223 struct ioc *ioc = iocg->ioc;
1224 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1225 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1228 lockdep_assert_held(&ioc->lock);
1230 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1231 if (weight != iocg->weight && iocg->active)
1232 propagate_weights(iocg, weight, iocg->inuse, true, now);
1233 iocg->weight = weight;
1236 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1238 struct ioc *ioc = iocg->ioc;
1239 u64 last_period, cur_period;
1244 * If we seem to be already active, just update the stamp to tell the
1245 * timer that we're still active. We don't mind occasional races.
1247 if (!list_empty(&iocg->active_list)) {
1249 cur_period = atomic64_read(&ioc->cur_period);
1250 if (atomic64_read(&iocg->active_period) != cur_period)
1251 atomic64_set(&iocg->active_period, cur_period);
1255 /* racy check on internal node IOs, treat as root level IOs */
1256 if (iocg->child_active_sum)
1259 spin_lock_irq(&ioc->lock);
1264 cur_period = atomic64_read(&ioc->cur_period);
1265 last_period = atomic64_read(&iocg->active_period);
1266 atomic64_set(&iocg->active_period, cur_period);
1268 /* already activated or breaking leaf-only constraint? */
1269 if (!list_empty(&iocg->active_list))
1270 goto succeed_unlock;
1271 for (i = iocg->level - 1; i > 0; i--)
1272 if (!list_empty(&iocg->ancestors[i]->active_list))
1275 if (iocg->child_active_sum)
1279 * Always start with the target budget. On deactivation, we throw away
1280 * anything above it.
1282 vtarget = now->vnow - ioc->margins.target;
1283 vtime = atomic64_read(&iocg->vtime);
1285 atomic64_add(vtarget - vtime, &iocg->vtime);
1286 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1290 * Activate, propagate weight and start period timer if not
1291 * running. Reset hweight_gen to avoid accidental match from
1294 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1295 list_add(&iocg->active_list, &ioc->active_iocgs);
1297 propagate_weights(iocg, iocg->weight,
1298 iocg->last_inuse ?: iocg->weight, true, now);
1300 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1301 last_period, cur_period, vtime);
1303 iocg->activated_at = now->now;
1305 if (ioc->running == IOC_IDLE) {
1306 ioc->running = IOC_RUNNING;
1307 ioc->dfgv_period_at = now->now;
1308 ioc->dfgv_period_rem = 0;
1309 ioc_start_period(ioc, now);
1313 spin_unlock_irq(&ioc->lock);
1317 spin_unlock_irq(&ioc->lock);
1321 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1323 struct ioc *ioc = iocg->ioc;
1324 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1325 u64 tdelta, delay, new_delay;
1326 s64 vover, vover_pct;
1329 lockdep_assert_held(&iocg->waitq.lock);
1331 /* calculate the current delay in effect - 1/2 every second */
1332 tdelta = now->now - iocg->delay_at;
1334 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1338 /* calculate the new delay from the debt amount */
1339 current_hweight(iocg, &hwa, NULL);
1340 vover = atomic64_read(&iocg->vtime) +
1341 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1342 vover_pct = div64_s64(100 * vover,
1343 ioc->period_us * ioc->vtime_base_rate);
1345 if (vover_pct <= MIN_DELAY_THR_PCT)
1347 else if (vover_pct >= MAX_DELAY_THR_PCT)
1348 new_delay = MAX_DELAY;
1350 new_delay = MIN_DELAY +
1351 div_u64((MAX_DELAY - MIN_DELAY) *
1352 (vover_pct - MIN_DELAY_THR_PCT),
1353 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1355 /* pick the higher one and apply */
1356 if (new_delay > delay) {
1357 iocg->delay = new_delay;
1358 iocg->delay_at = now->now;
1362 if (delay >= MIN_DELAY) {
1363 if (!iocg->indelay_since)
1364 iocg->indelay_since = now->now;
1365 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1368 if (iocg->indelay_since) {
1369 iocg->stat.indelay_us += now->now - iocg->indelay_since;
1370 iocg->indelay_since = 0;
1373 blkcg_clear_delay(blkg);
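/*
 * For a feel of the scaling above: the in-effect delay halves for every
 * full second since @delay_at, so a 100ms delay left untouched reads back
 * as ~25ms two seconds later, while the new delay interpolates linearly
 * between MIN_DELAY and MAX_DELAY as the debt overhang grows from 5 to
 * 250 periods' worth. Illustrative numbers only.
 */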
1378 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1379 struct ioc_now *now)
1381 struct iocg_pcpu_stat *gcs;
1383 lockdep_assert_held(&iocg->ioc->lock);
1384 lockdep_assert_held(&iocg->waitq.lock);
1385 WARN_ON_ONCE(list_empty(&iocg->active_list));
1388 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1389 * inuse donating all of its share to others until its debt is paid off.
1391 if (!iocg->abs_vdebt && abs_cost) {
1392 iocg->indebt_since = now->now;
1393 propagate_weights(iocg, iocg->active, 0, false, now);
1396 iocg->abs_vdebt += abs_cost;
1398 gcs = get_cpu_ptr(iocg->pcpu_stat);
1399 local64_add(abs_cost, &gcs->abs_vusage);
1403 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1404 struct ioc_now *now)
1406 lockdep_assert_held(&iocg->ioc->lock);
1407 lockdep_assert_held(&iocg->waitq.lock);
1409 /* make sure that nobody messed with @iocg */
1410 WARN_ON_ONCE(list_empty(&iocg->active_list));
1411 WARN_ON_ONCE(iocg->inuse > 1);
1413 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1415 /* if debt is paid in full, restore inuse */
1416 if (!iocg->abs_vdebt) {
1417 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1418 iocg->indebt_since = 0;
1420 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1425 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1426 int flags, void *key)
1428 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1429 struct iocg_wake_ctx *ctx = key;
1430 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1432 ctx->vbudget -= cost;
1434 if (ctx->vbudget < 0)
1437 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1438 wait->committed = true;
1441 * autoremove_wake_function() removes the wait entry only when it
1442 * actually changed the task state. We want the wait always removed.
1443 * Remove explicitly and use default_wake_function(). Note that the
1444 * order of operations is important as finish_wait() tests whether
1445 * @wq_entry is removed without grabbing the lock.
1447 default_wake_function(wq_entry, mode, flags, key);
1448 list_del_init_careful(&wq_entry->entry);
1453 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1454 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1455 * addition to iocg->waitq.lock.
1457 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1458 struct ioc_now *now)
1460 struct ioc *ioc = iocg->ioc;
1461 struct iocg_wake_ctx ctx = { .iocg = iocg };
1462 u64 vshortage, expires, oexpires;
1466 lockdep_assert_held(&iocg->waitq.lock);
1468 current_hweight(iocg, &hwa, NULL);
1469 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1472 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1473 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1474 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1475 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1477 lockdep_assert_held(&ioc->lock);
1479 atomic64_add(vpay, &iocg->vtime);
1480 atomic64_add(vpay, &iocg->done_vtime);
1481 iocg_pay_debt(iocg, abs_vpay, now);
1485 if (iocg->abs_vdebt || iocg->delay)
1486 iocg_kick_delay(iocg, now);
1489 * Debt can still be outstanding if we haven't paid all yet or the
1490 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1491 * under debt. Make sure @vbudget reflects the outstanding amount and is not positive.
1494 if (iocg->abs_vdebt) {
1495 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1496 vbudget = min_t(s64, 0, vbudget - vdebt);
1500 * Wake up the ones which are due and see how much vtime we'll need for
1501 * the next one. As paying off debt restores hw_inuse, it must be read
1502 * after the above debt payment.
1504 ctx.vbudget = vbudget;
1505 current_hweight(iocg, NULL, &ctx.hw_inuse);
1507 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1509 if (!waitqueue_active(&iocg->waitq)) {
1510 if (iocg->wait_since) {
1511 iocg->stat.wait_us += now->now - iocg->wait_since;
1512 iocg->wait_since = 0;
1517 if (!iocg->wait_since)
1518 iocg->wait_since = now->now;
1520 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1523 /* determine next wakeup, add a timer margin to guarantee chunking */
1524 vshortage = -ctx.vbudget;
1525 expires = now->now_ns +
1526 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1528 expires += ioc->timer_slack_ns;
1530 /* if already active and close enough, don't bother */
1531 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1532 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1533 abs(oexpires - expires) <= ioc->timer_slack_ns)
1536 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1537 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1540 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1542 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1543 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1545 unsigned long flags;
1547 ioc_now(iocg->ioc, &now);
1549 iocg_lock(iocg, pay_debt, &flags);
1550 iocg_kick_waitq(iocg, pay_debt, &now);
1551 iocg_unlock(iocg, pay_debt, &flags);
1553 return HRTIMER_NORESTART;
1556 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1558 u32 nr_met[2] = { };
1559 u32 nr_missed[2] = { };
1563 for_each_online_cpu(cpu) {
1564 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1565 u64 this_rq_wait_ns;
1567 for (rw = READ; rw <= WRITE; rw++) {
1568 u32 this_met = local_read(&stat->missed[rw].nr_met);
1569 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1571 nr_met[rw] += this_met - stat->missed[rw].last_met;
1572 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1573 stat->missed[rw].last_met = this_met;
1574 stat->missed[rw].last_missed = this_missed;
1577 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1578 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1579 stat->last_rq_wait_ns = this_rq_wait_ns;
1582 for (rw = READ; rw <= WRITE; rw++) {
1583 if (nr_met[rw] + nr_missed[rw])
1585 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1586 nr_met[rw] + nr_missed[rw]);
1588 missed_ppm_ar[rw] = 0;
1591 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1592 ioc->period_us * NSEC_PER_USEC);
1595 /* was iocg idle this period? */
1596 static bool iocg_is_idle(struct ioc_gq *iocg)
1598 struct ioc *ioc = iocg->ioc;
1600 /* did something get issued this period? */
1601 if (atomic64_read(&iocg->active_period) ==
1602 atomic64_read(&ioc->cur_period))
1605 /* is something in flight? */
1606 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1613 * Call this function on the target leaf @iocg's to build pre-order traversal
1614 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1615 * ->walk_list and the caller is responsible for dissolving the list after use.
1617 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1618 struct list_head *inner_walk)
1622 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1624 /* find the first ancestor which hasn't been visited yet */
1625 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1626 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1630 /* walk down and visit the inner nodes to get pre-order traversal */
1631 while (++lvl <= iocg->level - 1) {
1632 struct ioc_gq *inner = iocg->ancestors[lvl];
1634 /* record traversal order */
1635 list_add_tail(&inner->walk_list, inner_walk);
1639 /* propagate the deltas to the parent */
1640 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1642 if (iocg->level > 0) {
1643 struct iocg_stat *parent_stat =
1644 &iocg->ancestors[iocg->level - 1]->stat;
1646 parent_stat->usage_us +=
1647 iocg->stat.usage_us - iocg->last_stat.usage_us;
1648 parent_stat->wait_us +=
1649 iocg->stat.wait_us - iocg->last_stat.wait_us;
1650 parent_stat->indebt_us +=
1651 iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1652 parent_stat->indelay_us +=
1653 iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1656 iocg->last_stat = iocg->stat;
1659 /* collect per-cpu counters and propagate the deltas to the parent */
1660 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1662 struct ioc *ioc = iocg->ioc;
1667 lockdep_assert_held(&iocg->ioc->lock);
1669 /* collect per-cpu counters */
1670 for_each_possible_cpu(cpu) {
1671 abs_vusage += local64_read(
1672 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1674 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1675 iocg->last_stat_abs_vusage = abs_vusage;
1677 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1678 iocg->stat.usage_us += iocg->usage_delta_us;
1680 iocg_flush_stat_upward(iocg);
1683 /* get stat counters ready for reading on all active iocgs */
1684 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1686 LIST_HEAD(inner_walk);
1687 struct ioc_gq *iocg, *tiocg;
1689 /* flush leaves and build inner node walk list */
1690 list_for_each_entry(iocg, target_iocgs, active_list) {
1691 iocg_flush_stat_leaf(iocg, now);
1692 iocg_build_inner_walk(iocg, &inner_walk);
1695 /* keep flushing upwards by walking the inner list backwards */
1696 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1697 iocg_flush_stat_upward(iocg);
1698 list_del_init(&iocg->walk_list);
1703 * Determine what @iocg's hweight_inuse should be after donating unused
1704 * capacity. @hwm is the upper bound and used to signal no donation. This
1705 * function also throws away @iocg's excess budget.
1707 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1708 u32 usage, struct ioc_now *now)
1710 struct ioc *ioc = iocg->ioc;
1711 u64 vtime = atomic64_read(&iocg->vtime);
1712 s64 excess, delta, target, new_hwi;
1714 /* debt handling owns inuse for debtors */
1715 if (iocg->abs_vdebt)
1718 /* see whether minimum margin requirement is met */
1719 if (waitqueue_active(&iocg->waitq) ||
1720 time_after64(vtime, now->vnow - ioc->margins.min))
1723 /* throw away excess above target */
1724 excess = now->vnow - vtime - ioc->margins.target;
1726 atomic64_add(excess, &iocg->vtime);
1727 atomic64_add(excess, &iocg->done_vtime);
1729 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1733 * Let's say the distance between the iocg's and the device's vtimes, as a
1734 * fraction of the period duration, is delta. Assuming that the iocg will
1735 * consume the usage determined above, we want to determine new_hwi so
1736 * that delta equals MARGIN_TARGET at the end of the next period.
1738 * We need to execute usage worth of IOs while spending the sum of the
1739 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1742 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1744 * Therefore, the new_hwi is:
1746 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
1748 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1749 now->vnow - ioc->period_at_vtime);
1750 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1751 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1753 return clamp_t(s64, new_hwi, 1, hwm);
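/*
 * Worked example (illustrative): if the iocg used 20% of the device over
 * the period and its vtime sits exactly at the target margin (delta ==
 * MARGIN_TARGET == 50%), new_hwi = 0.2 / (1 - 0.5 + 0.5) = 20%. If it
 * had fallen further behind, say delta = 80%, the extra accumulated
 * budget lets it get by with 0.2 / 1.3 =~ 15%.
 */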
1757 * For work-conservation, an iocg which isn't using all of its share should
1758 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1759 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1761 * #1 is mathematically simpler but has the drawback of requiring synchronous
1762 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1763 * change due to donation snapbacks as it has the possibility of grossly
1764 * overshooting what's allowed by the model and vrate.
1766 * #2 is inherently safe with local operations. The donating iocg can easily
1767 * snap back to higher weights when needed without worrying about impacts on
1768 * other nodes as the impacts will be inherently correct. This also makes idle
1769 * iocg activations safe. The only effect activations have is decreasing
1770 * hweight_inuse of others, the right solution to which is for those iocgs to
1771 * snap back to higher weights.
1773 * So, we go with #2. The challenge is calculating how each donating iocg's
1774 * inuse should be adjusted to achieve the target donation amounts. This is done
1775 * using Andy's method described in the following pdf.
1777 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1779 * Given the weights and target after-donation hweight_inuse values, Andy's
1780 * method determines how the proportional distribution should look like at each
1781 * sibling level to maintain the relative relationship between all non-donating
1782 * pairs. To roughly summarize, it divides the tree into donating and
1783 * non-donating parts, calculates global donation rate which is used to
1784 * determine the target hweight_inuse for each node, and then derives per-level
1787 * The following pdf shows that global distribution calculated this way can be
1788 * achieved by scaling inuse weights of donating leaves and propagating the
1789 * adjustments upwards proportionally.
1791 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1793 * Combining the above two, we can determine how each leaf iocg's inuse should
1794 * be adjusted to achieve the target donation.
1796 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1798 * The inline comments use symbols from the last pdf.
1800 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1801 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1802 * t is the sum of the absolute budgets of donating nodes in the subtree.
1803 * w is the weight of the node. w = w_f + w_t
1804 * w_f is the non-donating portion of w. w_f = w * f / b
1805 * w_t is the donating portion of w. w_t = w * t / b
1806 * s is the sum of all sibling weights. s = Sum(w) for siblings
1807 * s_f and s_t are the non-donating and donating portions of s.
1809 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1810 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1811 * after adjustments. Subscript r denotes the root node's values.
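 *
 * As a small worked example (numbers illustrative): if the donating
 * leaves currently own 40% of the device (t_r = 0.4) but only need 10%
 * after donation (t_r' = 0.1), gamma = (1 - 0.1) / (1 - 0.4) = 1.5 and
 * every non-donating budget is scaled up by 50%, absorbing the freed
 * capacity proportionally.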
1813 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1815 LIST_HEAD(over_hwa);
1816 LIST_HEAD(inner_walk);
1817 struct ioc_gq *iocg, *tiocg, *root_iocg;
1818 u32 after_sum, over_sum, over_target, gamma;
1821 * It's pretty unlikely but possible for the total sum of
1822 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1823 * confuse the following calculations. If such a condition is detected,
1824 * scale down everyone over its full share equally to keep the sum below WEIGHT_ONE.
1829 list_for_each_entry(iocg, surpluses, surplus_list) {
1832 current_hweight(iocg, &hwa, NULL);
1833 after_sum += iocg->hweight_after_donation;
1835 if (iocg->hweight_after_donation > hwa) {
1836 over_sum += iocg->hweight_after_donation;
1837 list_add(&iocg->walk_list, &over_hwa);
1841 if (after_sum >= WEIGHT_ONE) {
1843 * The delta should be deducted from the over_sum, calculate
1844 * target over_sum value.
1846 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1847 WARN_ON_ONCE(over_sum <= over_delta);
1848 over_target = over_sum - over_delta;
1853 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1855 iocg->hweight_after_donation =
1856 div_u64((u64)iocg->hweight_after_donation *
1857 over_target, over_sum);
1858 list_del_init(&iocg->walk_list);
1862 * Build pre-order inner node walk list and prepare for donation
1863 * adjustment calculations.
1865 list_for_each_entry(iocg, surpluses, surplus_list) {
1866 iocg_build_inner_walk(iocg, &inner_walk);
1869 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1870 WARN_ON_ONCE(root_iocg->level > 0);
1872 list_for_each_entry(iocg, &inner_walk, walk_list) {
1873 iocg->child_adjusted_sum = 0;
1874 iocg->hweight_donating = 0;
1875 iocg->hweight_after_donation = 0;
1879 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1882 list_for_each_entry(iocg, surpluses, surplus_list) {
1883 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1885 parent->hweight_donating += iocg->hweight_donating;
1886 parent->hweight_after_donation += iocg->hweight_after_donation;
1889 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1890 if (iocg->level > 0) {
1891 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1893 parent->hweight_donating += iocg->hweight_donating;
1894 parent->hweight_after_donation += iocg->hweight_after_donation;
1899 * Calculate inner hwa's (b) and make sure the donation values are
1900 * within the accepted ranges as we're doing low res calculations with
1903 list_for_each_entry(iocg, &inner_walk, walk_list) {
1905 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1907 iocg->hweight_active = DIV64_U64_ROUND_UP(
1908 (u64)parent->hweight_active * iocg->active,
1909 parent->child_active_sum);
1913 iocg->hweight_donating = min(iocg->hweight_donating,
1914 iocg->hweight_active);
1915 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1916 iocg->hweight_donating - 1);
1917 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1918 iocg->hweight_donating <= 1 ||
1919 iocg->hweight_after_donation == 0)) {
1920 pr_warn("iocg: invalid donation weights in ");
1921 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1922 pr_cont(": active=%u donating=%u after=%u\n",
1923 iocg->hweight_active, iocg->hweight_donating,
1924 iocg->hweight_after_donation);
1929 * Calculate the global donation rate (gamma) - the rate to adjust
1930 * non-donating budgets by.
1932 * No need to use 64bit multiplication here as the first operand is
1933 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1935 * We know that there are beneficiary nodes and the sum of the donating
1936 * hweights can't be whole; however, due to the round-ups during hweight
1937 * calculations, root_iocg->hweight_donating might still end up equal to
1938 * or greater than whole. Limit the range when calculating the divider.
1940 * gamma = (1 - t_r') / (1 - t_r)
1942 gamma = DIV_ROUND_UP(
1943 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1944 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1947 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1950 list_for_each_entry(iocg, &inner_walk, walk_list) {
1951 struct ioc_gq *parent;
1952 u32 inuse, wpt, wptp;
1955 if (iocg->level == 0) {
1956 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1957 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1958 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1959 WEIGHT_ONE - iocg->hweight_after_donation);
1963 parent = iocg->ancestors[iocg->level - 1];
1965 /* b' = gamma * b_f + b_t' */
1966 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1967 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1968 WEIGHT_ONE) + iocg->hweight_after_donation;
1970 /* w' = s' * b' / b'_p */
1971 inuse = DIV64_U64_ROUND_UP(
1972 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1973 parent->hweight_inuse);
1975 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1976 st = DIV64_U64_ROUND_UP(
1977 iocg->child_active_sum * iocg->hweight_donating,
1978 iocg->hweight_active);
1979 sf = iocg->child_active_sum - st;
1980 wpt = DIV64_U64_ROUND_UP(
1981 (u64)iocg->active * iocg->hweight_donating,
1982 iocg->hweight_active);
1983 wptp = DIV64_U64_ROUND_UP(
1984 (u64)inuse * iocg->hweight_after_donation,
1985 iocg->hweight_inuse);
1987 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1991 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1992 * we can finally determine leaf adjustments.
1994 list_for_each_entry(iocg, surpluses, surplus_list) {
1995 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1999 * In-debt iocgs participated in the donation calculation with
2000 * the minimum target hweight_inuse. Configuring inuse
2001 * accordingly would work fine but debt handling expects
2002 * @iocg->inuse to stay at the minimum and we don't wanna interfere.
2005 if (iocg->abs_vdebt) {
2006 WARN_ON_ONCE(iocg->inuse > 1);
2010 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2011 inuse = DIV64_U64_ROUND_UP(
2012 parent->child_adjusted_sum * iocg->hweight_after_donation,
2013 parent->hweight_inuse);
2015 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2017 iocg->hweight_inuse,
2018 iocg->hweight_after_donation);
2020 __propagate_weights(iocg, iocg->active, inuse, true, now);
2023 /* walk list should be dissolved after use */
2024 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2025 list_del_init(&iocg->walk_list);
2029 * A low weight iocg can amass a large amount of debt, for example, when
2030 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2031 * memory paired with a slow IO device, the debt can span multiple seconds or
2032 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2033 * up blocked paying its debt while the IO device is idle.
2035 * The following protects against such cases. If the device has been
2036 * sufficiently idle for a while, the debts are halved and delays are recalculated.
2039 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2040 struct ioc_now *now)
2042 struct ioc_gq *iocg;
2043 u64 dur, usage_pct, nr_cycles;
2045 /* if no debtor, reset the cycle */
2047 ioc->dfgv_period_at = now->now;
2048 ioc->dfgv_period_rem = 0;
2049 ioc->dfgv_usage_us_sum = 0;
2054 * Debtors can pass through a lot of writes choking the device and we
2055 * don't want to be forgiving debts while the device is struggling from
2056 * write bursts. If we're missing latency targets, consider the device fully utilized.
2059 if (ioc->busy_level > 0)
2060 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2062 ioc->dfgv_usage_us_sum += usage_us_sum;
2063 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2067 * At least DFGV_PERIOD has passed since the last period. Calculate the
2068 * average usage and reset the period counters.
2070 dur = now->now - ioc->dfgv_period_at;
2071 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
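/*
 * Illustrative sketch (hypothetical numbers): 300ms of debtor usage
 * over a 5s window gives usage_pct = 100 * 300000 / 5000000 = 6, which
 * is then compared against DFGV_USAGE_PCT below.
 */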
2073 ioc->dfgv_period_at = now->now;
2074 ioc->dfgv_usage_us_sum = 0;
2076 /* if it was too busy, reset everything */
2077 if (usage_pct > DFGV_USAGE_PCT) {
2078 ioc->dfgv_period_rem = 0;
2083 * Usage is lower than threshold. Let's forgive some debts. Debt
2084 * forgiveness runs off of the usual ioc timer but its period usually
2085 * doesn't match ioc's. Compensate the difference by performing the
2086 * reduction as many times as would fit in the duration since the last
2087 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2088 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2089 * reductions is doubled.
2091 nr_cycles = dur + ioc->dfgv_period_rem;
2092 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
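/*
 * Illustrative sketch (hypothetical numbers): if DFGV_PERIOD were 5s
 * and 12s have elapsed with 1s carried over from the previous run,
 * nr_cycles = (12 + 1) / 5 = 2 with 3s carried forward, and each
 * debtor's abs_vdebt and delay below are right-shifted by 2 (i.e.
 * quartered), floored at 1.
 */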
2094 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2095 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2097 if (!iocg->abs_vdebt && !iocg->delay)
2100 spin_lock(&iocg->waitq.lock);
2102 old_debt = iocg->abs_vdebt;
2103 old_delay = iocg->delay;
2105 if (iocg->abs_vdebt)
2106 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2108 iocg->delay = iocg->delay >> nr_cycles ?: 1;
2110 iocg_kick_waitq(iocg, true, now);
2112 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2113 old_debt, iocg->abs_vdebt,
2114 old_delay, iocg->delay);
2116 spin_unlock(&iocg->waitq.lock);
2121 * Check the active iocgs' state to avoid oversleeping and deactivate idle iocgs.
2124 * Since waiters determine the sleep durations based on the vrate
2125 * they saw at the time of sleep, if vrate has increased, some
2126 * waiters could be sleeping for too long. Wake up tardy waiters
2127 * which should have woken up in the last period and expire idle iocgs.
2130 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2133 struct ioc_gq *iocg, *tiocg;
2135 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2136 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2137 !iocg->delay && !iocg_is_idle(iocg))
2140 spin_lock(&iocg->waitq.lock);
2142 /* flush wait and indebt stat deltas */
2143 if (iocg->wait_since) {
2144 iocg->stat.wait_us += now->now - iocg->wait_since;
2145 iocg->wait_since = now->now;
2147 if (iocg->indebt_since) {
2148 iocg->stat.indebt_us +=
2149 now->now - iocg->indebt_since;
2150 iocg->indebt_since = now->now;
2152 if (iocg->indelay_since) {
2153 iocg->stat.indelay_us +=
2154 now->now - iocg->indelay_since;
2155 iocg->indelay_since = now->now;
2158 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2160 /* might be oversleeping vtime / hweight changes, kick */
2161 iocg_kick_waitq(iocg, true, now);
2162 if (iocg->abs_vdebt || iocg->delay)
2164 } else if (iocg_is_idle(iocg)) {
2165 /* no waiter and idle, deactivate */
2166 u64 vtime = atomic64_read(&iocg->vtime);
2170 * @iocg has been inactive for a full duration and will
2171 * have a high budget. Account anything above target as
2172 * error and throw it away. On reactivation, it'll start
2173 * with the target budget.
2175 excess = now->vnow - vtime - ioc->margins.target;
2179 current_hweight(iocg, NULL, &old_hwi);
2180 ioc->vtime_err -= div64_u64(excess * old_hwi,
2184 TRACE_IOCG_PATH(iocg_idle, iocg, now,
2185 atomic64_read(&iocg->active_period),
2186 atomic64_read(&ioc->cur_period), vtime);
2187 __propagate_weights(iocg, 0, 0, false, now);
2188 list_del_init(&iocg->active_list);
2191 spin_unlock(&iocg->waitq.lock);
2194 commit_weights(ioc);
2198 static void ioc_timer_fn(struct timer_list *timer)
2200 struct ioc *ioc = container_of(timer, struct ioc, timer);
2201 struct ioc_gq *iocg, *tiocg;
2203 LIST_HEAD(surpluses);
2204 int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2205 u64 usage_us_sum = 0;
2208 u32 missed_ppm[2], rq_wait_pct;
2210 int prev_busy_level;
2212 /* how were the latencies during the period? */
2213 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2215 /* take care of active iocgs */
2216 spin_lock_irq(&ioc->lock);
2218 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2219 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2222 period_vtime = now.vnow - ioc->period_at_vtime;
2223 if (WARN_ON_ONCE(!period_vtime)) {
2224 spin_unlock_irq(&ioc->lock);
2228 nr_debtors = ioc_check_iocgs(ioc, &now);
2231 * Wait and indebt stats are flushed above and the donation calculation
2232 * below needs updated usage stats. Let's bring the stats up-to-date.
2234 iocg_flush_stat(&ioc->active_iocgs, &now);
2236 /* calc usage and see whether some weights need to be moved around */
2237 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2238 u64 vdone, vtime, usage_us;
2239 u32 hw_active, hw_inuse;
2242 * Collect unused and wind vtime closer to vnow to prevent
2243 * iocgs from accumulating a large amount of budget.
2245 vdone = atomic64_read(&iocg->done_vtime);
2246 vtime = atomic64_read(&iocg->vtime);
2247 current_hweight(iocg, &hw_active, &hw_inuse);
2250 * Latency QoS detection doesn't account for IOs which are
2251 * in-flight for longer than a period. Detect them by
2252 * comparing vdone against period start. If lagging behind
2253 * IOs from past periods, don't increase vrate.
2255 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2256 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2257 time_after64(vtime, vdone) &&
2258 time_after64(vtime, now.vnow -
2259 MAX_LAGGING_PERIODS * period_vtime) &&
2260 time_before64(vdone, now.vnow - period_vtime))
2264 * Determine absolute usage factoring in in-flight IOs to avoid
2265 * high-latency completions appearing as idle.
2267 usage_us = iocg->usage_delta_us;
2268 usage_us_sum += usage_us;
2270 /* see whether there's surplus vtime */
2271 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2272 if (hw_inuse < hw_active ||
2273 (!waitqueue_active(&iocg->waitq) &&
2274 time_before64(vtime, now.vnow - ioc->margins.low))) {
2275 u32 hwa, old_hwi, hwm, new_hwi, usage;
2278 if (vdone != vtime) {
2279 u64 inflight_us = DIV64_U64_ROUND_UP(
2280 cost_to_abs_cost(vtime - vdone, hw_inuse),
2281 ioc->vtime_base_rate);
2283 usage_us = max(usage_us, inflight_us);
2286 /* convert to hweight based usage ratio */
2287 if (time_after64(iocg->activated_at, ioc->period_at))
2288 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2290 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2292 usage = clamp_t(u32,
2293 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2298 * Already donating or accumulated enough to start.
2299 * Determine the donation amount.
2301 current_hweight(iocg, &hwa, &old_hwi);
2302 hwm = current_hweight_max(iocg);
2303 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2306 * Donation calculation assumes hweight_after_donation
2307 * to be positive, a condition that a donor w/ hwa < 2
2308 * can't meet. Don't bother with donation if hwa is
2309 * below 2. It's not gonna make a meaningful difference anyway.
2312 if (new_hwi < hwm && hwa >= 2) {
2313 iocg->hweight_donating = hwa;
2314 iocg->hweight_after_donation = new_hwi;
2315 list_add(&iocg->surplus_list, &surpluses);
2316 } else if (!iocg->abs_vdebt) {
2318 * @iocg doesn't have enough to donate. Reset
2319 * its inuse to active.
2321 * Don't reset debtors as their inuse's are
2322 * owned by debt handling. This shouldn't affect
2323 * donation calculation in any meaningful way
2324 * as @iocg doesn't have a meaningful amount of share anyway.
2327 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2328 iocg->inuse, iocg->active,
2329 iocg->hweight_inuse, new_hwi);
2331 __propagate_weights(iocg, iocg->active,
2332 iocg->active, true, &now);
2336 /* genuinely short on vtime */
2341 if (!list_empty(&surpluses) && nr_shortages)
2342 transfer_surpluses(&surpluses, &now);
2344 commit_weights(ioc);
2346 /* surplus list should be dissolved after use */
2347 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2348 list_del_init(&iocg->surplus_list);
2351 * If q is getting clogged or we're missing too much, we're issuing
2352 * too much IO and should lower vtime rate. If we're not missing
2353 * and experiencing shortages but not surpluses, we're too stingy
2354 * and should increase vtime rate.
2356 prev_busy_level = ioc->busy_level;
2357 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2358 missed_ppm[READ] > ppm_rthr ||
2359 missed_ppm[WRITE] > ppm_wthr) {
2360 /* clearly missing QoS targets, slow down vrate */
2361 ioc->busy_level = max(ioc->busy_level, 0);
2363 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2364 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2365 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2366 /* QoS targets are being met with >25% margin */
2369 * We're throttling while the device has spare
2370 * capacity. If vrate was being slowed down, stop.
2372 ioc->busy_level = min(ioc->busy_level, 0);
2375 * If there are IOs spanning multiple periods, wait
2376 * them out before pushing the device harder.
2382 * Nobody is being throttled and the users aren't
2383 * issuing enough IOs to saturate the device. We
2384 * simply don't know how close the device is to
2385 * saturation. Coast.
2387 ioc->busy_level = 0;
2390 /* inside the hysteresis margin, we're good */
2391 ioc->busy_level = 0;
2394 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2396 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2397 prev_busy_level, missed_ppm);
2399 ioc_refresh_params(ioc, false);
2401 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2404 * This period is done. Move onto the next one. If nothing's
2405 * going on with the device, stop the timer.
2407 atomic64_inc(&ioc->cur_period);
2409 if (ioc->running != IOC_STOP) {
2410 if (!list_empty(&ioc->active_iocgs)) {
2411 ioc_start_period(ioc, &now);
2413 ioc->busy_level = 0;
2415 ioc->running = IOC_IDLE;
2418 ioc_refresh_vrate(ioc, &now);
2421 spin_unlock_irq(&ioc->lock);
2424 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2425 u64 abs_cost, struct ioc_now *now)
2427 struct ioc *ioc = iocg->ioc;
2428 struct ioc_margins *margins = &ioc->margins;
2429 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2432 u64 cost, new_inuse;
2434 current_hweight(iocg, NULL, &hwi);
2436 cost = abs_cost_to_cost(abs_cost, hwi);
2437 margin = now->vnow - vtime - cost;
2439 /* debt handling owns inuse for debtors */
2440 if (iocg->abs_vdebt)
2444 * We only increase inuse during the period and do so if the margin has
2445 * deteriorated since the previous adjustment.
2447 if (margin >= iocg->saved_margin || margin >= margins->low ||
2448 iocg->inuse == iocg->active)
2451 spin_lock_irq(&ioc->lock);
2453 /* we own inuse only when @iocg is in the normal active state */
2454 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2455 spin_unlock_irq(&ioc->lock);
2460 * Bump up inuse till @abs_cost fits in the existing budget.
2461 * adj_step must be determined after acquiring ioc->lock - we might
2462 * have raced and lost to another thread for activation and could
2463 * be reading 0 iocg->active before ioc->lock which will lead to an infinite loop.
2466 new_inuse = iocg->inuse;
2467 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2469 new_inuse = new_inuse + adj_step;
2470 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2471 current_hweight(iocg, NULL, &hwi);
2472 cost = abs_cost_to_cost(abs_cost, hwi);
2473 } while (time_after64(vtime + cost, now->vnow) &&
2474 iocg->inuse != iocg->active);
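/*
 * Illustrative sketch (hypothetical numbers): with active = 10000 and
 * INUSE_ADJ_STEP_PCT at, say, 25, adj_step is 2500, so inuse is bumped
 * in quarter-of-active steps until either the IO fits within the
 * budget at the resulting hweight_inuse or inuse reaches active.
 */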
2476 spin_unlock_irq(&ioc->lock);
2478 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2479 old_inuse, iocg->inuse, old_hwi, hwi);
2484 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2485 bool is_merge, u64 *costp)
2487 struct ioc *ioc = iocg->ioc;
2488 u64 coef_seqio, coef_randio, coef_page;
2489 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2493 switch (bio_op(bio)) {
2495 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2496 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2497 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2500 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2501 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2502 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2509 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2510 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2514 if (seek_pages > LCOEF_RANDIO_PAGES) {
2515 cost += coef_randio;
2520 cost += pages * coef_page;
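/*
 * Illustrative sketch (hypothetical numbers): a 256KiB read covering 64
 * pages that continues from the previous cursor is charged roughly
 * coef_seqio + 64 * coef_page in vtime units, while the same read
 * landing far from the cursor is charged coef_randio + 64 * coef_page.
 */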
2525 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2529 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2533 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2536 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2538 switch (req_op(rq)) {
2540 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2543 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2550 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2554 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2558 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2560 struct blkcg_gq *blkg = bio->bi_blkg;
2561 struct ioc *ioc = rqos_to_ioc(rqos);
2562 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2564 struct iocg_wait wait;
2565 u64 abs_cost, cost, vtime;
2566 bool use_debt, ioc_locked;
2567 unsigned long flags;
2569 /* bypass IOs if disabled, still initializing, or for root cgroup */
2570 if (!ioc->enabled || !iocg || !iocg->level)
2573 /* calculate the absolute vtime cost */
2574 abs_cost = calc_vtime_cost(bio, iocg, false);
2578 if (!iocg_activate(iocg, &now))
2581 iocg->cursor = bio_end_sector(bio);
2582 vtime = atomic64_read(&iocg->vtime);
2583 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2586 * If no one's waiting and within budget, issue right away. The
2587 * tests are racy but the races aren't systemic - we only miss once
2588 * in a while which is fine.
2590 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2591 time_before_eq64(vtime + cost, now.vnow)) {
2592 iocg_commit_bio(iocg, bio, abs_cost, cost);
2597 * We're over budget. This can be handled in two ways. IOs which may
2598 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2599 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2600 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2601 * whether debt handling is needed and acquire locks accordingly.
2603 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2604 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2606 iocg_lock(iocg, ioc_locked, &flags);
2609 * @iocg must stay activated for debt and waitq handling. Deactivation
2610 * is synchronized against both ioc->lock and waitq.lock and we won't
2611 * get deactivated as long as we're waiting or have debt, so we're good
2612 * if we're activated here. In the unlikely cases that we aren't, just issue the IO.
2615 if (unlikely(list_empty(&iocg->active_list))) {
2616 iocg_unlock(iocg, ioc_locked, &flags);
2617 iocg_commit_bio(iocg, bio, abs_cost, cost);
2622 * We're over budget. If @bio has to be issued regardless, remember
2623 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2624 * off the debt before waking more IOs.
2626 * This way, the debt is continuously paid off each period with the
2627 * actual budget available to the cgroup. If we just wound vtime, we
2628 * would incorrectly use the current hw_inuse for the entire amount
2629 * which, for example, can lead to the cgroup staying blocked for a
2630 * long time even with substantially raised hw_inuse.
2632 * An iocg with vdebt should stay online so that the timer can keep
2633 * deducting its vdebt and [de]activating the use_delay mechanism
2634 * accordingly. We don't want to race against the timer trying to
2635 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2636 * penalizing the cgroup and its descendants.
2639 iocg_incur_debt(iocg, abs_cost, &now);
2640 if (iocg_kick_delay(iocg, &now))
2641 blkcg_schedule_throttle(rqos->q->disk,
2642 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2643 iocg_unlock(iocg, ioc_locked, &flags);
2647 /* guarantee that iocgs w/ waiters have maximum inuse */
2648 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2650 iocg_unlock(iocg, false, &flags);
2654 propagate_weights(iocg, iocg->active, iocg->active, true,
2659 * Append self to the waitq and schedule the wakeup timer if we're
2660 * the first waiter. The timer duration is calculated based on the
2661 * current vrate. vtime and hweight changes can make it too short
2662 * or too long. Each wait entry records the absolute cost it's
2663 * waiting for to allow re-evaluation using a custom wait entry.
2665 * If too short, the timer simply reschedules itself. If too long,
2666 * the period timer will notice and trigger wakeups.
2668 * All waiters are on iocg->waitq and the wait states are
2669 * synchronized using waitq.lock.
2671 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2672 wait.wait.private = current;
2674 wait.abs_cost = abs_cost;
2675 wait.committed = false; /* will be set true by waker */
2677 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2678 iocg_kick_waitq(iocg, ioc_locked, &now);
2680 iocg_unlock(iocg, ioc_locked, &flags);
2683 set_current_state(TASK_UNINTERRUPTIBLE);
2689 /* waker already committed us, proceed */
2690 finish_wait(&iocg->waitq, &wait.wait);
2693 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2696 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2697 struct ioc *ioc = rqos_to_ioc(rqos);
2698 sector_t bio_end = bio_end_sector(bio);
2700 u64 vtime, abs_cost, cost;
2701 unsigned long flags;
2703 /* bypass if disabled, still initializing, or for root cgroup */
2704 if (!ioc->enabled || !iocg || !iocg->level)
2707 abs_cost = calc_vtime_cost(bio, iocg, true);
2713 vtime = atomic64_read(&iocg->vtime);
2714 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2716 /* update cursor if backmerging into the request at the cursor */
2717 if (blk_rq_pos(rq) < bio_end &&
2718 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2719 iocg->cursor = bio_end;
2722 * Charge if there's enough vtime budget and the existing request has cost assigned.
2725 if (rq->bio && rq->bio->bi_iocost_cost &&
2726 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2727 iocg_commit_bio(iocg, bio, abs_cost, cost);
2732 * Otherwise, account it as debt if @iocg is online, which it should
2733 * be for the vast majority of cases. See debt handling in
2734 * ioc_rqos_throttle() for details.
2736 spin_lock_irqsave(&ioc->lock, flags);
2737 spin_lock(&iocg->waitq.lock);
2739 if (likely(!list_empty(&iocg->active_list))) {
2740 iocg_incur_debt(iocg, abs_cost, &now);
2741 if (iocg_kick_delay(iocg, &now))
2742 blkcg_schedule_throttle(rqos->q->disk,
2743 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2745 iocg_commit_bio(iocg, bio, abs_cost, cost);
2748 spin_unlock(&iocg->waitq.lock);
2749 spin_unlock_irqrestore(&ioc->lock, flags);
2752 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2754 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2756 if (iocg && bio->bi_iocost_cost)
2757 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2760 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2762 struct ioc *ioc = rqos_to_ioc(rqos);
2763 struct ioc_pcpu_stat *ccs;
2764 u64 on_q_ns, rq_wait_ns, size_nsec;
2767 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2770 switch (req_op(rq)) {
2783 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2784 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2785 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2787 ccs = get_cpu_ptr(ioc->pcpu_stat);
2789 if (on_q_ns <= size_nsec ||
2790 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2791 local_inc(&ccs->missed[rw].nr_met);
2793 local_inc(&ccs->missed[rw].nr_missed);
2795 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
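/*
 * Illustrative sketch (hypothetical numbers): with a 5ms read latency
 * target, a read that spent 7ms on the queue with a 3ms
 * size-proportional cost counts as met (7 - 3 <= 5) while one that
 * spent 10ms with the same size cost counts as missed.
 */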
2800 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2802 struct ioc *ioc = rqos_to_ioc(rqos);
2804 spin_lock_irq(&ioc->lock);
2805 ioc_refresh_params(ioc, false);
2806 spin_unlock_irq(&ioc->lock);
2809 static void ioc_rqos_exit(struct rq_qos *rqos)
2811 struct ioc *ioc = rqos_to_ioc(rqos);
2813 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2815 spin_lock_irq(&ioc->lock);
2816 ioc->running = IOC_STOP;
2817 spin_unlock_irq(&ioc->lock);
2819 del_timer_sync(&ioc->timer);
2820 free_percpu(ioc->pcpu_stat);
2824 static struct rq_qos_ops ioc_rqos_ops = {
2825 .throttle = ioc_rqos_throttle,
2826 .merge = ioc_rqos_merge,
2827 .done_bio = ioc_rqos_done_bio,
2828 .done = ioc_rqos_done,
2829 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2830 .exit = ioc_rqos_exit,
2833 static int blk_iocost_init(struct gendisk *disk)
2835 struct request_queue *q = disk->queue;
2837 struct rq_qos *rqos;
2840 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2844 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2845 if (!ioc->pcpu_stat) {
2850 for_each_possible_cpu(cpu) {
2851 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2853 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2854 local_set(&ccs->missed[i].nr_met, 0);
2855 local_set(&ccs->missed[i].nr_missed, 0);
2857 local64_set(&ccs->rq_wait_ns, 0);
2861 rqos->id = RQ_QOS_COST;
2862 rqos->ops = &ioc_rqos_ops;
2865 spin_lock_init(&ioc->lock);
2866 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2867 INIT_LIST_HEAD(&ioc->active_iocgs);
2869 ioc->running = IOC_IDLE;
2870 ioc->vtime_base_rate = VTIME_PER_USEC;
2871 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2872 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2873 ioc->period_at = ktime_to_us(ktime_get());
2874 atomic64_set(&ioc->cur_period, 0);
2875 atomic_set(&ioc->hweight_gen, 0);
2877 spin_lock_irq(&ioc->lock);
2878 ioc->autop_idx = AUTOP_INVALID;
2879 ioc_refresh_params(ioc, true);
2880 spin_unlock_irq(&ioc->lock);
2883 * rqos must be added before activation to allow iocg_pd_init() to
2884 * lookup the ioc from q. This means that the rqos methods may get
2885 * called before policy activation completes, so they can't assume that
2886 * the target bio has an iocg associated and need to test for a NULL iocg.
2888 ret = rq_qos_add(q, rqos);
2892 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2898 rq_qos_del(q, rqos);
2900 free_percpu(ioc->pcpu_stat);
2905 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2907 struct ioc_cgrp *iocc;
2909 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2913 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
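/*
 * Weights are carried in WEIGHT_ONE (1 << 16) fixed point internally;
 * e.g. the default cgroup weight of 100 is stored as 100 * WEIGHT_ONE
 * here and divided back out when displayed.
 */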
2917 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2919 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2922 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2923 struct blkcg *blkcg)
2925 int levels = blkcg->css.cgroup->level + 1;
2926 struct ioc_gq *iocg;
2928 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2932 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2933 if (!iocg->pcpu_stat) {
2941 static void ioc_pd_init(struct blkg_policy_data *pd)
2943 struct ioc_gq *iocg = pd_to_iocg(pd);
2944 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2945 struct ioc *ioc = q_to_ioc(blkg->q);
2947 struct blkcg_gq *tblkg;
2948 unsigned long flags;
2953 atomic64_set(&iocg->vtime, now.vnow);
2954 atomic64_set(&iocg->done_vtime, now.vnow);
2955 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2956 INIT_LIST_HEAD(&iocg->active_list);
2957 INIT_LIST_HEAD(&iocg->walk_list);
2958 INIT_LIST_HEAD(&iocg->surplus_list);
2959 iocg->hweight_active = WEIGHT_ONE;
2960 iocg->hweight_inuse = WEIGHT_ONE;
2962 init_waitqueue_head(&iocg->waitq);
2963 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2964 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2966 iocg->level = blkg->blkcg->css.cgroup->level;
2968 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2969 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2970 iocg->ancestors[tiocg->level] = tiocg;
2973 spin_lock_irqsave(&ioc->lock, flags);
2974 weight_updated(iocg, &now);
2975 spin_unlock_irqrestore(&ioc->lock, flags);
2978 static void ioc_pd_free(struct blkg_policy_data *pd)
2980 struct ioc_gq *iocg = pd_to_iocg(pd);
2981 struct ioc *ioc = iocg->ioc;
2982 unsigned long flags;
2985 spin_lock_irqsave(&ioc->lock, flags);
2987 if (!list_empty(&iocg->active_list)) {
2991 propagate_weights(iocg, 0, 0, false, &now);
2992 list_del_init(&iocg->active_list);
2995 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2996 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2998 spin_unlock_irqrestore(&ioc->lock, flags);
3000 hrtimer_cancel(&iocg->waitq_timer);
3002 free_percpu(iocg->pcpu_stat);
3006 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3008 struct ioc_gq *iocg = pd_to_iocg(pd);
3009 struct ioc *ioc = iocg->ioc;
3014 if (iocg->level == 0) {
3015 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3016 ioc->vtime_base_rate * 10000,
3018 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
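/*
 * Illustrative sketch: vp10k expresses the base vrate in hundredths of
 * a percent of the device's nominal speed, so a device throttled to 75%
 * would be reported as "cost.vrate=75.00".
 */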
3021 seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3023 if (blkcg_debug_stats)
3024 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3025 iocg->last_stat.wait_us,
3026 iocg->last_stat.indebt_us,
3027 iocg->last_stat.indelay_us);
3030 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3033 const char *dname = blkg_dev_name(pd->blkg);
3034 struct ioc_gq *iocg = pd_to_iocg(pd);
3036 if (dname && iocg->cfg_weight)
3037 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3042 static int ioc_weight_show(struct seq_file *sf, void *v)
3044 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3045 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3047 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3048 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3049 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3053 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3054 size_t nbytes, loff_t off)
3056 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3057 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3058 struct blkg_conf_ctx ctx;
3060 struct ioc_gq *iocg;
3064 if (!strchr(buf, ':')) {
3065 struct blkcg_gq *blkg;
3067 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3070 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3073 spin_lock_irq(&blkcg->lock);
3074 iocc->dfl_weight = v * WEIGHT_ONE;
3075 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3076 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3079 spin_lock(&iocg->ioc->lock);
3080 ioc_now(iocg->ioc, &now);
3081 weight_updated(iocg, &now);
3082 spin_unlock(&iocg->ioc->lock);
3085 spin_unlock_irq(&blkcg->lock);
3090 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3094 iocg = blkg_to_iocg(ctx.blkg);
3096 if (!strncmp(ctx.body, "default", 7)) {
3099 if (!sscanf(ctx.body, "%u", &v))
3101 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3105 spin_lock(&iocg->ioc->lock);
3106 iocg->cfg_weight = v * WEIGHT_ONE;
3107 ioc_now(iocg->ioc, &now);
3108 weight_updated(iocg, &now);
3109 spin_unlock(&iocg->ioc->lock);
3111 blkg_conf_finish(&ctx);
3115 blkg_conf_finish(&ctx);
3119 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3122 const char *dname = blkg_dev_name(pd->blkg);
3123 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3128 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3129 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3130 ioc->params.qos[QOS_RPPM] / 10000,
3131 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3132 ioc->params.qos[QOS_RLAT],
3133 ioc->params.qos[QOS_WPPM] / 10000,
3134 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3135 ioc->params.qos[QOS_WLAT],
3136 ioc->params.qos[QOS_MIN] / 10000,
3137 ioc->params.qos[QOS_MIN] % 10000 / 100,
3138 ioc->params.qos[QOS_MAX] / 10000,
3139 ioc->params.qos[QOS_MAX] % 10000 / 100);
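/*
 * Illustrative sketch: the percentile parameters are stored in
 * parts-per-million, so e.g. a stored rpct of 950000 comes out as
 * "rpct=95.00" from the divisions above.
 */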
3143 static int ioc_qos_show(struct seq_file *sf, void *v)
3145 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3147 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3148 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3152 static const match_table_t qos_ctrl_tokens = {
3153 { QOS_ENABLE, "enable=%u" },
3154 { QOS_CTRL, "ctrl=%s" },
3155 { NR_QOS_CTRL_PARAMS, NULL },
3158 static const match_table_t qos_tokens = {
3159 { QOS_RPPM, "rpct=%s" },
3160 { QOS_RLAT, "rlat=%u" },
3161 { QOS_WPPM, "wpct=%s" },
3162 { QOS_WLAT, "wlat=%u" },
3163 { QOS_MIN, "min=%s" },
3164 { QOS_MAX, "max=%s" },
3165 { NR_QOS_PARAMS, NULL },
3168 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3169 size_t nbytes, loff_t off)
3171 struct block_device *bdev;
3172 struct gendisk *disk;
3174 u32 qos[NR_QOS_PARAMS];
3179 bdev = blkcg_conf_open_bdev(&input);
3181 return PTR_ERR(bdev);
3183 disk = bdev->bd_disk;
3184 ioc = q_to_ioc(disk->queue);
3186 ret = blk_iocost_init(disk);
3189 ioc = q_to_ioc(disk->queue);
3192 blk_mq_freeze_queue(disk->queue);
3193 blk_mq_quiesce_queue(disk->queue);
3195 spin_lock_irq(&ioc->lock);
3196 memcpy(qos, ioc->params.qos, sizeof(qos));
3197 enable = ioc->enabled;
3198 user = ioc->user_qos_params;
3200 while ((p = strsep(&input, " \t\n"))) {
3201 substring_t args[MAX_OPT_ARGS];
3209 switch (match_token(p, qos_ctrl_tokens, args)) {
3211 match_u64(&args[0], &v);
3215 match_strlcpy(buf, &args[0], sizeof(buf));
3216 if (!strcmp(buf, "auto"))
3218 else if (!strcmp(buf, "user"))
3225 tok = match_token(p, qos_tokens, args);
3229 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3232 if (cgroup_parse_float(buf, 2, &v))
3234 if (v < 0 || v > 10000)
3240 if (match_u64(&args[0], &v))
3246 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3249 if (cgroup_parse_float(buf, 2, &v))
3253 qos[tok] = clamp_t(s64, v * 100,
3254 VRATE_MIN_PPM, VRATE_MAX_PPM);
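/*
 * Illustrative sketch: cgroup_parse_float() with two decimal places
 * turns "min=150.00" into v = 15000, and v * 100 = 1500000 ppm, i.e. a
 * 150% vrate floor, subject to the VRATE_MIN_PPM/VRATE_MAX_PPM clamp.
 */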
3262 if (qos[QOS_MIN] > qos[QOS_MAX])
3266 blk_stat_enable_accounting(disk->queue);
3267 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3268 ioc->enabled = true;
3269 wbt_disable_default(disk->queue);
3271 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3272 ioc->enabled = false;
3273 wbt_enable_default(disk->queue);
3277 memcpy(ioc->params.qos, qos, sizeof(qos));
3278 ioc->user_qos_params = true;
3280 ioc->user_qos_params = false;
3283 ioc_refresh_params(ioc, true);
3284 spin_unlock_irq(&ioc->lock);
3286 blk_mq_unquiesce_queue(disk->queue);
3287 blk_mq_unfreeze_queue(disk->queue);
3289 blkdev_put_no_open(bdev);
3292 spin_unlock_irq(&ioc->lock);
3294 blk_mq_unquiesce_queue(disk->queue);
3295 blk_mq_unfreeze_queue(disk->queue);
3299 blkdev_put_no_open(bdev);
3303 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3304 struct blkg_policy_data *pd, int off)
3306 const char *dname = blkg_dev_name(pd->blkg);
3307 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3308 u64 *u = ioc->params.i_lcoefs;
3313 seq_printf(sf, "%s ctrl=%s model=linear "
3314 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3315 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3316 dname, ioc->user_cost_model ? "user" : "auto",
3317 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3318 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3322 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3324 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3326 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3327 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3331 static const match_table_t cost_ctrl_tokens = {
3332 { COST_CTRL, "ctrl=%s" },
3333 { COST_MODEL, "model=%s" },
3334 { NR_COST_CTRL_PARAMS, NULL },
3337 static const match_table_t i_lcoef_tokens = {
3338 { I_LCOEF_RBPS, "rbps=%u" },
3339 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3340 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3341 { I_LCOEF_WBPS, "wbps=%u" },
3342 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3343 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3344 { NR_I_LCOEFS, NULL },
3347 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3348 size_t nbytes, loff_t off)
3350 struct block_device *bdev;
3351 struct request_queue *q;
3358 bdev = blkcg_conf_open_bdev(&input);
3360 return PTR_ERR(bdev);
3362 q = bdev_get_queue(bdev);
3365 ret = blk_iocost_init(bdev->bd_disk);
3371 blk_mq_freeze_queue(q);
3372 blk_mq_quiesce_queue(q);
3374 spin_lock_irq(&ioc->lock);
3375 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3376 user = ioc->user_cost_model;
3378 while ((p = strsep(&input, " \t\n"))) {
3379 substring_t args[MAX_OPT_ARGS];
3387 switch (match_token(p, cost_ctrl_tokens, args)) {
3389 match_strlcpy(buf, &args[0], sizeof(buf));
3390 if (!strcmp(buf, "auto"))
3392 else if (!strcmp(buf, "user"))
3398 match_strlcpy(buf, &args[0], sizeof(buf));
3399 if (strcmp(buf, "linear"))
3404 tok = match_token(p, i_lcoef_tokens, args);
3405 if (tok == NR_I_LCOEFS)
3407 if (match_u64(&args[0], &v))
3414 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3415 ioc->user_cost_model = true;
3417 ioc->user_cost_model = false;
3419 ioc_refresh_params(ioc, true);
3420 spin_unlock_irq(&ioc->lock);
3422 blk_mq_unquiesce_queue(q);
3423 blk_mq_unfreeze_queue(q);
3425 blkdev_put_no_open(bdev);
3429 spin_unlock_irq(&ioc->lock);
3431 blk_mq_unquiesce_queue(q);
3432 blk_mq_unfreeze_queue(q);
3436 blkdev_put_no_open(bdev);
3440 static struct cftype ioc_files[] = {
3443 .flags = CFTYPE_NOT_ON_ROOT,
3444 .seq_show = ioc_weight_show,
3445 .write = ioc_weight_write,
3449 .flags = CFTYPE_ONLY_ON_ROOT,
3450 .seq_show = ioc_qos_show,
3451 .write = ioc_qos_write,
3454 .name = "cost.model",
3455 .flags = CFTYPE_ONLY_ON_ROOT,
3456 .seq_show = ioc_cost_model_show,
3457 .write = ioc_cost_model_write,
3462 static struct blkcg_policy blkcg_policy_iocost = {
3463 .dfl_cftypes = ioc_files,
3464 .cpd_alloc_fn = ioc_cpd_alloc,
3465 .cpd_free_fn = ioc_cpd_free,
3466 .pd_alloc_fn = ioc_pd_alloc,
3467 .pd_init_fn = ioc_pd_init,
3468 .pd_free_fn = ioc_pd_free,
3469 .pd_stat_fn = ioc_pd_stat,
3472 static int __init ioc_init(void)
3474 return blkcg_policy_register(&blkcg_policy_iocost);
3477 static void __exit ioc_exit(void)
3479 blkcg_policy_unregister(&blkcg_policy_iocost);
3482 module_init(ioc_init);
3483 module_exit(ioc_exit);