/* SPDX-License-Identifier: GPL-2.0
 *
 * IO cost model based controller.
 *
 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
 * Copyright (C) 2019 Andy Newell <newella@fb.com>
 * Copyright (C) 2019 Facebook
 *
 * One challenge of controlling IO resources is the lack of a trivially
 * observable cost metric. This is distinguished from CPU and memory where
 * wallclock time and the number of bytes can serve as accurate enough
 * approximations.
 *
 * Bandwidth and iops are the most commonly used metrics for IO devices but
 * depending on the type and specifics of the device, different IO patterns
 * easily lead to multiple orders of magnitude variations rendering them
 * useless for the purpose of IO capacity distribution. While on-device
 * time, with a lot of crutches, could serve as a useful approximation for
 * non-queued rotational devices, this is no longer viable with modern
 * devices, even the rotational ones.
 *
 * While there is no cost metric we can trivially observe, it isn't a
 * complete mystery. For example, on a rotational device, seek cost
 * dominates while a contiguous transfer contributes a smaller amount
 * proportional to the size. If we can characterize at least the relative
 * costs of these different types of IOs, it should be possible to
 * implement a reasonable work-conserving proportional IO resource
 * distribution.
 *
 * 1. IO Cost Model
 *
 * The IO cost model estimates the cost of an IO given its basic parameters
 * and history (e.g. the end sector of the last IO). The cost is measured
 * in device time. If a given IO is estimated to cost 10ms, the device
 * should be able to process ~100 of those IOs in a second.
 *
 * Currently, there's only one builtin cost model - linear. Each IO is
 * classified as sequential or random and given a base cost accordingly.
 * On top of that, a size cost proportional to the length of the IO is
 * added. While simple, this model captures the operational
 * characteristics of a wide variety of devices well enough. Default
 * parameters for several different classes of devices are provided and the
 * parameters can be configured from userspace via
 * /sys/fs/cgroup/io.cost.model.
 *
 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
 * device-specific coefficients.
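 *
 * As a worked example with illustrative (not default) coefficients:
 * suppose rbps=125000000, rseqiops=10000 and rrandiops=2000. Then
 *
 *	page cost   = VTIME_PER_SEC / (125000000 / 4096)
 *	seqio cost  = max(VTIME_PER_SEC / 10000 - page, 0)
 *	randio cost = max(VTIME_PER_SEC / 2000 - page, 0)
 *
 * and a random 16k read is billed randio + 4 * page worth of device time.
 * A hypothetical configuration of such a model (key format per
 * Documentation/admin-guide/cgroup-v2.rst) could look like:
 *
 *	echo "8:16 rbps=125000000 rseqiops=10000 rrandiops=2000" \
 *		> /sys/fs/cgroup/io.cost.model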
 *
 * 2. Control Strategy
 *
 * The device virtual time (vtime) is used as the primary control metric.
 * The control strategy is composed of the following three parts.
 *
 * 2-1. Vtime Distribution
 *
 * When a cgroup becomes active in terms of IOs, its hierarchical share is
 * calculated. Please consider the following hierarchy where the numbers
 * inside parentheses denote the configured weights.
 *
 *                 root
 *               /       \
 *          A (w:100)  B (w:300)
 *         /       \
 *  A0 (w:100)  A1 (w:100)
 *
 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
 * 12.5% each. The distribution mechanism only cares about these flattened
 * shares. They're called hweights (hierarchical weights) and always add
 * up to 1 (WEIGHT_ONE).
 *
 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
 * against the device vtime - an IO which takes 10ms on the underlying
 * device is considered to take 80ms on A0.
 *
 * This constitutes the basis of IO capacity distribution. Each cgroup's
 * vtime is running at a rate determined by its hweight. A cgroup tracks
 * the vtime consumed by past IOs and can issue a new IO iff doing so
 * wouldn't outrun the current device vtime. Otherwise, the IO is
 * suspended until the vtime has progressed enough to cover it.
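 *
 * As a simplified sketch (the real issue-path logic lives in
 * ioc_rqos_throttle() and iocg_kick_waitq()), the per-bio admission
 * decision is roughly:
 *
 *	vbudget = vnow - vtime;				// budget accrued so far
 *	cost = abs_cost * WEIGHT_ONE / hweight_inuse;	// scaled to hweight
 *	if (cost <= vbudget)
 *		issue now and vtime += cost;
 *	else
 *		wait (or incur debt) until vtime catches up;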
 *
 * 2-2. Vrate Adjustment
 *
 * It's unrealistic to expect the cost model to be perfect. There are too
 * many devices and even on the same device the overall performance
 * fluctuates depending on numerous factors such as IO mixture and device
 * internal garbage collection. The controller needs to adapt dynamically.
 *
 * This is achieved by adjusting the overall IO rate according to how busy
 * the device is. If the device becomes overloaded, we're sending down too
 * many IOs and should generally slow down. If there are waiting issuers
 * but the device isn't saturated, we're issuing too few and should
 * increase the rate.
 *
 * To slow down, we lower the vrate - the rate at which the device vtime
 * passes compared to the wall clock. For example, if the vtime is running
 * at the vrate of 75%, all cgroups added up would only be able to issue
 * 750ms worth of IOs per second, and vice-versa for speeding up.
 *
 * Device busyness is determined using two criteria - rq wait and
 * completion latencies.
 *
 * When a device gets saturated, the on-device and then the request queues
 * fill up and a bio which is ready to be issued has to wait for a request
 * to become available. When this delay becomes noticeable, it's a clear
 * indication that the device is saturated and we lower the vrate. This
 * saturation signal is fairly conservative as it only triggers when both
 * hardware and software queues are filled up, and is used as the default
 * busy signal.
 *
 * As devices can have deep queues and be unfair in how the queued commands
 * are executed, solely depending on rq wait may not result in satisfactory
 * control quality. For a better control quality, completion latency QoS
 * parameters can be configured so that the device is considered saturated
 * if the N'th percentile completion latency rises above the set point.
 *
 * The completion latency requirements are a function of both the
 * underlying device characteristics and the desired IO latency quality of
 * service. There is an inherent trade-off - the tighter the latency QoS,
 * the higher the bandwidth lossage. Latency QoS is disabled by default
 * and can be set through /sys/fs/cgroup/io.cost.qos.
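 *
 * For example, a hypothetical setup which considers the device saturated
 * when the 95th percentile read completion latency exceeds 75ms (key
 * format per Documentation/admin-guide/cgroup-v2.rst, rlat in usecs):
 *
 *	echo "8:16 enable=1 rpct=95.00 rlat=75000" > /sys/fs/cgroup/io.cost.qos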
 *
 * 2-3. Work Conservation
 *
 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
 * periodically while B is sending out enough parallel IOs to saturate the
 * device on its own. Let's say A's usage amounts to 100ms worth of IO
 * cost per second, i.e., 10% of the device capacity. The naive
 * distribution of half and half would lead to 60% utilization of the
 * device, a significant reduction in the total amount of work done
 * compared to free-for-all competition. This is too high a cost to pay
 * for IO control.
 *
 * To conserve the total amount of work done, we keep track of how much
 * each active cgroup is actually using and yield part of its weight if
 * there are other cgroups which can make use of it. In the above case,
 * A's weight will be lowered so that it hovers above the actual usage and
 * B would be able to use the rest.
 *
 * As we don't want to penalize a cgroup for donating its weight, the
 * surplus weight adjustment factors in a margin and has an immediate
 * snapback mechanism in case the cgroup needs more IO vtime for itself.
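 *
 * In the two-cgroup example above, this means A's inuse is walked down
 * until its hweight_inuse hovers just above its ~10% usage while B's
 * hweight_inuse grows toward ~90%, recovering total utilization from the
 * naive 60% to near 100%; the moment A needs more, its inuse snaps back
 * toward its configured weight.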
 *
 * Note that adjusting down surplus weights has the same effects as
 * accelerating vtime for other cgroups and work conservation can also be
 * implemented by adjusting vrate dynamically. However, squaring who can
 * donate and should take back how much requires hweight propagations
 * anyway making it easier to implement and understand as a separate
 * mechanism.
 *
 * 3. Monitoring
 *
 * Instead of debugfs or other clumsy monitoring mechanisms, this
 * controller uses a drgn based monitoring script -
 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
 * https://github.com/osandov/drgn. The output looks like the following.
 *
 *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
 *                 active      weight      hweight% inflt% dbt  delay usages%
 *  test/a              *    50/   50  33.33/ 33.33  27.65    2  0*041 033:033:033
 *  test/b              *   100/  100  66.67/ 66.67  17.56    0  0*000 066:079:077
 *
 *  - per      : Timer period
 *  - cur_per  : Internal wall and device vtime clock
 *  - vrate    : Device virtual time rate against wall clock
 *  - weight   : Surplus-adjusted and configured weights
 *  - hweight  : Surplus-adjusted and configured hierarchical weights
 *  - inflt    : The percentage of in-flight IO cost at the end of last period
 *  - delay    : Deferred issuer delay induction level and duration
 *  - usages   : Usage history
 */
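
/*
 * The monitor is run against a live kernel. For example, assuming drgn is
 * available and the controller is enabled on sdb, something like the
 * following (invocation details may vary by tree):
 *
 *	./tools/cgroup/iocost_monitor.py sdb
 */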

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/time64.h>
#include <linux/parser.h>
#include <linux/sched/signal.h>
#include <linux/blk-cgroup.h>
#include <asm/local.h>
#include <asm/local64.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"

#ifdef CONFIG_TRACEPOINTS

/* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
#define TRACE_IOCG_PATH_LEN 1024
static DEFINE_SPINLOCK(trace_iocg_path_lock);
static char trace_iocg_path[TRACE_IOCG_PATH_LEN];

#define TRACE_IOCG_PATH(type, iocg, ...)					\
	do {									\
		unsigned long flags;						\
		if (trace_iocost_##type##_enabled()) {				\
			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
			trace_iocost_##type(iocg, trace_iocg_path,		\
					    ##__VA_ARGS__);			\
			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
		}								\
	} while (0)

#else	/* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */

enum {
	MILLION = 1000000,

	/* timer period is calculated from latency requirements, bound it */
	MIN_PERIOD = USEC_PER_MSEC,
	MAX_PERIOD = USEC_PER_SEC,

	/*
	 * iocg->vtime is targeted at 50% behind the device vtime, which
	 * serves as its IO credit buffer. Surplus weight adjustment is
	 * immediately canceled if the vtime margin runs below 10%.
	 */
	MARGIN_MIN_PCT = 10,
	MARGIN_LOW_PCT = 20,
	MARGIN_TARGET_PCT = 50,

	INUSE_ADJ_STEP_PCT = 25,

	/* Have some play in timer operations */
	TIMER_SLACK_PCT = 1,

	/* 1/64k is granular enough and can easily be handled w/ u32 */
	WEIGHT_ONE = 1 << 16,

	/*
	 * As vtime is used to calculate the cost of each IO, it needs to
	 * be fairly high precision. For example, it should be able to
	 * represent the cost of a single page worth of discard with
	 * sufficient accuracy. At the same time, it should be able to
	 * represent reasonably long enough durations to be useful and
	 * convenient during operation.
	 *
	 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
	 * granularity and days of wrap-around time even at extreme vrates.
	 */
	VTIME_PER_SEC_SHIFT = 37,
	VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
	VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
	VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
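
	/*
	 * Worked out: 2^37 = 137,438,953,472, so VTIME_PER_USEC is ~137,438
	 * and VTIME_PER_NSEC is ~137 vtime units per nanosecond. A u64 spans
	 * 2^27 seconds (~4.25 years) of device vtime at nominal vrate and
	 * still ~15 days at the 10000% vrate cap below.
	 */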

	/* bound vrate adjustments within two orders of magnitude */
	VRATE_MIN_PPM = 10000,		/* 1% */
	VRATE_MAX_PPM = 100000000,	/* 10000% */

	VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
	VRATE_CLAMP_ADJ_PCT = 4,

	/* if IOs end up waiting for requests, issue less */
	RQ_WAIT_BUSY_PCT = 5,

	/* unbusy hysteresis */

	/*
	 * The effect of delay is indirect and non-linear and a huge amount of
	 * future debt can accumulate abruptly while unthrottled. Linearly scale
	 * up delay as debt is going up and then let it decay exponentially.
	 * This gives us quick ramp ups while delay is accumulating and long
	 * tails which can help reduce the frequency of debt explosions on
	 * unthrottle. The parameters are experimentally determined.
	 *
	 * The delay mechanism provides adequate protection and behavior in many
	 * cases. However, this is far from ideal and falls short on both
	 * fronts. The debtors are often throttled too harshly costing a
	 * significant level of fairness and possibly total work while the
	 * protection against their impacts on the system can be choppy and
	 * unreliable.
	 *
	 * The shortcoming primarily stems from the fact that, unlike for page
	 * cache, the kernel doesn't have a well-defined back-pressure propagation
	 * mechanism and policies for anonymous memory. Fully addressing this
	 * issue will likely require substantial improvements in the area.
	 */
	MIN_DELAY_THR_PCT = 500,
	MAX_DELAY_THR_PCT = 25000,
	MIN_DELAY = 250,		/* 250us */
	MAX_DELAY = 250 * USEC_PER_MSEC,

	/*
	 * Halve debts if total usage keeps staying under 25% w/o any shortages
	 * for over 100ms.
	 */
	DEBT_BUSY_USAGE_PCT = 25,
	DEBT_REDUCTION_IDLE_DUR = 100 * USEC_PER_MSEC,

	/* don't let cmds which take a very long time pin lagging for too long */
	MAX_LAGGING_PERIODS = 10,

	/* switch iff the conditions are met for longer than this */
	AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,

	/*
	 * Count IO size in 4k pages. The 12bit shift helps keep
	 * size-proportional components of cost calculation in closer
	 * numbers of digits to per-IO cost components.
	 */
	IOC_PAGE_SHIFT = 12,
	IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
	IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,

	/* if apart further than 16M, consider randio for linear model */
	LCOEF_RANDIO_PAGES = 4096,
};

enum ioc_running {
	IOC_IDLE,
	IOC_RUNNING,
	IOC_STOP,
};

/* io.cost.qos controls including per-dev enable of the whole controller */
enum {
	QOS_ENABLE,
	QOS_CTRL,
	NR_QOS_CTRL_PARAMS,
};

/* io.cost.qos params */
enum {
	QOS_RPPM,
	QOS_RLAT,
	QOS_WPPM,
	QOS_WLAT,
	QOS_MIN,
	QOS_MAX,
	NR_QOS_PARAMS,
};

/* io.cost.model controls */
enum {
	COST_CTRL,
	COST_MODEL,
	NR_COST_CTRL_PARAMS,
};

/* builtin linear cost model coefficients */
enum {
	I_LCOEF_RBPS,
	I_LCOEF_RSEQIOPS,
	I_LCOEF_RRANDIOPS,
	I_LCOEF_WBPS,
	I_LCOEF_WSEQIOPS,
	I_LCOEF_WRANDIOPS,
	NR_I_LCOEFS,
};

enum {
	LCOEF_RPAGE,
	LCOEF_RSEQIO,
	LCOEF_RRANDIO,
	LCOEF_WPAGE,
	LCOEF_WSEQIO,
	LCOEF_WRANDIO,
	NR_LCOEFS,
};

enum {
	AUTOP_INVALID,
	AUTOP_HDD,
	AUTOP_SSD_QD1,
	AUTOP_SSD_DFL,
	AUTOP_SSD_FAST,
};

struct ioc_params {
	u32 qos[NR_QOS_PARAMS];
	u64 i_lcoefs[NR_I_LCOEFS];
	u64 lcoefs[NR_LCOEFS];
	u32 too_fast_vrate_pct;
	u32 too_slow_vrate_pct;
};

struct ioc_margins {
	s64 min;
	s64 low;
	s64 target;
};

struct ioc_missed {
	local_t nr_met;
	local_t nr_missed;
	u32 last_met;
	u32 last_missed;
};

struct ioc_pcpu_stat {
	struct ioc_missed missed[2];

	local64_t rq_wait_ns;
	u64 last_rq_wait_ns;
};

struct ioc {
	struct rq_qos rqos;

	bool enabled;

	struct ioc_params params;
	struct ioc_margins margins;
	u32 period_us;
	u32 timer_slack_ns;
	u64 vrate_min;
	u64 vrate_max;

	spinlock_t lock;
	struct timer_list timer;
	struct list_head active_iocgs;		/* active cgroups */
	struct ioc_pcpu_stat __percpu *pcpu_stat;

	enum ioc_running running;
	atomic64_t vtime_rate;
	u64 vtime_base_rate;
	s64 vtime_err;

	seqcount_spinlock_t period_seqcount;
	u64 period_at;				/* wallclock starttime */
	u64 period_at_vtime;			/* vtime starttime */

	atomic64_t cur_period;			/* inc'd each period */
	int busy_level;				/* saturation history */

	bool weights_updated;
	atomic_t hweight_gen;			/* for lazy hweights */

	/* the last time debt cancel condition wasn't met */
	u64 debt_busy_at;

	u64 autop_too_fast_at;
	u64 autop_too_slow_at;
	int autop_idx;
	bool user_qos_params:1;
	bool user_cost_model:1;
};

struct iocg_pcpu_stat {
	local64_t abs_vusage;
};

struct iocg_stat {
	u64 usage_us;
	u64 wait_us;
	u64 indebt_us;
	u64 indelay_us;
};

/* per device-cgroup pair */
struct ioc_gq {
	struct blkg_policy_data pd;
	struct ioc *ioc;

	/*
	 * An iocg can get its weight from two sources - an explicit
	 * per-device-cgroup configuration or the default weight of the
	 * cgroup. `cfg_weight` is the explicit per-device-cgroup
	 * configuration. `weight` is the effective considering both
	 * sources.
	 *
	 * When an idle cgroup becomes active its `active` goes from 0 to
	 * `weight`. `inuse` is the surplus adjusted active weight.
	 * `active` and `inuse` are used to calculate `hweight_active` and
	 * `hweight_inuse`.
	 *
	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
	 * surplus adjustments.
	 *
	 * `inuse` may be adjusted dynamically during a period. `saved_*` are
	 * used to determine and track adjustments.
	 */
	u32 cfg_weight;
	u32 weight;
	u32 active;
	u32 inuse;

	u32 last_inuse;
	s64 saved_margin;

	sector_t cursor;			/* to detect randio */

	/*
	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
	 * issued. If lagging behind device vtime, the delta represents
	 * the currently available IO budget. If running ahead, the
	 * overage.
	 *
	 * `done_vtime` is the same but progressed on completion rather
	 * than issue. The delta behind `vtime` represents the cost of
	 * currently in-flight IOs.
	 */
	atomic64_t vtime;
	atomic64_t done_vtime;
	u64 abs_vdebt;

	/* current delay in effect and when it started */
	u64 delay;
	u64 delay_at;

	/*
	 * The period this iocg was last active in. Used for deactivation
	 * and invalidating `vtime`.
	 */
	atomic64_t active_period;
	struct list_head active_list;

	/* see __propagate_weights() and current_hweight() for details */
	u64 child_active_sum;
	u64 child_inuse_sum;
	u64 child_adjusted_sum;
	int hweight_gen;
	u32 hweight_active;
	u32 hweight_inuse;
	u32 hweight_donating;
	u32 hweight_after_donation;

	struct list_head walk_list;
	struct list_head surplus_list;

	struct wait_queue_head waitq;
	struct hrtimer waitq_timer;

	/* timestamp at the latest activation */
	u64 activated_at;

	/* statistics */
	struct iocg_pcpu_stat __percpu *pcpu_stat;
	struct iocg_stat local_stat;
	struct iocg_stat desc_stat;
	struct iocg_stat last_stat;
	u64 last_stat_abs_vusage;
	u64 usage_delta_us;
	u64 wait_since;
	u64 indebt_since;
	u64 indelay_since;

	/* this iocg's depth in the hierarchy and ancestors including self */
	int level;
	struct ioc_gq *ancestors[];
};

struct ioc_cgrp {
	struct blkcg_policy_data cpd;
	unsigned int dfl_weight;
};

struct ioc_now {
	u64 now_ns;
	u64 now;
	u64 vnow;
	u64 vrate;
};

struct iocg_wait {
	struct wait_queue_entry wait;
	struct bio *bio;
	u64 abs_cost;
	bool committed;
};

struct iocg_wake_ctx {
	struct ioc_gq *iocg;
	u32 hw_inuse;
	s64 vbudget;
};

static const struct ioc_params autop[] = {
	[AUTOP_HDD] = {
		.qos = {
			[QOS_RLAT]		= 250000, /* 250ms */
			[QOS_WLAT]		= 250000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs = {
			[I_LCOEF_RBPS]		= 174019176,
			[I_LCOEF_RSEQIOPS]	= 41708,
			[I_LCOEF_RRANDIOPS]	= 370,
			[I_LCOEF_WBPS]		= 178075866,
			[I_LCOEF_WSEQIOPS]	= 42705,
			[I_LCOEF_WRANDIOPS]	= 378,
		},
	},
	[AUTOP_SSD_QD1] = {
		.qos = {
			[QOS_RLAT]		= 25000, /* 25ms */
			[QOS_WLAT]		= 25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs = {
			[I_LCOEF_RBPS]		= 245855193,
			[I_LCOEF_RSEQIOPS]	= 61575,
			[I_LCOEF_RRANDIOPS]	= 6946,
			[I_LCOEF_WBPS]		= 141365009,
			[I_LCOEF_WSEQIOPS]	= 33716,
			[I_LCOEF_WRANDIOPS]	= 26796,
		},
	},
	[AUTOP_SSD_DFL] = {
		.qos = {
			[QOS_RLAT]		= 25000, /* 25ms */
			[QOS_WLAT]		= 25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs = {
			[I_LCOEF_RBPS]		= 488636629,
			[I_LCOEF_RSEQIOPS]	= 8932,
			[I_LCOEF_RRANDIOPS]	= 8518,
			[I_LCOEF_WBPS]		= 427891549,
			[I_LCOEF_WSEQIOPS]	= 28755,
			[I_LCOEF_WRANDIOPS]	= 21940,
		},
		.too_fast_vrate_pct = 500,
	},
	[AUTOP_SSD_FAST] = {
		.qos = {
			[QOS_RLAT]		= 5000, /* 5ms */
			[QOS_WLAT]		= 5000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs = {
			[I_LCOEF_RBPS]		= 3102524156LLU,
			[I_LCOEF_RSEQIOPS]	= 724816,
			[I_LCOEF_RRANDIOPS]	= 778122,
			[I_LCOEF_WBPS]		= 1742780862LLU,
			[I_LCOEF_WSEQIOPS]	= 425702,
			[I_LCOEF_WRANDIOPS]	= 443193,
		},
		.too_slow_vrate_pct = 10,
	},
};

/*
 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
 * vtime credit shortage and down on device saturation.
 */
static u32 vrate_adj_pct[] =
	{ 0, 0, 0, 0,
	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };

static struct blkcg_policy blkcg_policy_iocost;

/* accessors and helpers */
static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
{
	return container_of(rqos, struct ioc, rqos);
}

static struct ioc *q_to_ioc(struct request_queue *q)
{
	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
}

static const char *q_name(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return kobject_name(q->kobj.parent);
	else
		return "<unknown>";
}

static const char __maybe_unused *ioc_name(struct ioc *ioc)
{
	return q_name(ioc->rqos.q);
}

static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
}

static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
{
	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
}

static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
{
	return pd_to_blkg(&iocg->pd);
}

static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
{
	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
			    struct ioc_cgrp, cpd);
}

/*
 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
 * weight, the more expensive each IO. Must round up.
 */
static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
}

/*
 * The inverse of abs_cost_to_cost(). Must round up.
 */
static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
}
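
/*
 * For example, an IO whose absolute cost is 10ms worth of vtime charged to
 * an iocg with hweight_inuse at WEIGHT_ONE / 2 (50%) costs
 * 10ms * WEIGHT_ONE / (WEIGHT_ONE / 2) = 20ms of local vtime;
 * cost_to_abs_cost() maps the 20ms back to 10ms.
 */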

static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
			    u64 abs_cost, u64 cost)
{
	struct iocg_pcpu_stat *gcs;

	bio->bi_iocost_cost = cost;
	atomic64_add(cost, &iocg->vtime);

	gcs = get_cpu_ptr(iocg->pcpu_stat);
	local64_add(abs_cost, &gcs->abs_vusage);
	put_cpu_ptr(gcs);
}

static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
{
	if (lock_ioc) {
		spin_lock_irqsave(&iocg->ioc->lock, *flags);
		spin_lock(&iocg->waitq.lock);
	} else {
		spin_lock_irqsave(&iocg->waitq.lock, *flags);
	}
}

static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
{
	if (unlock_ioc) {
		spin_unlock(&iocg->waitq.lock);
		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
	} else {
		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
	}
}

#define CREATE_TRACE_POINTS
#include <trace/events/iocost.h>

static void ioc_refresh_margins(struct ioc *ioc)
{
	struct ioc_margins *margins = &ioc->margins;
	u32 period_us = ioc->period_us;
	u64 vrate = ioc->vtime_base_rate;

	margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
	margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
	margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
}

/* latency QoS params changed, update period_us and all the dependent params */
static void ioc_refresh_period_us(struct ioc *ioc)
{
	u32 ppm, lat, multi, period_us;

	lockdep_assert_held(&ioc->lock);

	/* pick the higher latency target */
	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
		ppm = ioc->params.qos[QOS_RPPM];
		lat = ioc->params.qos[QOS_RLAT];
	} else {
		ppm = ioc->params.qos[QOS_WPPM];
		lat = ioc->params.qos[QOS_WLAT];
	}

	/*
	 * We want the period to be long enough to contain a healthy number
	 * of IOs while short enough for granular control. Define it as a
	 * multiple of the latency target. Ideally, the multiplier should
	 * be scaled according to the percentile so that it would nominally
	 * contain a certain number of requests. Let's be simpler and
	 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
	 */
	multi = max_t(u32, (MILLION - ppm) / 50000, 2);
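
	/*
	 * E.g. a 95th percentile latency target (ppm = 950000) gives
	 * multi = max((1000000 - 950000) / 50000, 2) = 2 while a 50th
	 * percentile target (ppm = 500000) gives multi = 10.
	 */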
	period_us = multi * lat;
	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);

	/* calculate dependent params */
	ioc->period_us = period_us;
	ioc->timer_slack_ns = div64_u64(
		(u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
		100);
	ioc_refresh_margins(ioc);
}

static int ioc_autop_idx(struct ioc *ioc)
{
	int idx = ioc->autop_idx;
	const struct ioc_params *p = &autop[idx];
	u64 vrate_pct;
	u64 now_ns;

	/* rotational? */
	if (!blk_queue_nonrot(ioc->rqos.q))
		return AUTOP_HDD;

	/* handle SATA SSDs w/ broken NCQ */
	if (blk_queue_depth(ioc->rqos.q) == 1)
		return AUTOP_SSD_QD1;

	/* use one of the normal ssd sets */
	if (idx < AUTOP_SSD_DFL)
		return AUTOP_SSD_DFL;

	/* if user is overriding anything, maintain what was there */
	if (ioc->user_qos_params || ioc->user_cost_model)
		return idx;

	/* step up/down based on the vrate */
	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
	now_ns = ktime_get_ns();

	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
		if (!ioc->autop_too_fast_at)
			ioc->autop_too_fast_at = now_ns;
		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
			return idx + 1;
	} else {
		ioc->autop_too_fast_at = 0;
	}

	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
		if (!ioc->autop_too_slow_at)
			ioc->autop_too_slow_at = now_ns;
		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
			return idx - 1;
	} else {
		ioc->autop_too_slow_at = 0;
	}

	return idx;
}

/*
 * Take the following as input
 *
 *  @bps	maximum sequential throughput
 *  @seqiops	maximum sequential 4k iops
 *  @randiops	maximum random 4k iops
 *
 * and calculate the linear model cost coefficients.
 *
 *  *@page	per-page cost		1s / (@bps / 4096)
 *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
 *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
 */
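/*
 * Illustrative numbers: @bps = 409600000 (100k pages/sec) gives
 * *@page = VTIME_PER_SEC / 100000 and @seqiops = 50000 gives
 * *@seqio = max(VTIME_PER_SEC / 50000 - *@page, 0) - i.e. the per-IO
 * overhead left after subtracting the size-proportional component.
 */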
static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
			u64 *page, u64 *seqio, u64 *randio)
{
	u64 v;

	*page = *seqio = *randio = 0;

	if (bps)
		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));

	if (seqiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
		if (v > *page)
			*seqio = v - *page;
	}

	if (randiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
		if (v > *page)
			*randio = v - *page;
	}
}

static void ioc_refresh_lcoefs(struct ioc *ioc)
{
	u64 *u = ioc->params.i_lcoefs;
	u64 *c = ioc->params.lcoefs;

	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
}

static bool ioc_refresh_params(struct ioc *ioc, bool force)
{
	const struct ioc_params *p;
	int idx;

	lockdep_assert_held(&ioc->lock);

	idx = ioc_autop_idx(ioc);
	p = &autop[idx];

	if (idx == ioc->autop_idx && !force)
		return false;

	if (idx != ioc->autop_idx)
		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);

	ioc->autop_idx = idx;
	ioc->autop_too_fast_at = 0;
	ioc->autop_too_slow_at = 0;

	if (!ioc->user_qos_params)
		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
	if (!ioc->user_cost_model)
		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));

	ioc_refresh_period_us(ioc);
	ioc_refresh_lcoefs(ioc);

	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
					    VTIME_PER_USEC, MILLION);
	ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
				   VTIME_PER_USEC, MILLION);

	return true;
}

/*
 * When an iocg accumulates too much vtime or gets deactivated, we throw away
 * some vtime, which lowers the overall device utilization. As the exact amount
 * which is being thrown away is known, we can compensate by accelerating the
 * vrate accordingly so that the extra vtime generated in the current period
 * matches what got lost.
 */
static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
{
	s64 pleft = ioc->period_at + ioc->period_us - now->now;
	s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
	s64 vcomp, vcomp_min, vcomp_max;

	lockdep_assert_held(&ioc->lock);

	/* we need some time left in this period */
	if (pleft <= 0)
		return;

	/*
	 * Calculate how much vrate should be adjusted to offset the error.
	 * Limit the amount of adjustment and deduct the adjusted amount from
	 * the error.
	 */
	vcomp = -div64_s64(ioc->vtime_err, pleft);
	vcomp_min = -(ioc->vtime_base_rate >> 1);
	vcomp_max = ioc->vtime_base_rate;
	vcomp = clamp(vcomp, vcomp_min, vcomp_max);

	ioc->vtime_err += vcomp * pleft;

	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);

	/* bound how much error can accumulate */
	ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
}

/* take a snapshot of the current [v]time and vrate */
static void ioc_now(struct ioc *ioc, struct ioc_now *now)
{
	unsigned seq;

	now->now_ns = ktime_get();
	now->now = ktime_to_us(now->now_ns);
	now->vrate = atomic64_read(&ioc->vtime_rate);

	/*
	 * The current vtime is
	 *
	 *   vtime at period start + (wallclock time since the start) * vrate
	 *
	 * As a consistent snapshot of `period_at_vtime` and `period_at` is
	 * needed, they're seqcount protected.
	 */
	do {
		seq = read_seqcount_begin(&ioc->period_seqcount);
		now->vnow = ioc->period_at_vtime +
			(now->now - ioc->period_at) * now->vrate;
	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
}

static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
{
	WARN_ON_ONCE(ioc->running != IOC_RUNNING);

	write_seqcount_begin(&ioc->period_seqcount);
	ioc->period_at = now->now;
	ioc->period_at_vtime = now->vnow;
	write_seqcount_end(&ioc->period_seqcount);

	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
	add_timer(&ioc->timer);
}

/*
 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
 * weight sums and propagate upwards accordingly. If @save, the current margin
 * is saved to be used as reference for later inuse in-period adjustments.
 */
static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
				bool save, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	int lvl;

	lockdep_assert_held(&ioc->lock);

	inuse = clamp_t(u32, inuse, 1, active);

	iocg->last_inuse = iocg->inuse;
	if (save)
		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);

	if (active == iocg->active && inuse == iocg->inuse)
		return;

	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u32 parent_active = 0, parent_inuse = 0;

		/* update the level sums */
		parent->child_active_sum += (s32)(active - child->active);
		parent->child_inuse_sum += (s32)(inuse - child->inuse);
		/* apply the updates */
		child->active = active;
		child->inuse = inuse;

		/*
		 * The delta between inuse and active sums indicates that
		 * much of weight is being given away. Parent's inuse
		 * and active should reflect the ratio.
		 */
		if (parent->child_active_sum) {
			parent_active = parent->weight;
			parent_inuse = DIV64_U64_ROUND_UP(
				parent_active * parent->child_inuse_sum,
				parent->child_active_sum);
		}

		/* do we need to keep walking up? */
		if (parent_active == parent->active &&
		    parent_inuse == parent->inuse)
			break;

		active = parent_active;
		inuse = parent_inuse;
	}

	ioc->weights_updated = true;
}

static void commit_weights(struct ioc *ioc)
{
	lockdep_assert_held(&ioc->lock);

	if (ioc->weights_updated) {
		/* paired with rmb in current_hweight(), see there */
		smp_wmb();
		atomic_inc(&ioc->hweight_gen);
		ioc->weights_updated = false;
	}
}

static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
			      bool save, struct ioc_now *now)
{
	__propagate_weights(iocg, active, inuse, save, now);
	commit_weights(iocg->ioc);
}

static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
{
	struct ioc *ioc = iocg->ioc;
	int ioc_gen;
	u32 hwa, hwi;
	int lvl;

	/* hot path - if uptodate, use cached */
	ioc_gen = atomic_read(&ioc->hweight_gen);
	if (ioc_gen == iocg->hweight_gen)
		goto out;

	/*
	 * Paired with wmb in commit_weights(). If we saw the updated
	 * hweight_gen, all the weight updates from __propagate_weights() are
	 * visible.
	 *
	 * We can race with weight updates during calculation and get it
	 * wrong. However, hweight_gen would have changed and a future
	 * reader will recalculate and we're guaranteed to discard the
	 * wrong result soon.
	 */
	smp_rmb();

	hwa = hwi = WEIGHT_ONE;
	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u64 active_sum = READ_ONCE(parent->child_active_sum);
		u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
		u32 active = READ_ONCE(child->active);
		u32 inuse = READ_ONCE(child->inuse);

		/* we can race with deactivations and either may read as zero */
		if (!active_sum || !inuse_sum)
			continue;

		active_sum = max_t(u64, active, active_sum);
		hwa = div64_u64((u64)hwa * active, active_sum);

		inuse_sum = max_t(u64, inuse, inuse_sum);
		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
	}

	iocg->hweight_active = max_t(u32, hwa, 1);
	iocg->hweight_inuse = max_t(u32, hwi, 1);
	iocg->hweight_gen = ioc_gen;
out:
	if (hw_activep)
		*hw_activep = iocg->hweight_active;
	if (hw_inusep)
		*hw_inusep = iocg->hweight_inuse;
}
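
/*
 * Using the hierarchy example from the top of the file: for A0,
 * current_hweight() walks root -> A -> A0, so hweight_active ends up
 * WEIGHT_ONE * 100 / (100 + 300) * 100 / (100 + 100) = WEIGHT_ONE / 8,
 * the 12.5% share quoted there. hweight_inuse is derived the same way
 * from the inuse weights.
 */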

/*
 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
 * other weights stay unchanged.
 */
static u32 current_hweight_max(struct ioc_gq *iocg)
{
	u32 hwm = WEIGHT_ONE;
	u32 inuse = iocg->active;
	u64 child_inuse_sum;
	int lvl;

	lockdep_assert_held(&iocg->ioc->lock);

	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];

		child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
		inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
					   parent->child_active_sum);
	}

	return max_t(u32, hwm, 1);
}

static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
	u32 weight;

	lockdep_assert_held(&ioc->lock);

	weight = iocg->cfg_weight ?: iocc->dfl_weight;
	if (weight != iocg->weight && iocg->active)
		propagate_weights(iocg, weight, iocg->inuse, true, now);
	iocg->weight = weight;
}

static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	u64 last_period, cur_period;
	u64 vtarget, vtime;
	int i;

	/*
	 * If we seem to be already active, just update the stamp to tell the
	 * timer that we're still active. We don't mind occasional races.
	 */
	if (!list_empty(&iocg->active_list)) {
		ioc_now(ioc, now);
		cur_period = atomic64_read(&ioc->cur_period);
		if (atomic64_read(&iocg->active_period) != cur_period)
			atomic64_set(&iocg->active_period, cur_period);
		return true;
	}

	/* racy check on internal node IOs, treat as root level IOs */
	if (iocg->child_active_sum)
		return false;

	spin_lock_irq(&ioc->lock);

	ioc_now(ioc, now);

	cur_period = atomic64_read(&ioc->cur_period);
	last_period = atomic64_read(&iocg->active_period);
	atomic64_set(&iocg->active_period, cur_period);

	/* already activated or breaking leaf-only constraint? */
	if (!list_empty(&iocg->active_list))
		goto succeed_unlock;
	for (i = iocg->level - 1; i > 0; i--)
		if (!list_empty(&iocg->ancestors[i]->active_list))
			goto fail_unlock;
	if (iocg->child_active_sum)
		goto fail_unlock;

	/*
	 * Always start with the target budget. On deactivation, we throw away
	 * anything above it.
	 */
	vtarget = now->vnow - ioc->margins.target;
	vtime = atomic64_read(&iocg->vtime);

	atomic64_add(vtarget - vtime, &iocg->vtime);
	atomic64_add(vtarget - vtime, &iocg->done_vtime);

	/*
	 * Activate, propagate weight and start period timer if not
	 * running. Reset hweight_gen to avoid accidental match from
	 * wrapping.
	 */
	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
	list_add(&iocg->active_list, &ioc->active_iocgs);

	propagate_weights(iocg, iocg->weight,
			  iocg->last_inuse ?: iocg->weight, true, now);

	TRACE_IOCG_PATH(iocg_activate, iocg, now,
			last_period, cur_period, vtime);

	iocg->activated_at = now->now;

	if (ioc->running == IOC_IDLE) {
		ioc->running = IOC_RUNNING;
		ioc->debt_busy_at = now->now;
		ioc_start_period(ioc, now);
	}

succeed_unlock:
	spin_unlock_irq(&ioc->lock);
	return true;

fail_unlock:
	spin_unlock_irq(&ioc->lock);
	return false;
}

static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	u64 tdelta, delay, new_delay;
	s64 vover, vover_pct;
	u32 hwa;

	lockdep_assert_held(&iocg->waitq.lock);

	/* calculate the current delay in effect - 1/2 every second */
	tdelta = now->now - iocg->delay_at;
	if (iocg->delay)
		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
	else
		delay = 0;

	/* calculate the new delay from the debt amount */
	current_hweight(iocg, &hwa, NULL);
	vover = atomic64_read(&iocg->vtime) +
		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
	vover_pct = div64_s64(100 * vover,
			      ioc->period_us * ioc->vtime_base_rate);

	if (vover_pct <= MIN_DELAY_THR_PCT)
		new_delay = 0;
	else if (vover_pct >= MAX_DELAY_THR_PCT)
		new_delay = MAX_DELAY;
	else
		new_delay = MIN_DELAY +
			div_u64((MAX_DELAY - MIN_DELAY) *
				(vover_pct - MIN_DELAY_THR_PCT),
				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
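
	/*
	 * IOW, new_delay interpolates linearly between MIN_DELAY at 5x
	 * period worth of debt (vover_pct = 500) and MAX_DELAY at 250x
	 * (vover_pct = 25000); e.g. at the midpoint, vover_pct = 12750,
	 * it comes out to roughly (MIN_DELAY + MAX_DELAY) / 2.
	 */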

	/* pick the higher one and apply */
	if (new_delay > delay) {
		iocg->delay = new_delay;
		iocg->delay_at = now->now;
		delay = new_delay;
	}

	if (delay >= MIN_DELAY) {
		if (!iocg->indelay_since)
			iocg->indelay_since = now->now;
		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
		return true;
	} else {
		if (iocg->indelay_since) {
			iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
			iocg->indelay_since = 0;
		}
		iocg->delay = 0;
		blkcg_clear_delay(blkg);
		return false;
	}
}

static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
			    struct ioc_now *now)
{
	struct iocg_pcpu_stat *gcs;

	lockdep_assert_held(&iocg->ioc->lock);
	lockdep_assert_held(&iocg->waitq.lock);
	WARN_ON_ONCE(list_empty(&iocg->active_list));

	/*
	 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
	 * inuse donating all of its share to others until its debt is paid off.
	 */
	if (!iocg->abs_vdebt && abs_cost) {
		iocg->indebt_since = now->now;
		propagate_weights(iocg, iocg->active, 0, false, now);
	}

	iocg->abs_vdebt += abs_cost;

	gcs = get_cpu_ptr(iocg->pcpu_stat);
	local64_add(abs_cost, &gcs->abs_vusage);
	put_cpu_ptr(gcs);
}

static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
			  struct ioc_now *now)
{
	lockdep_assert_held(&iocg->ioc->lock);
	lockdep_assert_held(&iocg->waitq.lock);

	/* make sure that nobody messed with @iocg */
	WARN_ON_ONCE(list_empty(&iocg->active_list));
	WARN_ON_ONCE(iocg->inuse > 1);

	iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);

	/* if debt is paid in full, restore inuse */
	if (!iocg->abs_vdebt) {
		iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
		iocg->indebt_since = 0;

		propagate_weights(iocg, iocg->active, iocg->last_inuse,
				  false, now);
	}
}

static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
			int flags, void *key)
{
	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
	struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);

	ctx->vbudget -= cost;

	if (ctx->vbudget < 0)
		return -1;

	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);

	/*
	 * autoremove_wake_function() removes the wait entry only when it
	 * actually changed the task state. We want the wait always
	 * removed. Remove explicitly and use default_wake_function().
	 */
	list_del_init(&wq_entry->entry);
	wait->committed = true;

	default_wake_function(wq_entry, mode, flags, key);
	return 0;
}

/*
 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
 * addition to iocg->waitq.lock.
 */
static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
			    struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct iocg_wake_ctx ctx = { .iocg = iocg };
	u64 vshortage, expires, oexpires;
	s64 vbudget;
	u32 hwa;

	lockdep_assert_held(&iocg->waitq.lock);

	current_hweight(iocg, &hwa, NULL);
	vbudget = now->vnow - atomic64_read(&iocg->vtime);

	/* pay off debt */
	if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);

		lockdep_assert_held(&ioc->lock);

		atomic64_add(vpay, &iocg->vtime);
		atomic64_add(vpay, &iocg->done_vtime);
		iocg_pay_debt(iocg, abs_vpay, now);
		vbudget -= vpay;
	}

	if (iocg->abs_vdebt || iocg->delay)
		iocg_kick_delay(iocg, now);

	/*
	 * Debt can still be outstanding if we haven't paid all yet or the
	 * caller raced and called without @pay_debt. Shouldn't wake up waiters
	 * under debt. Make sure @vbudget reflects the outstanding amount and is
	 * not positive.
	 */
	if (iocg->abs_vdebt) {
		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
		vbudget = min_t(s64, 0, vbudget - vdebt);
	}

	/*
	 * Wake up the ones which are due and see how much vtime we'll need for
	 * the next one. As paying off debt restores hw_inuse, it must be read
	 * after the above debt payment.
	 */
	ctx.vbudget = vbudget;
	current_hweight(iocg, NULL, &ctx.hw_inuse);

	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);

	if (!waitqueue_active(&iocg->waitq)) {
		if (iocg->wait_since) {
			iocg->local_stat.wait_us += now->now - iocg->wait_since;
			iocg->wait_since = 0;
		}
		return;
	}

	if (!iocg->wait_since)
		iocg->wait_since = now->now;

	if (WARN_ON_ONCE(ctx.vbudget >= 0))
		return;

	/* determine next wakeup, add a timer margin to guarantee chunking */
	vshortage = -ctx.vbudget;
	expires = now->now_ns +
		DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
		NSEC_PER_USEC;
	expires += ioc->timer_slack_ns;

	/* if already active and close enough, don't bother */
	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
	if (hrtimer_is_queued(&iocg->waitq_timer) &&
	    abs(oexpires - expires) <= ioc->timer_slack_ns)
		return;

	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
{
	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
	bool pay_debt = READ_ONCE(iocg->abs_vdebt);
	struct ioc_now now;
	unsigned long flags;

	ioc_now(iocg->ioc, &now);

	iocg_lock(iocg, pay_debt, &flags);
	iocg_kick_waitq(iocg, pay_debt, &now);
	iocg_unlock(iocg, pay_debt, &flags);

	return HRTIMER_NORESTART;
}

static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
{
	u32 nr_met[2] = { };
	u32 nr_missed[2] = { };
	u64 rq_wait_ns = 0;
	int cpu, rw;

	for_each_online_cpu(cpu) {
		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
		u64 this_rq_wait_ns;

		for (rw = READ; rw <= WRITE; rw++) {
			u32 this_met = local_read(&stat->missed[rw].nr_met);
			u32 this_missed = local_read(&stat->missed[rw].nr_missed);

			nr_met[rw] += this_met - stat->missed[rw].last_met;
			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
			stat->missed[rw].last_met = this_met;
			stat->missed[rw].last_missed = this_missed;
		}

		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
		stat->last_rq_wait_ns = this_rq_wait_ns;
	}

	for (rw = READ; rw <= WRITE; rw++) {
		if (nr_met[rw] + nr_missed[rw])
			missed_ppm_ar[rw] =
				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
						   nr_met[rw] + nr_missed[rw]);
		else
			missed_ppm_ar[rw] = 0;
	}

	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
				   ioc->period_us * NSEC_PER_USEC);
}

/* was iocg idle this period? */
static bool iocg_is_idle(struct ioc_gq *iocg)
{
	struct ioc *ioc = iocg->ioc;

	/* did something get issued this period? */
	if (atomic64_read(&iocg->active_period) ==
	    atomic64_read(&ioc->cur_period))
		return false;

	/* is something in flight? */
	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
		return false;

	return true;
}

/*
 * Call this function on the target leaf @iocg's to build pre-order traversal
 * list of all the ancestors in @inner_walk. The inner nodes are linked through
 * ->walk_list and the caller is responsible for dissolving the list after use.
 */
static void iocg_build_inner_walk(struct ioc_gq *iocg,
				  struct list_head *inner_walk)
{
	int lvl;

	WARN_ON_ONCE(!list_empty(&iocg->walk_list));

	/* find the first ancestor which hasn't been visited yet */
	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		if (!list_empty(&iocg->ancestors[lvl]->walk_list))
			break;
	}

	/* walk down and visit the inner nodes to get pre-order traversal */
	while (++lvl <= iocg->level - 1) {
		struct ioc_gq *inner = iocg->ancestors[lvl];

		/* record traversal order */
		list_add_tail(&inner->walk_list, inner_walk);
	}
}

/* collect per-cpu counters and propagate the deltas to the parent */
static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct iocg_stat new_stat;
	u64 abs_vusage = 0;
	u64 vusage_delta;
	int cpu;

	lockdep_assert_held(&iocg->ioc->lock);

	/* collect per-cpu counters */
	for_each_possible_cpu(cpu) {
		abs_vusage += local64_read(
				per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
	}
	vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
	iocg->last_stat_abs_vusage = abs_vusage;

	iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
	iocg->local_stat.usage_us += iocg->usage_delta_us;

	/* propagate upwards */
	new_stat.usage_us =
		iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
	new_stat.wait_us =
		iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
	new_stat.indebt_us =
		iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
	new_stat.indelay_us =
		iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;

	/* propagate the deltas to the parent */
	if (iocg->level > 0) {
		struct iocg_stat *parent_stat =
			&iocg->ancestors[iocg->level - 1]->desc_stat;

		parent_stat->usage_us +=
			new_stat.usage_us - iocg->last_stat.usage_us;
		parent_stat->wait_us +=
			new_stat.wait_us - iocg->last_stat.wait_us;
		parent_stat->indebt_us +=
			new_stat.indebt_us - iocg->last_stat.indebt_us;
		parent_stat->indelay_us +=
			new_stat.indelay_us - iocg->last_stat.indelay_us;
	}

	iocg->last_stat = new_stat;
}

/* get stat counters ready for reading on all active iocgs */
static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
{
	LIST_HEAD(inner_walk);
	struct ioc_gq *iocg, *tiocg;

	/* flush leaves and build inner node walk list */
	list_for_each_entry(iocg, target_iocgs, active_list) {
		iocg_flush_stat_one(iocg, now);
		iocg_build_inner_walk(iocg, &inner_walk);
	}

	/* keep flushing upwards by walking the inner list backwards */
	list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
		iocg_flush_stat_one(iocg, now);
		list_del_init(&iocg->walk_list);
	}
}

/*
 * Determine what @iocg's hweight_inuse should be after donating unused
 * capacity. @hwm is the upper bound and used to signal no donation. This
 * function also throws away @iocg's excess budget.
 */
static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
				  u32 usage, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	u64 vtime = atomic64_read(&iocg->vtime);
	s64 excess, delta, target, new_hwi;

	/* debt handling owns inuse for debtors */
	if (iocg->abs_vdebt)
		return 1;

	/* see whether minimum margin requirement is met */
	if (waitqueue_active(&iocg->waitq) ||
	    time_after64(vtime, now->vnow - ioc->margins.min))
		return hwm;

	/* throw away excess above target */
	excess = now->vnow - vtime - ioc->margins.target;
	if (excess > 0) {
		atomic64_add(excess, &iocg->vtime);
		atomic64_add(excess, &iocg->done_vtime);
		vtime += excess;
		ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
	}

	/*
	 * Let's say the distance between iocg's and device's vtimes as a
	 * fraction of period duration is delta. Assuming that the iocg will
	 * consume the usage determined above, we want to determine new_hwi so
	 * that delta equals MARGIN_TARGET at the end of the next period.
	 *
	 * We need to execute usage worth of IOs while spending the sum of the
	 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
	 * (delta):
	 *
	 *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
	 *
	 * Therefore, the new_hwi is:
	 *
	 *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
	 */
	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
			  now->vnow - ioc->period_at_vtime);
	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);

	return clamp_t(s64, new_hwi, 1, hwm);
}
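
/*
 * Worked example: if the iocg sits half a period ahead (delta = 0.5 in
 * WEIGHT_ONE terms) and used 20% of the device (usage = 0.2 * WEIGHT_ONE),
 * then with MARGIN_TARGET_PCT at 50:
 *
 *	new_hwi = 0.2 / (1 - 0.5 + 0.5) = 0.2
 *
 * i.e. hweight_inuse is shrunk to match the observed usage.
 */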

/*
 * For work-conservation, an iocg which isn't using all of its share should
 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
 *
 * #1 is mathematically simpler but has the drawback of requiring synchronous
 * global hweight_inuse updates when idle iocg's get activated or inuse weights
 * change due to donation snapbacks as it has the possibility of grossly
 * overshooting what's allowed by the model and vrate.
 *
 * #2 is inherently safe with local operations. The donating iocg can easily
 * snap back to higher weights when needed without worrying about impacts on
 * other nodes as the impacts will be inherently correct. This also makes idle
 * iocg activations safe. The only effect activations have is decreasing
 * hweight_inuse of others, the right solution to which is for those iocgs to
 * snap back to higher weights.
 *
 * So, we go with #2. The challenge is calculating how each donating iocg's
 * inuse should be adjusted to achieve the target donation amounts. This is done
 * using Andy's method described in the following pdf.
 *
 *   https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
 *
 * Given the weights and target after-donation hweight_inuse values, Andy's
 * method determines how the proportional distribution should look like at each
 * sibling level to maintain the relative relationship between all non-donating
 * pairs. To roughly summarize, it divides the tree into donating and
 * non-donating parts, calculates global donation rate which is used to
 * determine the target hweight_inuse for each node, and then derives per-level
 * proportions.
 *
 * The following pdf shows that global distribution calculated this way can be
 * achieved by scaling inuse weights of donating leaves and propagating the
 * adjustments upwards proportionally.
 *
 *   https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
 *
 * Combining the above two, we can determine how each leaf iocg's inuse should
 * be adjusted to achieve the target donation.
 *
 *   https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
 *
 * The inline comments use symbols from the last pdf.
 *
 *   b is the sum of the absolute budgets in the subtree. 1 for the root node.
 *   f is the sum of the absolute budgets of non-donating nodes in the subtree.
 *   t is the sum of the absolute budgets of donating nodes in the subtree.
 *   w is the weight of the node. w = w_f + w_t
 *   w_f is the non-donating portion of w. w_f = w * f / b
 *   w_t is the donating portion of w. w_t = w * t / b
 *   s is the sum of all sibling weights. s = Sum(w) for siblings
 *   s_f and s_t are the non-donating and donating portions of s.
 *
 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
 * w_pt is the donating portion of the parent's weight and w'_pt the same value
 * after adjustments. Subscript r denotes the root node's values.
 */
static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
{
	LIST_HEAD(over_hwa);
	LIST_HEAD(inner_walk);
	struct ioc_gq *iocg, *tiocg, *root_iocg;
	u32 after_sum, over_sum, over_target, gamma;

	/*
	 * It's pretty unlikely but possible for the total sum of
	 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
	 * confuse the following calculations. If such condition is detected,
	 * scale down everyone over its full share equally to keep the sum below
	 * WEIGHT_ONE.
	 */
	after_sum = 0;
	over_sum = 0;
	list_for_each_entry(iocg, surpluses, surplus_list) {
		u32 hwa;

		current_hweight(iocg, &hwa, NULL);
		after_sum += iocg->hweight_after_donation;

		if (iocg->hweight_after_donation > hwa) {
			over_sum += iocg->hweight_after_donation;
			list_add(&iocg->walk_list, &over_hwa);
		}
	}

	if (after_sum >= WEIGHT_ONE) {
		/*
		 * The delta should be deducted from the over_sum, calculate
		 * target over_sum value.
		 */
		u32 over_delta = after_sum - (WEIGHT_ONE - 1);
		WARN_ON_ONCE(over_sum <= over_delta);
		over_target = over_sum - over_delta;
	} else {
		over_target = 0;
	}

	list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
		if (over_target)
			iocg->hweight_after_donation =
				div_u64((u64)iocg->hweight_after_donation *
					over_target, over_sum);
		list_del_init(&iocg->walk_list);
	}

	/*
	 * Build pre-order inner node walk list and prepare for donation
	 * adjustment calculations.
	 */
	list_for_each_entry(iocg, surpluses, surplus_list) {
		iocg_build_inner_walk(iocg, &inner_walk);
	}

	root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
	WARN_ON_ONCE(root_iocg->level > 0);

	list_for_each_entry(iocg, &inner_walk, walk_list) {
		iocg->child_adjusted_sum = 0;
		iocg->hweight_donating = 0;
		iocg->hweight_after_donation = 0;
	}

	/*
	 * Propagate the donating budget (b_t) and after donation budget (b'_t)
	 * up the hierarchy.
	 */
	list_for_each_entry(iocg, surpluses, surplus_list) {
		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];

		parent->hweight_donating += iocg->hweight_donating;
		parent->hweight_after_donation += iocg->hweight_after_donation;
	}

	list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
		if (iocg->level > 0) {
			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];

			parent->hweight_donating += iocg->hweight_donating;
			parent->hweight_after_donation += iocg->hweight_after_donation;
		}
	}

	/*
	 * Calculate inner hwa's (b) and make sure the donation values are
	 * within the accepted ranges as we're doing low res calculations with
	 * roundups.
	 */
	list_for_each_entry(iocg, &inner_walk, walk_list) {
		if (iocg->level) {
			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];

			iocg->hweight_active = DIV64_U64_ROUND_UP(
				(u64)parent->hweight_active * iocg->active,
				parent->child_active_sum);
		}

		iocg->hweight_donating = min(iocg->hweight_donating,
					     iocg->hweight_active);
		iocg->hweight_after_donation = min(iocg->hweight_after_donation,
						   iocg->hweight_donating - 1);
		if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
				 iocg->hweight_donating <= 1 ||
				 iocg->hweight_after_donation == 0)) {
			pr_warn("iocg: invalid donation weights in ");
			pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
			pr_cont(": active=%u donating=%u after=%u\n",
				iocg->hweight_active, iocg->hweight_donating,
				iocg->hweight_after_donation);
		}
	}

	/*
	 * Calculate the global donation rate (gamma) - the rate to adjust
	 * non-donating budgets by.
	 *
	 * No need to use 64bit multiplication here as the first operand is
	 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
	 *
	 * We know that there are beneficiary nodes and the sum of the donating
	 * hweights can't be whole; however, due to the round-ups during hweight
	 * calculations, root_iocg->hweight_donating might still end up equal to
	 * or greater than whole. Limit the range when calculating the divider.
	 *
	 *   gamma = (1 - t_r') / (1 - t_r)
	 */
	gamma = DIV_ROUND_UP(
		(WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
		WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
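
	/*
	 * E.g. if donors currently cover 40% of the device (t_r = 0.4) and
	 * should cover only 10% afterwards (t_r' = 0.1), then
	 * gamma = (1 - 0.1) / (1 - 0.4) = 1.5 and every non-donating budget
	 * is scaled up 1.5x to absorb the freed-up capacity.
	 */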
1901 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1904 list_for_each_entry(iocg, &inner_walk, walk_list) {
1905 struct ioc_gq *parent;
1906 u32 inuse, wpt, wptp;
1909 if (iocg->level == 0) {
1910 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1911 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1912 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1913 WEIGHT_ONE - iocg->hweight_after_donation);
1917 parent = iocg->ancestors[iocg->level - 1];
1919 /* b' = gamma * b_f + b_t' */
1920 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1921 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1922 WEIGHT_ONE) + iocg->hweight_after_donation;
1924 /* w' = s' * b' / b'_p */
1925 inuse = DIV64_U64_ROUND_UP(
1926 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1927 parent->hweight_inuse);
1929 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1930 st = DIV64_U64_ROUND_UP(
1931 iocg->child_active_sum * iocg->hweight_donating,
1932 iocg->hweight_active);
1933 sf = iocg->child_active_sum - st;
1934 wpt = DIV64_U64_ROUND_UP(
1935 (u64)iocg->active * iocg->hweight_donating,
1936 iocg->hweight_active);
1937 wptp = DIV64_U64_ROUND_UP(
1938 (u64)inuse * iocg->hweight_after_donation,
1939 iocg->hweight_inuse);
1941 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1945 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1946 * we can finally determine leaf adjustments.
1948 list_for_each_entry(iocg, surpluses, surplus_list) {
1949 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1953 * In-debt iocgs participated in the donation calculation with
1954 * the minimum target hweight_inuse. Configuring inuse
1955 * accordingly would work fine but debt handling expects
1956 * @iocg->inuse stay at the minimum and we don't wanna
1959 if (iocg->abs_vdebt) {
1960 WARN_ON_ONCE(iocg->inuse > 1);
1964 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
1965 inuse = DIV64_U64_ROUND_UP(
1966 parent->child_adjusted_sum * iocg->hweight_after_donation,
1967 parent->hweight_inuse);
1969 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
1971 iocg->hweight_inuse,
1972 iocg->hweight_after_donation);
1974 __propagate_weights(iocg, iocg->active, inuse, true, now);
1977 /* walk list should be dissolved after use */
1978 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
1979 list_del_init(&iocg->walk_list);
1982 static void ioc_timer_fn(struct timer_list *timer)
1984 struct ioc *ioc = container_of(timer, struct ioc, timer);
1985 struct ioc_gq *iocg, *tiocg;
1987 LIST_HEAD(surpluses);
1988 int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
1989 u64 usage_us_sum = 0;
1990 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
1991 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
1992 u32 missed_ppm[2], rq_wait_pct;
1994 int prev_busy_level;
1996 /* how were the latencies during the period? */
1997 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
1999 /* take care of active iocgs */
2000 spin_lock_irq(&ioc->lock);
2004 period_vtime = now.vnow - ioc->period_at_vtime;
2005 if (WARN_ON_ONCE(!period_vtime)) {
2006 spin_unlock_irq(&ioc->lock);
2011 * Waiters determine the sleep durations based on the vrate they
2012 * saw at the time of sleep. If vrate has increased, some waiters
2013 * could be sleeping for too long. Wake up tardy waiters which
2014 * should have woken up in the last period and expire idle iocgs.
2016 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2017 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2018 !iocg->delay && !iocg_is_idle(iocg))
2019 continue;
2021 spin_lock(&iocg->waitq.lock);
2023 /* flush wait and indebt stat deltas */
2024 if (iocg->wait_since) {
2025 iocg->local_stat.wait_us += now.now - iocg->wait_since;
2026 iocg->wait_since = now.now;
2028 if (iocg->indebt_since) {
2029 iocg->local_stat.indebt_us +=
2030 now.now - iocg->indebt_since;
2031 iocg->indebt_since = now.now;
2033 if (iocg->indelay_since) {
2034 iocg->local_stat.indelay_us +=
2035 now.now - iocg->indelay_since;
2036 iocg->indelay_since = now.now;
2039 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2040 iocg->delay) {
2041 /* might be oversleeping vtime / hweight changes, kick */
2042 iocg_kick_waitq(iocg, true, &now);
2043 if (iocg->abs_vdebt)
2044 nr_debtors++;
2045 } else if (iocg_is_idle(iocg)) {
2046 /* no waiter and idle, deactivate */
2047 u64 vtime = atomic64_read(&iocg->vtime);
2048 s64 excess;
2051 * @iocg has been inactive for a full duration and will
2052 * have a high budget. Account anything above target as
2053 * error and throw away. On reactivation, it'll start
2054 * with the target budget.
2056 excess = now.vnow - vtime - ioc->margins.target;
2057 if (excess > 0) {
2058 u32 old_hwi;
2060 current_hweight(iocg, NULL, &old_hwi);
2061 ioc->vtime_err -= div64_u64(excess * old_hwi,
2062 WEIGHT_ONE);
2063 }
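/*
 * e.g. (made-up numbers): an idle iocg sitting 100ms of vtime above
 * the target margin with old_hwi = 25% has ~25ms worth of device time
 * deducted here, discarding the excess budget instead of letting the
 * iocg burst with it on reactivation.
 */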
2065 __propagate_weights(iocg, 0, 0, false, &now);
2066 list_del_init(&iocg->active_list);
2069 spin_unlock(&iocg->waitq.lock);
2071 commit_weights(ioc);
2074 * Wait and indebt stat are flushed above and the donation calculation
2075 * below needs updated usage stat. Let's bring stat up-to-date.
2077 iocg_flush_stat(&ioc->active_iocgs, &now);
2079 /* calc usage and see whether some weights need to be moved around */
2080 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2081 u64 vdone, vtime, usage_us, usage_dur;
2082 u32 usage, hw_active, hw_inuse;
2085 * Collect unused and wind vtime closer to vnow to prevent
2086 * iocgs from accumulating a large amount of budget.
2088 vdone = atomic64_read(&iocg->done_vtime);
2089 vtime = atomic64_read(&iocg->vtime);
2090 current_hweight(iocg, &hw_active, &hw_inuse);
2093 * Latency QoS detection doesn't account for IOs which are
2094 * in-flight for longer than a period. Detect them by
2095 * comparing vdone against period start. If lagging behind
2096 * IOs from past periods, don't increase vrate.
2098 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2099 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2100 time_after64(vtime, vdone) &&
2101 time_after64(vtime, now.vnow -
2102 MAX_LAGGING_PERIODS * period_vtime) &&
2103 time_before64(vdone, now.vnow - period_vtime))
2104 ++nr_lagging;
2107 * Determine absolute usage factoring in in-flight IOs to avoid
2108 * high-latency completions appearing as idle.
2110 usage_us = iocg->usage_delta_us;
2111 usage_us_sum += usage_us;
2113 if (vdone != vtime) {
2114 u64 inflight_us = DIV64_U64_ROUND_UP(
2115 cost_to_abs_cost(vtime - vdone, hw_inuse),
2116 ioc->vtime_base_rate);
2117 usage_us = max(usage_us, inflight_us);
2120 /* convert to hweight based usage ratio */
2121 if (time_after64(iocg->activated_at, ioc->period_at))
2122 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2123 else
2124 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2126 usage = clamp_t(u32,
2127 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2128 usage_dur),
2129 1, WEIGHT_ONE);
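/*
 * e.g. (hypothetical numbers): an iocg that consumed usage_us = 50ms
 * of device time over a 100ms period maps to usage = WEIGHT_ONE / 2,
 * i.e. it occupied roughly half of the device, clamped to
 * [1, WEIGHT_ONE].
 */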
2131 /* see whether there's surplus vtime */
2132 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2133 if (hw_inuse < hw_active ||
2134 (!waitqueue_active(&iocg->waitq) &&
2135 time_before64(vtime, now.vnow - ioc->margins.low))) {
2136 u32 hwa, old_hwi, hwm, new_hwi;
2139 * Already donating or accumulated enough to start.
2140 * Determine the donation amount.
2142 current_hweight(iocg, &hwa, &old_hwi);
2143 hwm = current_hweight_max(iocg);
2144 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2145 usage, &now);
2146 if (new_hwi < hwm) {
2147 iocg->hweight_donating = hwa;
2148 iocg->hweight_after_donation = new_hwi;
2149 list_add(&iocg->surplus_list, &surpluses);
2150 } else {
2151 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2152 iocg->inuse, iocg->active,
2153 iocg->hweight_inuse, new_hwi);
2155 __propagate_weights(iocg, iocg->active,
2156 iocg->active, true, &now);
2157 nr_shortages++;
2158 }
2159 } else {
2160 /* genuinely short on vtime */
2161 nr_shortages++;
2162 }
2165 if (!list_empty(&surpluses) && nr_shortages)
2166 transfer_surpluses(&surpluses, &now);
2168 commit_weights(ioc);
2170 /* surplus list should be dissolved after use */
2171 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2172 list_del_init(&iocg->surplus_list);
2175 * A low weight iocg can amass a large amount of debt, for example, when
2176 * anonymous memory gets reclaimed aggressively. If the system has a lot
2177 * of memory paired with a slow IO device, the debt can span multiple
2178 * seconds or more. If there are no other subsequent IO issuers, the
2179 * in-debt iocg may end up blocked paying its debt while the IO device
2180 * is idle.
2182 * The following protects against such pathological cases. If the device
2183 * has been sufficiently idle for a substantial amount of time, the
2184 * debts are halved. The criteria are on the conservative side as we
2185 * want to resolve the rare extreme cases without impacting regular
2186 * operation by forgiving debts too readily.
2188 if (nr_debtors &&
2189 div64_u64(100 * usage_us_sum, now.now - ioc->period_at) >=
2190 DEBT_BUSY_USAGE_PCT)
2191 ioc->debt_busy_at = now.now;
2193 if (nr_debtors &&
2194 now.now - ioc->debt_busy_at >= DEBT_REDUCTION_IDLE_DUR) {
2195 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2196 if (iocg->abs_vdebt) {
2197 spin_lock(&iocg->waitq.lock);
2198 iocg->abs_vdebt /= 2;
2199 iocg_kick_waitq(iocg, true, &now);
2200 spin_unlock(&iocg->waitq.lock);
2203 ioc->debt_busy_at = now.now;
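/*
 * e.g. (illustrative timeline): an iocg stuck with 8s worth of vdebt
 * on a device that then goes idle would see the debt cut to 4s, 2s,
 * ... on each successive idle window of DEBT_REDUCTION_IDLE_DUR,
 * rather than staying blocked for the full 8s.
 */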
2207 * If q is getting clogged or we're missing too much, we're issuing
2208 * too much IO and should lower vtime rate. If we're not missing
2209 * and experiencing shortages but not surpluses, we're too stingy
2210 * and should increase vtime rate.
2212 prev_busy_level = ioc->busy_level;
2213 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2214 missed_ppm[READ] > ppm_rthr ||
2215 missed_ppm[WRITE] > ppm_wthr) {
2216 /* clearly missing QoS targets, slow down vrate */
2217 ioc->busy_level = max(ioc->busy_level, 0);
2218 ioc->busy_level++;
2219 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2220 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2221 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2222 /* QoS targets are being met with >25% margin */
2223 if (nr_shortages) {
2225 * We're throttling while the device has spare
2226 * capacity. If vrate was being slowed down, stop.
2228 ioc->busy_level = min(ioc->busy_level, 0);
2231 * If there are IOs spanning multiple periods, wait
2232 * them out before pushing the device harder.
2234 if (!nr_lagging)
2235 ioc->busy_level--;
2236 } else {
2238 * Nobody is being throttled and the users aren't
2239 * issuing enough IOs to saturate the device. We
2240 * simply don't know how close the device is to
2241 * saturation. Coast.
2243 ioc->busy_level = 0;
2244 }
2245 } else {
2246 /* inside the hysteresis margin, we're good */
2247 ioc->busy_level = 0;
2250 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2252 if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
2253 u64 vrate = ioc->vtime_base_rate;
2254 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
2256 /* rq_wait signal is always reliable, ignore user vrate_min */
2257 if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
2258 vrate_min = VRATE_MIN;
2261 * If vrate is out of bounds, apply clamp gradually as the
2262 * bounds can change abruptly. Otherwise, apply busy_level
2263 * based adjustment.
2265 if (vrate < vrate_min) {
2266 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
2267 100);
2268 vrate = min(vrate, vrate_min);
2269 } else if (vrate > vrate_max) {
2270 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
2271 100);
2272 vrate = max(vrate, vrate_max);
2273 } else {
2274 int idx = min_t(int, abs(ioc->busy_level),
2275 ARRAY_SIZE(vrate_adj_pct) - 1);
2276 u32 adj_pct = vrate_adj_pct[idx];
2278 if (ioc->busy_level > 0)
2279 adj_pct = 100 - adj_pct;
2280 else
2281 adj_pct = 100 + adj_pct;
2283 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
2284 vrate_min, vrate_max);
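/*
 * e.g. (hypothetical table values): with busy_level = 4 and
 * vrate_adj_pct[4] at, say, 1, an overloaded device gets vrate scaled
 * to 99% of its current value, while a starved one (busy_level = -4)
 * would get 101%, always clamped to [vrate_min, vrate_max].
 */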
2287 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
2288 nr_lagging, nr_shortages);
2290 ioc->vtime_base_rate = vrate;
2291 ioc_refresh_margins(ioc);
2292 } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
2293 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
2294 missed_ppm, rq_wait_pct, nr_lagging,
2295 nr_shortages);
2298 ioc_refresh_params(ioc, false);
2301 * This period is done. Move on to the next one. If nothing's
2302 * going on with the device, stop the timer.
2304 atomic64_inc(&ioc->cur_period);
2306 if (ioc->running != IOC_STOP) {
2307 if (!list_empty(&ioc->active_iocgs)) {
2308 ioc_start_period(ioc, &now);
2309 } else {
2310 ioc->busy_level = 0;
2311 ioc->vtime_err = 0;
2312 ioc->running = IOC_IDLE;
2313 }
2315 ioc_refresh_vrate(ioc, &now);
2318 spin_unlock_irq(&ioc->lock);
2321 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2322 u64 abs_cost, struct ioc_now *now)
2324 struct ioc *ioc = iocg->ioc;
2325 struct ioc_margins *margins = &ioc->margins;
2326 u32 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2327 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2328 u32 hwi;
2329 s64 margin;
2330 u64 cost, new_inuse;
2332 current_hweight(iocg, NULL, &hwi);
2334 cost = abs_cost_to_cost(abs_cost, hwi);
2335 margin = now->vnow - vtime - cost;
2337 /* debt handling owns inuse for debtors */
2338 if (iocg->abs_vdebt)
2339 return cost;
2342 * We only increase inuse during period and do so iff the margin has
2343 * deteriorated since the previous adjustment.
2345 if (margin >= iocg->saved_margin || margin >= margins->low ||
2346 iocg->inuse == iocg->active)
2347 return cost;
2349 spin_lock_irq(&ioc->lock);
2351 /* we own inuse only when @iocg is in the normal active state */
2352 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2353 spin_unlock_irq(&ioc->lock);
2354 return cost;
2355 }
2357 /* bump up inuse till @abs_cost fits in the existing budget */
2358 new_inuse = iocg->inuse;
2359 do {
2360 new_inuse = new_inuse + adj_step;
2361 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2362 current_hweight(iocg, NULL, &hwi);
2363 cost = abs_cost_to_cost(abs_cost, hwi);
2364 } while (time_after64(vtime + cost, now->vnow) &&
2365 iocg->inuse != iocg->active);
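/*
 * e.g. assuming INUSE_ADJ_STEP_PCT were 25, an iocg with active = 10000
 * walks inuse up in steps of 2500 until either the resulting
 * hweight_inuse makes @abs_cost fit before now->vnow or inuse reaches
 * active, publishing each step through propagate_weights().
 */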
2367 spin_unlock_irq(&ioc->lock);
2369 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2370 old_inuse, iocg->inuse, old_hwi, hwi);
2372 return cost;
2375 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2376 bool is_merge, u64 *costp)
2378 struct ioc *ioc = iocg->ioc;
2379 u64 coef_seqio, coef_randio, coef_page;
2380 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2381 u64 seek_pages = 0;
2382 u64 cost = 0;
2384 switch (bio_op(bio)) {
2385 case REQ_OP_READ:
2386 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2387 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2388 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2389 break;
2390 case REQ_OP_WRITE:
2391 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2392 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2393 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2394 break;
2395 default:
2396 goto out;
2397 }
2399 if (iocg->cursor) {
2400 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2401 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2404 if (!is_merge) {
2405 if (seek_pages > LCOEF_RANDIO_PAGES) {
2406 cost += coef_randio;
2407 } else {
2408 cost += coef_seqio;
2409 }
2410 }
2411 cost += pages * coef_page;
2412 out:
2413 *costp = cost;
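/*
 * Worked example with made-up coefficients: a 256KB (64 page) read
 * landing more than LCOEF_RANDIO_PAGES away from the cursor costs
 * coef_randio + 64 * coef_page, while the same bytes issued
 * sequentially cost only coef_seqio + 64 * coef_page - the linear
 * model described at the top of the file.
 */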
2416 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2418 u64 cost;
2420 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2421 return cost;
2424 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2425 u64 *costp)
2427 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2429 switch (req_op(rq)) {
2430 case REQ_OP_READ:
2431 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2432 break;
2433 case REQ_OP_WRITE:
2434 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2435 break;
2436 default:
2437 *costp = 0;
2438 }
2441 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2443 u64 cost;
2445 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2446 return cost;
2449 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2451 struct blkcg_gq *blkg = bio->bi_blkg;
2452 struct ioc *ioc = rqos_to_ioc(rqos);
2453 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2454 struct ioc_now now;
2455 struct iocg_wait wait;
2456 u64 abs_cost, cost, vtime;
2457 bool use_debt, ioc_locked;
2458 unsigned long flags;
2460 /* bypass IOs if disabled or for root cgroup */
2461 if (!ioc->enabled || !iocg->level)
2462 return;
2464 /* calculate the absolute vtime cost */
2465 abs_cost = calc_vtime_cost(bio, iocg, false);
2466 if (!abs_cost)
2467 return;
2469 if (!iocg_activate(iocg, &now))
2470 return;
2472 iocg->cursor = bio_end_sector(bio);
2473 vtime = atomic64_read(&iocg->vtime);
2474 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2477 * If no one's waiting and within budget, issue right away. The
2478 * tests are racy but the races aren't systemic - we only miss once
2479 * in a while which is fine.
2481 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2482 time_before_eq64(vtime + cost, now.vnow)) {
2483 iocg_commit_bio(iocg, bio, abs_cost, cost);
2484 return;
2488 * We're over budget. This can be handled in two ways. IOs which may
2489 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2490 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2491 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2492 * whether debt handling is needed and acquire locks accordingly.
2494 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2495 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2496 retry_lock:
2497 iocg_lock(iocg, ioc_locked, &flags);
2500 * @iocg must stay activated for debt and waitq handling. Deactivation
2501 * is synchronized against both ioc->lock and waitq.lock and we won't
2502 * get deactivated as long as we're waiting or have debt, so we're good
2503 * if we're activated here. In the unlikely cases that we aren't, just
2504 * issue the IO.
2506 if (unlikely(list_empty(&iocg->active_list))) {
2507 iocg_unlock(iocg, ioc_locked, &flags);
2508 iocg_commit_bio(iocg, bio, abs_cost, cost);
2509 return;
2513 * We're over budget. If @bio has to be issued regardless, remember
2514 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2515 * off the debt before waking more IOs.
2517 * This way, the debt is continuously paid off each period with the
2518 * actual budget available to the cgroup. If we just wound vtime, we
2519 * would incorrectly use the current hw_inuse for the entire amount
2520 * which, for example, can lead to the cgroup staying blocked for a
2521 * long time even with substantially raised hw_inuse.
2523 * An iocg with vdebt should stay online so that the timer can keep
2524 * deducting its vdebt and [de]activate use_delay mechanism
2525 * accordingly. We don't want to race against the timer trying to
2526 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2527 * penalizing the cgroup and its descendants.
2529 if (use_debt) {
2530 iocg_incur_debt(iocg, abs_cost, &now);
2531 if (iocg_kick_delay(iocg, &now))
2532 blkcg_schedule_throttle(rqos->q,
2533 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2534 iocg_unlock(iocg, ioc_locked, &flags);
2535 return;
2536 }
2538 /* guarantee that iocgs w/ waiters have maximum inuse */
2539 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2540 if (!ioc_locked) {
2541 iocg_unlock(iocg, false, &flags);
2542 ioc_locked = true;
2543 goto retry_lock;
2544 }
2545 propagate_weights(iocg, iocg->active, iocg->active, true,
2546 &now);
2550 * Append self to the waitq and schedule the wakeup timer if we're
2551 * the first waiter. The timer duration is calculated based on the
2552 * current vrate. vtime and hweight changes can make it too short
2553 * or too long. Each wait entry records the absolute cost it's
2554 * waiting for to allow re-evaluation using a custom wait entry.
2556 * If too short, the timer simply reschedules itself. If too long,
2557 * the period timer will notice and trigger wakeups.
2559 * All waiters are on iocg->waitq and the wait states are
2560 * synchronized using waitq.lock.
2562 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2563 wait.wait.private = current;
2564 wait.bio = bio;
2565 wait.abs_cost = abs_cost;
2566 wait.committed = false; /* will be set true by waker */
2568 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2569 iocg_kick_waitq(iocg, ioc_locked, &now);
2571 iocg_unlock(iocg, ioc_locked, &flags);
2573 while (true) {
2574 set_current_state(TASK_UNINTERRUPTIBLE);
2575 if (wait.committed)
2576 break;
2577 io_schedule();
2578 }
2580 /* waker already committed us, proceed */
2581 finish_wait(&iocg->waitq, &wait.wait);
2584 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2585 struct bio *bio)
2587 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2588 struct ioc *ioc = iocg->ioc;
2589 sector_t bio_end = bio_end_sector(bio);
2590 struct ioc_now now;
2591 u64 vtime, abs_cost, cost;
2592 unsigned long flags;
2594 /* bypass if disabled or for root cgroup */
2595 if (!ioc->enabled || !iocg->level)
2596 return;
2598 abs_cost = calc_vtime_cost(bio, iocg, true);
2599 if (!abs_cost)
2600 return;
2602 ioc_now(ioc, &now);
2604 vtime = atomic64_read(&iocg->vtime);
2605 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2607 /* update cursor if backmerging into the request at the cursor */
2608 if (blk_rq_pos(rq) < bio_end &&
2609 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2610 iocg->cursor = bio_end;
2613 * Charge if there's enough vtime budget and the existing request has
2614 * cost assigned.
2616 if (rq->bio && rq->bio->bi_iocost_cost &&
2617 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2618 iocg_commit_bio(iocg, bio, abs_cost, cost);
2619 return;
2620 }
2623 * Otherwise, account it as debt if @iocg is online, which it should
2624 * be for the vast majority of cases. See debt handling in
2625 * ioc_rqos_throttle() for details.
2627 spin_lock_irqsave(&ioc->lock, flags);
2628 spin_lock(&iocg->waitq.lock);
2630 if (likely(!list_empty(&iocg->active_list))) {
2631 iocg_incur_debt(iocg, abs_cost, &now);
2632 if (iocg_kick_delay(iocg, &now))
2633 blkcg_schedule_throttle(rqos->q,
2634 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2635 } else {
2636 iocg_commit_bio(iocg, bio, abs_cost, cost);
2639 spin_unlock(&iocg->waitq.lock);
2640 spin_unlock_irqrestore(&ioc->lock, flags);
2643 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2645 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2647 if (iocg && bio->bi_iocost_cost)
2648 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2651 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2653 struct ioc *ioc = rqos_to_ioc(rqos);
2654 struct ioc_pcpu_stat *ccs;
2655 u64 on_q_ns, rq_wait_ns, size_nsec;
2656 int pidx, rw;
2658 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2661 switch (req_op(rq) & REQ_OP_MASK) {
2662 case REQ_OP_READ:
2663 pidx = QOS_RLAT;
2664 rw = READ;
2665 break;
2666 case REQ_OP_WRITE:
2667 pidx = QOS_WLAT;
2668 rw = WRITE;
2669 break;
2670 default:
2671 return;
2672 }
2674 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2675 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2676 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2678 ccs = get_cpu_ptr(ioc->pcpu_stat);
2680 if (on_q_ns <= size_nsec ||
2681 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2682 local_inc(&ccs->missed[rw].nr_met);
2684 local_inc(&ccs->missed[rw].nr_missed);
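/*
 * e.g. (illustrative): a read that spent on_q_ns = 3ms between
 * allocation and completion with size_nsec = 1ms is judged against
 * qos[QOS_RLAT]; with a 2.5ms rlat, 3ms - 1ms = 2ms <= 2.5ms counts
 * as met.
 */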
2686 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2688 put_cpu_ptr(ccs);
2691 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2693 struct ioc *ioc = rqos_to_ioc(rqos);
2695 spin_lock_irq(&ioc->lock);
2696 ioc_refresh_params(ioc, false);
2697 spin_unlock_irq(&ioc->lock);
2700 static void ioc_rqos_exit(struct rq_qos *rqos)
2702 struct ioc *ioc = rqos_to_ioc(rqos);
2704 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2706 spin_lock_irq(&ioc->lock);
2707 ioc->running = IOC_STOP;
2708 spin_unlock_irq(&ioc->lock);
2710 del_timer_sync(&ioc->timer);
2711 free_percpu(ioc->pcpu_stat);
2712 kfree(ioc);
2715 static struct rq_qos_ops ioc_rqos_ops = {
2716 .throttle = ioc_rqos_throttle,
2717 .merge = ioc_rqos_merge,
2718 .done_bio = ioc_rqos_done_bio,
2719 .done = ioc_rqos_done,
2720 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2721 .exit = ioc_rqos_exit,
2724 static int blk_iocost_init(struct request_queue *q)
2726 struct ioc *ioc;
2727 struct rq_qos *rqos;
2728 int i, cpu, ret;
2730 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2731 if (!ioc)
2732 return -ENOMEM;
2734 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2735 if (!ioc->pcpu_stat) {
2736 kfree(ioc);
2737 return -ENOMEM;
2738 }
2740 for_each_possible_cpu(cpu) {
2741 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2743 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2744 local_set(&ccs->missed[i].nr_met, 0);
2745 local_set(&ccs->missed[i].nr_missed, 0);
2747 local64_set(&ccs->rq_wait_ns, 0);
2750 rqos = &ioc->rqos;
2751 rqos->id = RQ_QOS_COST;
2752 rqos->ops = &ioc_rqos_ops;
2753 rqos->q = q;
2755 spin_lock_init(&ioc->lock);
2756 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2757 INIT_LIST_HEAD(&ioc->active_iocgs);
2759 ioc->running = IOC_IDLE;
2760 ioc->vtime_base_rate = VTIME_PER_USEC;
2761 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2762 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2763 ioc->period_at = ktime_to_us(ktime_get());
2764 atomic64_set(&ioc->cur_period, 0);
2765 atomic_set(&ioc->hweight_gen, 0);
2767 spin_lock_irq(&ioc->lock);
2768 ioc->autop_idx = AUTOP_INVALID;
2769 ioc_refresh_params(ioc, true);
2770 spin_unlock_irq(&ioc->lock);
2772 rq_qos_add(q, rqos);
2773 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2774 if (ret) {
2775 rq_qos_del(q, rqos);
2776 free_percpu(ioc->pcpu_stat);
2777 kfree(ioc);
2778 return ret;
2779 }
2780 return 0;
2783 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2785 struct ioc_cgrp *iocc;
2787 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2788 if (!iocc)
2789 return NULL;
2791 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2792 return &iocc->cpd;
2795 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2797 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2800 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2801 struct blkcg *blkcg)
2803 int levels = blkcg->css.cgroup->level + 1;
2804 struct ioc_gq *iocg;
2806 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2807 if (!iocg)
2808 return NULL;
2810 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2811 if (!iocg->pcpu_stat) {
2812 kfree(iocg);
2813 return NULL;
2814 }
2816 return &iocg->pd;
2819 static void ioc_pd_init(struct blkg_policy_data *pd)
2821 struct ioc_gq *iocg = pd_to_iocg(pd);
2822 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2823 struct ioc *ioc = q_to_ioc(blkg->q);
2824 struct ioc_now now;
2825 struct blkcg_gq *tblkg;
2826 unsigned long flags;
2828 ioc_now(ioc, &now);
2830 iocg->ioc = ioc;
2831 atomic64_set(&iocg->vtime, now.vnow);
2832 atomic64_set(&iocg->done_vtime, now.vnow);
2833 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2834 INIT_LIST_HEAD(&iocg->active_list);
2835 INIT_LIST_HEAD(&iocg->walk_list);
2836 INIT_LIST_HEAD(&iocg->surplus_list);
2837 iocg->hweight_active = WEIGHT_ONE;
2838 iocg->hweight_inuse = WEIGHT_ONE;
2840 init_waitqueue_head(&iocg->waitq);
2841 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2842 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2844 iocg->level = blkg->blkcg->css.cgroup->level;
2846 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2847 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2848 iocg->ancestors[tiocg->level] = tiocg;
2851 spin_lock_irqsave(&ioc->lock, flags);
2852 weight_updated(iocg, &now);
2853 spin_unlock_irqrestore(&ioc->lock, flags);
2856 static void ioc_pd_free(struct blkg_policy_data *pd)
2858 struct ioc_gq *iocg = pd_to_iocg(pd);
2859 struct ioc *ioc = iocg->ioc;
2860 unsigned long flags;
2862 if (ioc) {
2863 spin_lock_irqsave(&ioc->lock, flags);
2865 if (!list_empty(&iocg->active_list)) {
2866 struct ioc_now now;
2868 ioc_now(ioc, &now);
2869 propagate_weights(iocg, 0, 0, false, &now);
2870 list_del_init(&iocg->active_list);
2873 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2874 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2876 spin_unlock_irqrestore(&ioc->lock, flags);
2878 hrtimer_cancel(&iocg->waitq_timer);
2880 free_percpu(iocg->pcpu_stat);
2881 kfree(iocg);
2884 static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
2886 struct ioc_gq *iocg = pd_to_iocg(pd);
2887 struct ioc *ioc = iocg->ioc;
2888 size_t pos = 0;
2890 if (!ioc->enabled)
2891 return 0;
2893 if (iocg->level == 0) {
2894 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
2895 ioc->vtime_base_rate * 10000,
2896 VTIME_PER_USEC);
2897 pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
2898 vp10k / 100, vp10k % 100);
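/* e.g. vtime_base_rate == VTIME_PER_USEC would print "cost.vrate=100.00" */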
2901 pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
2902 iocg->last_stat.usage_us);
2904 if (blkcg_debug_stats)
2905 pos += scnprintf(buf + pos, size - pos,
2906 " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
2907 iocg->last_stat.wait_us,
2908 iocg->last_stat.indebt_us,
2909 iocg->last_stat.indelay_us);
2911 return pos;
2914 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
2915 int off)
2917 const char *dname = blkg_dev_name(pd->blkg);
2918 struct ioc_gq *iocg = pd_to_iocg(pd);
2920 if (dname && iocg->cfg_weight)
2921 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
2922 return 0;
2926 static int ioc_weight_show(struct seq_file *sf, void *v)
2928 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2929 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
2931 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
2932 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
2933 &blkcg_policy_iocost, seq_cft(sf)->private, false);
2934 return 0;
2937 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
2938 size_t nbytes, loff_t off)
2940 struct blkcg *blkcg = css_to_blkcg(of_css(of));
2941 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
2942 struct blkg_conf_ctx ctx;
2943 struct ioc_now now;
2944 struct ioc_gq *iocg;
2945 u32 v;
2946 int ret;
2948 if (!strchr(buf, ':')) {
2949 struct blkcg_gq *blkg;
2951 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
2952 return -EINVAL;
2954 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
2955 return -EINVAL;
2957 spin_lock(&blkcg->lock);
2958 iocc->dfl_weight = v * WEIGHT_ONE;
2959 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
2960 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2962 if (iocg) {
2963 spin_lock_irq(&iocg->ioc->lock);
2964 ioc_now(iocg->ioc, &now);
2965 weight_updated(iocg, &now);
2966 spin_unlock_irq(&iocg->ioc->lock);
2969 spin_unlock(&blkcg->lock);
2971 return nbytes;
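/*
 * e.g. `echo "default 200" > io.weight` takes the branch above and
 * applies weight 200 as the cgroup's default across all devices.
 */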
2974 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
2975 if (ret)
2976 return ret;
2978 iocg = blkg_to_iocg(ctx.blkg);
2980 if (!strncmp(ctx.body, "default", 7)) {
2981 v = 0;
2982 } else {
2983 if (!sscanf(ctx.body, "%u", &v))
2984 goto einval;
2985 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
2986 goto einval;
2987 }
2989 spin_lock(&iocg->ioc->lock);
2990 iocg->cfg_weight = v * WEIGHT_ONE;
2991 ioc_now(iocg->ioc, &now);
2992 weight_updated(iocg, &now);
2993 spin_unlock(&iocg->ioc->lock);
2995 blkg_conf_finish(&ctx);
2996 return nbytes;
2998 einval:
2999 blkg_conf_finish(&ctx);
3000 return -EINVAL;
3003 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3004 int off)
3006 const char *dname = blkg_dev_name(pd->blkg);
3007 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3009 if (!dname)
3010 return 0;
3012 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3013 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3014 ioc->params.qos[QOS_RPPM] / 10000,
3015 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3016 ioc->params.qos[QOS_RLAT],
3017 ioc->params.qos[QOS_WPPM] / 10000,
3018 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3019 ioc->params.qos[QOS_WLAT],
3020 ioc->params.qos[QOS_MIN] / 10000,
3021 ioc->params.qos[QOS_MIN] % 10000 / 100,
3022 ioc->params.qos[QOS_MAX] / 10000,
3023 ioc->params.qos[QOS_MAX] % 10000 / 100);
3024 return 0;
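/*
 * Example output line (illustrative device number and values):
 * 8:16 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00 wlat=20000
 * min=50.00 max=150.00
 */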
3027 static int ioc_qos_show(struct seq_file *sf, void *v)
3029 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3031 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3032 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3033 return 0;
3036 static const match_table_t qos_ctrl_tokens = {
3037 { QOS_ENABLE, "enable=%u" },
3038 { QOS_CTRL, "ctrl=%s" },
3039 { NR_QOS_CTRL_PARAMS, NULL },
3042 static const match_table_t qos_tokens = {
3043 { QOS_RPPM, "rpct=%s" },
3044 { QOS_RLAT, "rlat=%u" },
3045 { QOS_WPPM, "wpct=%s" },
3046 { QOS_WLAT, "wlat=%u" },
3047 { QOS_MIN, "min=%s" },
3048 { QOS_MAX, "max=%s" },
3049 { NR_QOS_PARAMS, NULL },
3052 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3053 size_t nbytes, loff_t off)
3055 struct gendisk *disk;
3056 struct ioc *ioc;
3057 u32 qos[NR_QOS_PARAMS];
3058 bool enable, user;
3059 char *p;
3060 int ret;
3062 disk = blkcg_conf_get_disk(&input);
3063 if (IS_ERR(disk))
3064 return PTR_ERR(disk);
3066 ioc = q_to_ioc(disk->queue);
3067 if (!ioc) {
3068 ret = blk_iocost_init(disk->queue);
3069 if (ret)
3070 goto err;
3071 ioc = q_to_ioc(disk->queue);
3072 }
3074 spin_lock_irq(&ioc->lock);
3075 memcpy(qos, ioc->params.qos, sizeof(qos));
3076 enable = ioc->enabled;
3077 user = ioc->user_qos_params;
3078 spin_unlock_irq(&ioc->lock);
3080 while ((p = strsep(&input, " \t\n"))) {
3081 substring_t args[MAX_OPT_ARGS];
3082 char buf[32];
3083 int tok;
3084 s64 v;
3086 if (!*p)
3087 continue;
3089 switch (match_token(p, qos_ctrl_tokens, args)) {
3090 case QOS_ENABLE:
3091 match_u64(&args[0], &v);
3092 enable = v;
3093 continue;
3094 case QOS_CTRL:
3095 match_strlcpy(buf, &args[0], sizeof(buf));
3096 if (!strcmp(buf, "auto"))
3097 user = false;
3098 else if (!strcmp(buf, "user"))
3099 user = true;
3100 else
3101 goto einval;
3102 continue;
3103 }
3105 tok = match_token(p, qos_tokens, args);
3106 switch (tok) {
3107 case QOS_RPPM:
3108 case QOS_WPPM:
3109 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3110 sizeof(buf))
3111 goto einval;
3112 if (cgroup_parse_float(buf, 2, &v))
3113 goto einval;
3114 if (v < 0 || v > 10000)
3115 goto einval;
3116 qos[tok] = v * 100;
3117 break;
3118 case QOS_RLAT:
3119 case QOS_WLAT:
3120 if (match_u64(&args[0], &v))
3121 goto einval;
3122 qos[tok] = v;
3123 break;
3124 case QOS_MIN:
3125 case QOS_MAX:
3126 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3127 sizeof(buf))
3128 goto einval;
3129 if (cgroup_parse_float(buf, 2, &v))
3130 goto einval;
3131 if (v < 0)
3132 goto einval;
3133 qos[tok] = clamp_t(s64, v * 100,
3134 VRATE_MIN_PPM, VRATE_MAX_PPM);
3135 break;
3136 default:
3137 goto einval;
3138 }
3142 if (qos[QOS_MIN] > qos[QOS_MAX])
3143 goto einval;
3145 spin_lock_irq(&ioc->lock);
3147 if (enable) {
3148 blk_stat_enable_accounting(ioc->rqos.q);
3149 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3150 ioc->enabled = true;
3151 } else {
3152 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3153 ioc->enabled = false;
3154 }
3156 if (user) {
3157 memcpy(ioc->params.qos, qos, sizeof(qos));
3158 ioc->user_qos_params = true;
3159 } else {
3160 ioc->user_qos_params = false;
3161 }
3163 ioc_refresh_params(ioc, true);
3164 spin_unlock_irq(&ioc->lock);
3166 put_disk_and_module(disk);
3167 return nbytes;
3168 einval:
3169 ret = -EINVAL;
3170 err:
3171 put_disk_and_module(disk);
3172 return ret;
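/*
 * e.g. `echo "8:16 enable=1 rlat=10000 wlat=20000" > io.cost.qos`
 * (hypothetical device number) enables the controller on that device
 * and sets the read/write latency targets while leaving the other
 * parameters at their current values.
 */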
3175 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3176 struct blkg_policy_data *pd, int off)
3178 const char *dname = blkg_dev_name(pd->blkg);
3179 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3180 u64 *u = ioc->params.i_lcoefs;
3182 if (!dname)
3183 return 0;
3185 seq_printf(sf, "%s ctrl=%s model=linear "
3186 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3187 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3188 dname, ioc->user_cost_model ? "user" : "auto",
3189 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3190 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3191 return 0;
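/*
 * Example output line (illustrative values):
 * 8:16 ctrl=user model=linear rbps=174019176 rseqiops=41708
 * rrandiops=370 wbps=178075866 wseqiops=42705 wrandiops=378
 */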
3194 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3196 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3198 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3199 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3200 return 0;
3203 static const match_table_t cost_ctrl_tokens = {
3204 { COST_CTRL, "ctrl=%s" },
3205 { COST_MODEL, "model=%s" },
3206 { NR_COST_CTRL_PARAMS, NULL },
3209 static const match_table_t i_lcoef_tokens = {
3210 { I_LCOEF_RBPS, "rbps=%u" },
3211 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3212 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3213 { I_LCOEF_WBPS, "wbps=%u" },
3214 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3215 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3216 { NR_I_LCOEFS, NULL },
3219 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3220 size_t nbytes, loff_t off)
3222 struct gendisk *disk;
3223 struct ioc *ioc;
3224 u64 u[NR_I_LCOEFS];
3225 bool user;
3226 char *p;
3227 int ret;
3229 disk = blkcg_conf_get_disk(&input);
3230 if (IS_ERR(disk))
3231 return PTR_ERR(disk);
3233 ioc = q_to_ioc(disk->queue);
3234 if (!ioc) {
3235 ret = blk_iocost_init(disk->queue);
3236 if (ret)
3237 goto err;
3238 ioc = q_to_ioc(disk->queue);
3239 }
3241 spin_lock_irq(&ioc->lock);
3242 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3243 user = ioc->user_cost_model;
3244 spin_unlock_irq(&ioc->lock);
3246 while ((p = strsep(&input, " \t\n"))) {
3247 substring_t args[MAX_OPT_ARGS];
3248 char buf[32];
3249 int tok;
3250 u64 v;
3252 if (!*p)
3253 continue;
3255 switch (match_token(p, cost_ctrl_tokens, args)) {
3256 case COST_CTRL:
3257 match_strlcpy(buf, &args[0], sizeof(buf));
3258 if (!strcmp(buf, "auto"))
3259 user = false;
3260 else if (!strcmp(buf, "user"))
3261 user = true;
3262 else
3263 goto einval;
3264 continue;
3265 case COST_MODEL:
3266 match_strlcpy(buf, &args[0], sizeof(buf));
3267 if (strcmp(buf, "linear"))
3268 goto einval;
3269 continue;
3270 }
3272 tok = match_token(p, i_lcoef_tokens, args);
3273 if (tok == NR_I_LCOEFS)
3274 goto einval;
3275 if (match_u64(&args[0], &v))
3276 goto einval;
3277 u[tok] = v;
3278 user = true;
3281 spin_lock_irq(&ioc->lock);
3282 if (user) {
3283 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3284 ioc->user_cost_model = true;
3285 } else {
3286 ioc->user_cost_model = false;
3287 }
3288 ioc_refresh_params(ioc, true);
3289 spin_unlock_irq(&ioc->lock);
3291 put_disk_and_module(disk);
3292 return nbytes;
3294 einval:
3295 ret = -EINVAL;
3296 err:
3297 put_disk_and_module(disk);
3298 return ret;
3301 static struct cftype ioc_files[] = {
3303 .name = "weight",
3304 .flags = CFTYPE_NOT_ON_ROOT,
3305 .seq_show = ioc_weight_show,
3306 .write = ioc_weight_write,
3309 .name = "cost.qos",
3310 .flags = CFTYPE_ONLY_ON_ROOT,
3311 .seq_show = ioc_qos_show,
3312 .write = ioc_qos_write,
3315 .name = "cost.model",
3316 .flags = CFTYPE_ONLY_ON_ROOT,
3317 .seq_show = ioc_cost_model_show,
3318 .write = ioc_cost_model_write,
3323 static struct blkcg_policy blkcg_policy_iocost = {
3324 .dfl_cftypes = ioc_files,
3325 .cpd_alloc_fn = ioc_cpd_alloc,
3326 .cpd_free_fn = ioc_cpd_free,
3327 .pd_alloc_fn = ioc_pd_alloc,
3328 .pd_init_fn = ioc_pd_init,
3329 .pd_free_fn = ioc_pd_free,
3330 .pd_stat_fn = ioc_pd_stat,
3333 static int __init ioc_init(void)
3335 return blkcg_policy_register(&blkcg_policy_iocost);
3338 static void __exit ioc_exit(void)
3340 return blkcg_policy_unregister(&blkcg_policy_iocost);
3343 module_init(ioc_init);
3344 module_exit(ioc_exit);