blk-iocost: avoid 64-bit division in ioc_timer_fn
[platform/kernel/linux-starfive.git] / block / blk-iocost.c
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * IO cost model based controller.
4  *
5  * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6  * Copyright (C) 2019 Andy Newell <newella@fb.com>
7  * Copyright (C) 2019 Facebook
8  *
9  * One challenge of controlling IO resources is the lack of trivially
10  * observable cost metric.  This is distinguished from CPU and memory where
11  * wallclock time and the number of bytes can serve as accurate enough
12  * approximations.
13  *
14  * Bandwidth and iops are the most commonly used metrics for IO devices but
15  * depending on the type and specifics of the device, different IO patterns
16  * easily lead to multiple orders of magnitude variations rendering them
17  * useless for the purpose of IO capacity distribution.  While on-device
18  * time, with a lot of clutches, could serve as a useful approximation for
19  * non-queued rotational devices, this is no longer viable with modern
20  * devices, even the rotational ones.
21  *
22  * While there is no cost metric we can trivially observe, it isn't a
23  * complete mystery.  For example, on a rotational device, seek cost
24  * dominates while a contiguous transfer contributes a smaller amount
25  * proportional to the size.  If we can characterize at least the relative
26  * costs of these different types of IOs, it should be possible to
27  * implement a reasonable work-conserving proportional IO resource
28  * distribution.
29  *
30  * 1. IO Cost Model
31  *
32  * IO cost model estimates the cost of an IO given its basic parameters and
33  * history (e.g. the end sector of the last IO).  The cost is measured in
34  * device time.  If a given IO is estimated to cost 10ms, the device should
35  * be able to process ~100 of those IOs in a second.
36  *
37  * Currently, there's only one builtin cost model - linear.  Each IO is
38  * classified as sequential or random and given a base cost accordingly.
39  * On top of that, a size cost proportional to the length of the IO is
40  * added.  While simple, this model captures the operational
41  * characteristics of a wide varienty of devices well enough.  Default
42  * parameters for several different classes of devices are provided and the
43  * parameters can be configured from userspace via
44  * /sys/fs/cgroup/io.cost.model.
45  *
46  * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47  * device-specific coefficients.
48  *
49  * 2. Control Strategy
50  *
51  * The device virtual time (vtime) is used as the primary control metric.
52  * The control strategy is composed of the following three parts.
53  *
54  * 2-1. Vtime Distribution
55  *
56  * When a cgroup becomes active in terms of IOs, its hierarchical share is
57  * calculated.  Please consider the following hierarchy where the numbers
58  * inside parentheses denote the configured weights.
59  *
60  *           root
61  *         /       \
62  *      A (w:100)  B (w:300)
63  *      /       \
64  *  A0 (w:100)  A1 (w:100)
65  *
66  * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67  * of equal weight, each gets 50% share.  If then B starts issuing IOs, B
68  * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
69  * 12.5% each.  The distribution mechanism only cares about these flattened
70  * shares.  They're called hweights (hierarchical weights) and always add
71  * upto 1 (WEIGHT_ONE).
72  *
73  * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74  * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75  * against the device vtime - an IO which takes 10ms on the underlying
76  * device is considered to take 80ms on A0.
77  *
78  * This constitutes the basis of IO capacity distribution.  Each cgroup's
79  * vtime is running at a rate determined by its hweight.  A cgroup tracks
80  * the vtime consumed by past IOs and can issue a new IO if doing so
81  * wouldn't outrun the current device vtime.  Otherwise, the IO is
82  * suspended until the vtime has progressed enough to cover it.
83  *
84  * 2-2. Vrate Adjustment
85  *
86  * It's unrealistic to expect the cost model to be perfect.  There are too
87  * many devices and even on the same device the overall performance
88  * fluctuates depending on numerous factors such as IO mixture and device
89  * internal garbage collection.  The controller needs to adapt dynamically.
90  *
91  * This is achieved by adjusting the overall IO rate according to how busy
92  * the device is.  If the device becomes overloaded, we're sending down too
93  * many IOs and should generally slow down.  If there are waiting issuers
94  * but the device isn't saturated, we're issuing too few and should
95  * generally speed up.
96  *
97  * To slow down, we lower the vrate - the rate at which the device vtime
98  * passes compared to the wall clock.  For example, if the vtime is running
99  * at the vrate of 75%, all cgroups added up would only be able to issue
100  * 750ms worth of IOs per second, and vice-versa for speeding up.
101  *
102  * Device business is determined using two criteria - rq wait and
103  * completion latencies.
104  *
105  * When a device gets saturated, the on-device and then the request queues
106  * fill up and a bio which is ready to be issued has to wait for a request
107  * to become available.  When this delay becomes noticeable, it's a clear
108  * indication that the device is saturated and we lower the vrate.  This
109  * saturation signal is fairly conservative as it only triggers when both
110  * hardware and software queues are filled up, and is used as the default
111  * busy signal.
112  *
113  * As devices can have deep queues and be unfair in how the queued commands
114  * are executed, soley depending on rq wait may not result in satisfactory
115  * control quality.  For a better control quality, completion latency QoS
116  * parameters can be configured so that the device is considered saturated
117  * if N'th percentile completion latency rises above the set point.
118  *
119  * The completion latency requirements are a function of both the
120  * underlying device characteristics and the desired IO latency quality of
121  * service.  There is an inherent trade-off - the tighter the latency QoS,
122  * the higher the bandwidth lossage.  Latency QoS is disabled by default
123  * and can be set through /sys/fs/cgroup/io.cost.qos.
124  *
125  * 2-3. Work Conservation
126  *
127  * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
128  * periodically while B is sending out enough parallel IOs to saturate the
129  * device on its own.  Let's say A's usage amounts to 100ms worth of IO
130  * cost per second, i.e., 10% of the device capacity.  The naive
131  * distribution of half and half would lead to 60% utilization of the
132  * device, a significant reduction in the total amount of work done
133  * compared to free-for-all competition.  This is too high a cost to pay
134  * for IO control.
135  *
136  * To conserve the total amount of work done, we keep track of how much
137  * each active cgroup is actually using and yield part of its weight if
138  * there are other cgroups which can make use of it.  In the above case,
139  * A's weight will be lowered so that it hovers above the actual usage and
140  * B would be able to use the rest.
141  *
142  * As we don't want to penalize a cgroup for donating its weight, the
143  * surplus weight adjustment factors in a margin and has an immediate
144  * snapback mechanism in case the cgroup needs more IO vtime for itself.
145  *
146  * Note that adjusting down surplus weights has the same effects as
147  * accelerating vtime for other cgroups and work conservation can also be
148  * implemented by adjusting vrate dynamically.  However, squaring who can
149  * donate and should take back how much requires hweight propagations
150  * anyway making it easier to implement and understand as a separate
151  * mechanism.
152  *
153  * 3. Monitoring
154  *
155  * Instead of debugfs or other clumsy monitoring mechanisms, this
156  * controller uses a drgn based monitoring script -
157  * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
158  * https://github.com/osandov/drgn.  The output looks like the following.
159  *
160  *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161  *                 active      weight      hweight% inflt% dbt  delay usages%
162  *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
163  *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
164  *
165  * - per        : Timer period
166  * - cur_per    : Internal wall and device vtime clock
167  * - vrate      : Device virtual time rate against wall clock
168  * - weight     : Surplus-adjusted and configured weights
169  * - hweight    : Surplus-adjusted and configured hierarchical weights
170  * - inflt      : The percentage of in-flight IO cost at the end of last period
171  * - del_ms     : Deferred issuer delay induction level and duration
172  * - usages     : Usage history
173  */
174
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
185 #include "blk-wbt.h"
186 #include "blk-cgroup.h"
187
188 #ifdef CONFIG_TRACEPOINTS
189
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194
195 #define TRACE_IOCG_PATH(type, iocg, ...)                                        \
196         do {                                                                    \
197                 unsigned long flags;                                            \
198                 if (trace_iocost_##type##_enabled()) {                          \
199                         spin_lock_irqsave(&trace_iocg_path_lock, flags);        \
200                         cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,      \
201                                     trace_iocg_path, TRACE_IOCG_PATH_LEN);      \
202                         trace_iocost_##type(iocg, trace_iocg_path,              \
203                                               ##__VA_ARGS__);                   \
204                         spin_unlock_irqrestore(&trace_iocg_path_lock, flags);   \
205                 }                                                               \
206         } while (0)
207
208 #else   /* CONFIG_TRACE_POINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...)        do { } while (0)
210 #endif  /* CONFIG_TRACE_POINTS */
211
212 enum {
213         MILLION                 = 1000000,
214
215         /* timer period is calculated from latency requirements, bound it */
216         MIN_PERIOD              = USEC_PER_MSEC,
217         MAX_PERIOD              = USEC_PER_SEC,
218
219         /*
220          * iocg->vtime is targeted at 50% behind the device vtime, which
221          * serves as its IO credit buffer.  Surplus weight adjustment is
222          * immediately canceled if the vtime margin runs below 10%.
223          */
224         MARGIN_MIN_PCT          = 10,
225         MARGIN_LOW_PCT          = 20,
226         MARGIN_TARGET_PCT       = 50,
227
228         INUSE_ADJ_STEP_PCT      = 25,
229
230         /* Have some play in timer operations */
231         TIMER_SLACK_PCT         = 1,
232
233         /* 1/64k is granular enough and can easily be handled w/ u32 */
234         WEIGHT_ONE              = 1 << 16,
235
236         /*
237          * As vtime is used to calculate the cost of each IO, it needs to
238          * be fairly high precision.  For example, it should be able to
239          * represent the cost of a single page worth of discard with
240          * suffificient accuracy.  At the same time, it should be able to
241          * represent reasonably long enough durations to be useful and
242          * convenient during operation.
243          *
244          * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
245          * granularity and days of wrap-around time even at extreme vrates.
246          */
247         VTIME_PER_SEC_SHIFT     = 37,
248         VTIME_PER_SEC           = 1LLU << VTIME_PER_SEC_SHIFT,
249         VTIME_PER_USEC          = VTIME_PER_SEC / USEC_PER_SEC,
250         VTIME_PER_NSEC          = VTIME_PER_SEC / NSEC_PER_SEC,
251
252         /* bound vrate adjustments within two orders of magnitude */
253         VRATE_MIN_PPM           = 10000,        /* 1% */
254         VRATE_MAX_PPM           = 100000000,    /* 10000% */
255
256         VRATE_MIN               = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
257         VRATE_CLAMP_ADJ_PCT     = 4,
258
259         /* switch iff the conditions are met for longer than this */
260         AUTOP_CYCLE_NSEC        = 10LLU * NSEC_PER_SEC,
261 };
262
263 enum {
264         /* if IOs end up waiting for requests, issue less */
265         RQ_WAIT_BUSY_PCT        = 5,
266
267         /* unbusy hysterisis */
268         UNBUSY_THR_PCT          = 75,
269
270         /*
271          * The effect of delay is indirect and non-linear and a huge amount of
272          * future debt can accumulate abruptly while unthrottled. Linearly scale
273          * up delay as debt is going up and then let it decay exponentially.
274          * This gives us quick ramp ups while delay is accumulating and long
275          * tails which can help reducing the frequency of debt explosions on
276          * unthrottle. The parameters are experimentally determined.
277          *
278          * The delay mechanism provides adequate protection and behavior in many
279          * cases. However, this is far from ideal and falls shorts on both
280          * fronts. The debtors are often throttled too harshly costing a
281          * significant level of fairness and possibly total work while the
282          * protection against their impacts on the system can be choppy and
283          * unreliable.
284          *
285          * The shortcoming primarily stems from the fact that, unlike for page
286          * cache, the kernel doesn't have well-defined back-pressure propagation
287          * mechanism and policies for anonymous memory. Fully addressing this
288          * issue will likely require substantial improvements in the area.
289          */
290         MIN_DELAY_THR_PCT       = 500,
291         MAX_DELAY_THR_PCT       = 25000,
292         MIN_DELAY               = 250,
293         MAX_DELAY               = 250 * USEC_PER_MSEC,
294
295         /* halve debts if avg usage over 100ms is under 50% */
296         DFGV_USAGE_PCT          = 50,
297         DFGV_PERIOD             = 100 * USEC_PER_MSEC,
298
299         /* don't let cmds which take a very long time pin lagging for too long */
300         MAX_LAGGING_PERIODS     = 10,
301
302         /*
303          * Count IO size in 4k pages.  The 12bit shift helps keeping
304          * size-proportional components of cost calculation in closer
305          * numbers of digits to per-IO cost components.
306          */
307         IOC_PAGE_SHIFT          = 12,
308         IOC_PAGE_SIZE           = 1 << IOC_PAGE_SHIFT,
309         IOC_SECT_TO_PAGE_SHIFT  = IOC_PAGE_SHIFT - SECTOR_SHIFT,
310
311         /* if apart further than 16M, consider randio for linear model */
312         LCOEF_RANDIO_PAGES      = 4096,
313 };
314
315 enum ioc_running {
316         IOC_IDLE,
317         IOC_RUNNING,
318         IOC_STOP,
319 };
320
321 /* io.cost.qos controls including per-dev enable of the whole controller */
322 enum {
323         QOS_ENABLE,
324         QOS_CTRL,
325         NR_QOS_CTRL_PARAMS,
326 };
327
328 /* io.cost.qos params */
329 enum {
330         QOS_RPPM,
331         QOS_RLAT,
332         QOS_WPPM,
333         QOS_WLAT,
334         QOS_MIN,
335         QOS_MAX,
336         NR_QOS_PARAMS,
337 };
338
339 /* io.cost.model controls */
340 enum {
341         COST_CTRL,
342         COST_MODEL,
343         NR_COST_CTRL_PARAMS,
344 };
345
346 /* builtin linear cost model coefficients */
347 enum {
348         I_LCOEF_RBPS,
349         I_LCOEF_RSEQIOPS,
350         I_LCOEF_RRANDIOPS,
351         I_LCOEF_WBPS,
352         I_LCOEF_WSEQIOPS,
353         I_LCOEF_WRANDIOPS,
354         NR_I_LCOEFS,
355 };
356
357 enum {
358         LCOEF_RPAGE,
359         LCOEF_RSEQIO,
360         LCOEF_RRANDIO,
361         LCOEF_WPAGE,
362         LCOEF_WSEQIO,
363         LCOEF_WRANDIO,
364         NR_LCOEFS,
365 };
366
367 enum {
368         AUTOP_INVALID,
369         AUTOP_HDD,
370         AUTOP_SSD_QD1,
371         AUTOP_SSD_DFL,
372         AUTOP_SSD_FAST,
373 };
374
375 struct ioc_params {
376         u32                             qos[NR_QOS_PARAMS];
377         u64                             i_lcoefs[NR_I_LCOEFS];
378         u64                             lcoefs[NR_LCOEFS];
379         u32                             too_fast_vrate_pct;
380         u32                             too_slow_vrate_pct;
381 };
382
383 struct ioc_margins {
384         s64                             min;
385         s64                             low;
386         s64                             target;
387 };
388
389 struct ioc_missed {
390         local_t                         nr_met;
391         local_t                         nr_missed;
392         u32                             last_met;
393         u32                             last_missed;
394 };
395
396 struct ioc_pcpu_stat {
397         struct ioc_missed               missed[2];
398
399         local64_t                       rq_wait_ns;
400         u64                             last_rq_wait_ns;
401 };
402
403 /* per device */
404 struct ioc {
405         struct rq_qos                   rqos;
406
407         bool                            enabled;
408
409         struct ioc_params               params;
410         struct ioc_margins              margins;
411         u32                             period_us;
412         u32                             timer_slack_ns;
413         u64                             vrate_min;
414         u64                             vrate_max;
415
416         spinlock_t                      lock;
417         struct timer_list               timer;
418         struct list_head                active_iocgs;   /* active cgroups */
419         struct ioc_pcpu_stat __percpu   *pcpu_stat;
420
421         enum ioc_running                running;
422         atomic64_t                      vtime_rate;
423         u64                             vtime_base_rate;
424         s64                             vtime_err;
425
426         seqcount_spinlock_t             period_seqcount;
427         u64                             period_at;      /* wallclock starttime */
428         u64                             period_at_vtime; /* vtime starttime */
429
430         atomic64_t                      cur_period;     /* inc'd each period */
431         int                             busy_level;     /* saturation history */
432
433         bool                            weights_updated;
434         atomic_t                        hweight_gen;    /* for lazy hweights */
435
436         /* debt forgivness */
437         u64                             dfgv_period_at;
438         u64                             dfgv_period_rem;
439         u64                             dfgv_usage_us_sum;
440
441         u64                             autop_too_fast_at;
442         u64                             autop_too_slow_at;
443         int                             autop_idx;
444         bool                            user_qos_params:1;
445         bool                            user_cost_model:1;
446 };
447
448 struct iocg_pcpu_stat {
449         local64_t                       abs_vusage;
450 };
451
452 struct iocg_stat {
453         u64                             usage_us;
454         u64                             wait_us;
455         u64                             indebt_us;
456         u64                             indelay_us;
457 };
458
459 /* per device-cgroup pair */
460 struct ioc_gq {
461         struct blkg_policy_data         pd;
462         struct ioc                      *ioc;
463
464         /*
465          * A iocg can get its weight from two sources - an explicit
466          * per-device-cgroup configuration or the default weight of the
467          * cgroup.  `cfg_weight` is the explicit per-device-cgroup
468          * configuration.  `weight` is the effective considering both
469          * sources.
470          *
471          * When an idle cgroup becomes active its `active` goes from 0 to
472          * `weight`.  `inuse` is the surplus adjusted active weight.
473          * `active` and `inuse` are used to calculate `hweight_active` and
474          * `hweight_inuse`.
475          *
476          * `last_inuse` remembers `inuse` while an iocg is idle to persist
477          * surplus adjustments.
478          *
479          * `inuse` may be adjusted dynamically during period. `saved_*` are used
480          * to determine and track adjustments.
481          */
482         u32                             cfg_weight;
483         u32                             weight;
484         u32                             active;
485         u32                             inuse;
486
487         u32                             last_inuse;
488         s64                             saved_margin;
489
490         sector_t                        cursor;         /* to detect randio */
491
492         /*
493          * `vtime` is this iocg's vtime cursor which progresses as IOs are
494          * issued.  If lagging behind device vtime, the delta represents
495          * the currently available IO budget.  If running ahead, the
496          * overage.
497          *
498          * `vtime_done` is the same but progressed on completion rather
499          * than issue.  The delta behind `vtime` represents the cost of
500          * currently in-flight IOs.
501          */
502         atomic64_t                      vtime;
503         atomic64_t                      done_vtime;
504         u64                             abs_vdebt;
505
506         /* current delay in effect and when it started */
507         u64                             delay;
508         u64                             delay_at;
509
510         /*
511          * The period this iocg was last active in.  Used for deactivation
512          * and invalidating `vtime`.
513          */
514         atomic64_t                      active_period;
515         struct list_head                active_list;
516
517         /* see __propagate_weights() and current_hweight() for details */
518         u64                             child_active_sum;
519         u64                             child_inuse_sum;
520         u64                             child_adjusted_sum;
521         int                             hweight_gen;
522         u32                             hweight_active;
523         u32                             hweight_inuse;
524         u32                             hweight_donating;
525         u32                             hweight_after_donation;
526
527         struct list_head                walk_list;
528         struct list_head                surplus_list;
529
530         struct wait_queue_head          waitq;
531         struct hrtimer                  waitq_timer;
532
533         /* timestamp at the latest activation */
534         u64                             activated_at;
535
536         /* statistics */
537         struct iocg_pcpu_stat __percpu  *pcpu_stat;
538         struct iocg_stat                stat;
539         struct iocg_stat                last_stat;
540         u64                             last_stat_abs_vusage;
541         u64                             usage_delta_us;
542         u64                             wait_since;
543         u64                             indebt_since;
544         u64                             indelay_since;
545
546         /* this iocg's depth in the hierarchy and ancestors including self */
547         int                             level;
548         struct ioc_gq                   *ancestors[];
549 };
550
551 /* per cgroup */
552 struct ioc_cgrp {
553         struct blkcg_policy_data        cpd;
554         unsigned int                    dfl_weight;
555 };
556
557 struct ioc_now {
558         u64                             now_ns;
559         u64                             now;
560         u64                             vnow;
561         u64                             vrate;
562 };
563
564 struct iocg_wait {
565         struct wait_queue_entry         wait;
566         struct bio                      *bio;
567         u64                             abs_cost;
568         bool                            committed;
569 };
570
571 struct iocg_wake_ctx {
572         struct ioc_gq                   *iocg;
573         u32                             hw_inuse;
574         s64                             vbudget;
575 };
576
577 static const struct ioc_params autop[] = {
578         [AUTOP_HDD] = {
579                 .qos                            = {
580                         [QOS_RLAT]              =        250000, /* 250ms */
581                         [QOS_WLAT]              =        250000,
582                         [QOS_MIN]               = VRATE_MIN_PPM,
583                         [QOS_MAX]               = VRATE_MAX_PPM,
584                 },
585                 .i_lcoefs                       = {
586                         [I_LCOEF_RBPS]          =     174019176,
587                         [I_LCOEF_RSEQIOPS]      =         41708,
588                         [I_LCOEF_RRANDIOPS]     =           370,
589                         [I_LCOEF_WBPS]          =     178075866,
590                         [I_LCOEF_WSEQIOPS]      =         42705,
591                         [I_LCOEF_WRANDIOPS]     =           378,
592                 },
593         },
594         [AUTOP_SSD_QD1] = {
595                 .qos                            = {
596                         [QOS_RLAT]              =         25000, /* 25ms */
597                         [QOS_WLAT]              =         25000,
598                         [QOS_MIN]               = VRATE_MIN_PPM,
599                         [QOS_MAX]               = VRATE_MAX_PPM,
600                 },
601                 .i_lcoefs                       = {
602                         [I_LCOEF_RBPS]          =     245855193,
603                         [I_LCOEF_RSEQIOPS]      =         61575,
604                         [I_LCOEF_RRANDIOPS]     =          6946,
605                         [I_LCOEF_WBPS]          =     141365009,
606                         [I_LCOEF_WSEQIOPS]      =         33716,
607                         [I_LCOEF_WRANDIOPS]     =         26796,
608                 },
609         },
610         [AUTOP_SSD_DFL] = {
611                 .qos                            = {
612                         [QOS_RLAT]              =         25000, /* 25ms */
613                         [QOS_WLAT]              =         25000,
614                         [QOS_MIN]               = VRATE_MIN_PPM,
615                         [QOS_MAX]               = VRATE_MAX_PPM,
616                 },
617                 .i_lcoefs                       = {
618                         [I_LCOEF_RBPS]          =     488636629,
619                         [I_LCOEF_RSEQIOPS]      =          8932,
620                         [I_LCOEF_RRANDIOPS]     =          8518,
621                         [I_LCOEF_WBPS]          =     427891549,
622                         [I_LCOEF_WSEQIOPS]      =         28755,
623                         [I_LCOEF_WRANDIOPS]     =         21940,
624                 },
625                 .too_fast_vrate_pct             =           500,
626         },
627         [AUTOP_SSD_FAST] = {
628                 .qos                            = {
629                         [QOS_RLAT]              =          5000, /* 5ms */
630                         [QOS_WLAT]              =          5000,
631                         [QOS_MIN]               = VRATE_MIN_PPM,
632                         [QOS_MAX]               = VRATE_MAX_PPM,
633                 },
634                 .i_lcoefs                       = {
635                         [I_LCOEF_RBPS]          =    3102524156LLU,
636                         [I_LCOEF_RSEQIOPS]      =        724816,
637                         [I_LCOEF_RRANDIOPS]     =        778122,
638                         [I_LCOEF_WBPS]          =    1742780862LLU,
639                         [I_LCOEF_WSEQIOPS]      =        425702,
640                         [I_LCOEF_WRANDIOPS]     =        443193,
641                 },
642                 .too_slow_vrate_pct             =            10,
643         },
644 };
645
646 /*
647  * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
648  * vtime credit shortage and down on device saturation.
649  */
650 static u32 vrate_adj_pct[] =
651         { 0, 0, 0, 0,
652           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
653           2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
654           4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
655
656 static struct blkcg_policy blkcg_policy_iocost;
657
658 /* accessors and helpers */
659 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
660 {
661         return container_of(rqos, struct ioc, rqos);
662 }
663
664 static struct ioc *q_to_ioc(struct request_queue *q)
665 {
666         return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
667 }
668
669 static const char __maybe_unused *ioc_name(struct ioc *ioc)
670 {
671         struct gendisk *disk = ioc->rqos.q->disk;
672
673         if (!disk)
674                 return "<unknown>";
675         return disk->disk_name;
676 }
677
678 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
679 {
680         return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
681 }
682
683 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
684 {
685         return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
686 }
687
688 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
689 {
690         return pd_to_blkg(&iocg->pd);
691 }
692
693 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
694 {
695         return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
696                             struct ioc_cgrp, cpd);
697 }
698
699 /*
700  * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
701  * weight, the more expensive each IO.  Must round up.
702  */
703 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
704 {
705         return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
706 }
707
708 /*
709  * The inverse of abs_cost_to_cost().  Must round up.
710  */
711 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
712 {
713         return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
714 }
715
716 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
717                             u64 abs_cost, u64 cost)
718 {
719         struct iocg_pcpu_stat *gcs;
720
721         bio->bi_iocost_cost = cost;
722         atomic64_add(cost, &iocg->vtime);
723
724         gcs = get_cpu_ptr(iocg->pcpu_stat);
725         local64_add(abs_cost, &gcs->abs_vusage);
726         put_cpu_ptr(gcs);
727 }
728
729 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
730 {
731         if (lock_ioc) {
732                 spin_lock_irqsave(&iocg->ioc->lock, *flags);
733                 spin_lock(&iocg->waitq.lock);
734         } else {
735                 spin_lock_irqsave(&iocg->waitq.lock, *flags);
736         }
737 }
738
739 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
740 {
741         if (unlock_ioc) {
742                 spin_unlock(&iocg->waitq.lock);
743                 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
744         } else {
745                 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
746         }
747 }
748
749 #define CREATE_TRACE_POINTS
750 #include <trace/events/iocost.h>
751
752 static void ioc_refresh_margins(struct ioc *ioc)
753 {
754         struct ioc_margins *margins = &ioc->margins;
755         u32 period_us = ioc->period_us;
756         u64 vrate = ioc->vtime_base_rate;
757
758         margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
759         margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
760         margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
761 }
762
763 /* latency Qos params changed, update period_us and all the dependent params */
764 static void ioc_refresh_period_us(struct ioc *ioc)
765 {
766         u32 ppm, lat, multi, period_us;
767
768         lockdep_assert_held(&ioc->lock);
769
770         /* pick the higher latency target */
771         if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
772                 ppm = ioc->params.qos[QOS_RPPM];
773                 lat = ioc->params.qos[QOS_RLAT];
774         } else {
775                 ppm = ioc->params.qos[QOS_WPPM];
776                 lat = ioc->params.qos[QOS_WLAT];
777         }
778
779         /*
780          * We want the period to be long enough to contain a healthy number
781          * of IOs while short enough for granular control.  Define it as a
782          * multiple of the latency target.  Ideally, the multiplier should
783          * be scaled according to the percentile so that it would nominally
784          * contain a certain number of requests.  Let's be simpler and
785          * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
786          */
787         if (ppm)
788                 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
789         else
790                 multi = 2;
791         period_us = multi * lat;
792         period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
793
794         /* calculate dependent params */
795         ioc->period_us = period_us;
796         ioc->timer_slack_ns = div64_u64(
797                 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
798                 100);
799         ioc_refresh_margins(ioc);
800 }
801
802 static int ioc_autop_idx(struct ioc *ioc)
803 {
804         int idx = ioc->autop_idx;
805         const struct ioc_params *p = &autop[idx];
806         u32 vrate_pct;
807         u64 now_ns;
808
809         /* rotational? */
810         if (!blk_queue_nonrot(ioc->rqos.q))
811                 return AUTOP_HDD;
812
813         /* handle SATA SSDs w/ broken NCQ */
814         if (blk_queue_depth(ioc->rqos.q) == 1)
815                 return AUTOP_SSD_QD1;
816
817         /* use one of the normal ssd sets */
818         if (idx < AUTOP_SSD_DFL)
819                 return AUTOP_SSD_DFL;
820
821         /* if user is overriding anything, maintain what was there */
822         if (ioc->user_qos_params || ioc->user_cost_model)
823                 return idx;
824
825         /* step up/down based on the vrate */
826         vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
827         now_ns = ktime_get_ns();
828
829         if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
830                 if (!ioc->autop_too_fast_at)
831                         ioc->autop_too_fast_at = now_ns;
832                 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
833                         return idx + 1;
834         } else {
835                 ioc->autop_too_fast_at = 0;
836         }
837
838         if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
839                 if (!ioc->autop_too_slow_at)
840                         ioc->autop_too_slow_at = now_ns;
841                 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
842                         return idx - 1;
843         } else {
844                 ioc->autop_too_slow_at = 0;
845         }
846
847         return idx;
848 }
849
850 /*
851  * Take the followings as input
852  *
853  *  @bps        maximum sequential throughput
854  *  @seqiops    maximum sequential 4k iops
855  *  @randiops   maximum random 4k iops
856  *
857  * and calculate the linear model cost coefficients.
858  *
859  *  *@page      per-page cost           1s / (@bps / 4096)
860  *  *@seqio     base cost of a seq IO   max((1s / @seqiops) - *@page, 0)
861  *  @randiops   base cost of a rand IO  max((1s / @randiops) - *@page, 0)
862  */
863 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
864                         u64 *page, u64 *seqio, u64 *randio)
865 {
866         u64 v;
867
868         *page = *seqio = *randio = 0;
869
870         if (bps) {
871                 u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
872
873                 if (bps_pages)
874                         *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
875                 else
876                         *page = 1;
877         }
878
879         if (seqiops) {
880                 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
881                 if (v > *page)
882                         *seqio = v - *page;
883         }
884
885         if (randiops) {
886                 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
887                 if (v > *page)
888                         *randio = v - *page;
889         }
890 }
891
892 static void ioc_refresh_lcoefs(struct ioc *ioc)
893 {
894         u64 *u = ioc->params.i_lcoefs;
895         u64 *c = ioc->params.lcoefs;
896
897         calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
898                     &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
899         calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
900                     &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
901 }
902
903 static bool ioc_refresh_params(struct ioc *ioc, bool force)
904 {
905         const struct ioc_params *p;
906         int idx;
907
908         lockdep_assert_held(&ioc->lock);
909
910         idx = ioc_autop_idx(ioc);
911         p = &autop[idx];
912
913         if (idx == ioc->autop_idx && !force)
914                 return false;
915
916         if (idx != ioc->autop_idx)
917                 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
918
919         ioc->autop_idx = idx;
920         ioc->autop_too_fast_at = 0;
921         ioc->autop_too_slow_at = 0;
922
923         if (!ioc->user_qos_params)
924                 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
925         if (!ioc->user_cost_model)
926                 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
927
928         ioc_refresh_period_us(ioc);
929         ioc_refresh_lcoefs(ioc);
930
931         ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
932                                             VTIME_PER_USEC, MILLION);
933         ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
934                                    VTIME_PER_USEC, MILLION);
935
936         return true;
937 }
938
939 /*
940  * When an iocg accumulates too much vtime or gets deactivated, we throw away
941  * some vtime, which lowers the overall device utilization. As the exact amount
942  * which is being thrown away is known, we can compensate by accelerating the
943  * vrate accordingly so that the extra vtime generated in the current period
944  * matches what got lost.
945  */
946 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
947 {
948         s64 pleft = ioc->period_at + ioc->period_us - now->now;
949         s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
950         s64 vcomp, vcomp_min, vcomp_max;
951
952         lockdep_assert_held(&ioc->lock);
953
954         /* we need some time left in this period */
955         if (pleft <= 0)
956                 goto done;
957
958         /*
959          * Calculate how much vrate should be adjusted to offset the error.
960          * Limit the amount of adjustment and deduct the adjusted amount from
961          * the error.
962          */
963         vcomp = -div64_s64(ioc->vtime_err, pleft);
964         vcomp_min = -(ioc->vtime_base_rate >> 1);
965         vcomp_max = ioc->vtime_base_rate;
966         vcomp = clamp(vcomp, vcomp_min, vcomp_max);
967
968         ioc->vtime_err += vcomp * pleft;
969
970         atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
971 done:
972         /* bound how much error can accumulate */
973         ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
974 }
975
976 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
977                                   int nr_lagging, int nr_shortages,
978                                   int prev_busy_level, u32 *missed_ppm)
979 {
980         u64 vrate = ioc->vtime_base_rate;
981         u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
982
983         if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
984                 if (ioc->busy_level != prev_busy_level || nr_lagging)
985                         trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
986                                                    missed_ppm, rq_wait_pct,
987                                                    nr_lagging, nr_shortages);
988
989                 return;
990         }
991
992         /*
993          * If vrate is out of bounds, apply clamp gradually as the
994          * bounds can change abruptly.  Otherwise, apply busy_level
995          * based adjustment.
996          */
997         if (vrate < vrate_min) {
998                 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
999                 vrate = min(vrate, vrate_min);
1000         } else if (vrate > vrate_max) {
1001                 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1002                 vrate = max(vrate, vrate_max);
1003         } else {
1004                 int idx = min_t(int, abs(ioc->busy_level),
1005                                 ARRAY_SIZE(vrate_adj_pct) - 1);
1006                 u32 adj_pct = vrate_adj_pct[idx];
1007
1008                 if (ioc->busy_level > 0)
1009                         adj_pct = 100 - adj_pct;
1010                 else
1011                         adj_pct = 100 + adj_pct;
1012
1013                 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1014                               vrate_min, vrate_max);
1015         }
1016
1017         trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1018                                    nr_lagging, nr_shortages);
1019
1020         ioc->vtime_base_rate = vrate;
1021         ioc_refresh_margins(ioc);
1022 }
1023
1024 /* take a snapshot of the current [v]time and vrate */
1025 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1026 {
1027         unsigned seq;
1028
1029         now->now_ns = ktime_get();
1030         now->now = ktime_to_us(now->now_ns);
1031         now->vrate = atomic64_read(&ioc->vtime_rate);
1032
1033         /*
1034          * The current vtime is
1035          *
1036          *   vtime at period start + (wallclock time since the start) * vrate
1037          *
1038          * As a consistent snapshot of `period_at_vtime` and `period_at` is
1039          * needed, they're seqcount protected.
1040          */
1041         do {
1042                 seq = read_seqcount_begin(&ioc->period_seqcount);
1043                 now->vnow = ioc->period_at_vtime +
1044                         (now->now - ioc->period_at) * now->vrate;
1045         } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1046 }
1047
1048 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1049 {
1050         WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1051
1052         write_seqcount_begin(&ioc->period_seqcount);
1053         ioc->period_at = now->now;
1054         ioc->period_at_vtime = now->vnow;
1055         write_seqcount_end(&ioc->period_seqcount);
1056
1057         ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1058         add_timer(&ioc->timer);
1059 }
1060
1061 /*
1062  * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1063  * weight sums and propagate upwards accordingly. If @save, the current margin
1064  * is saved to be used as reference for later inuse in-period adjustments.
1065  */
1066 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1067                                 bool save, struct ioc_now *now)
1068 {
1069         struct ioc *ioc = iocg->ioc;
1070         int lvl;
1071
1072         lockdep_assert_held(&ioc->lock);
1073
1074         /*
1075          * For an active leaf node, its inuse shouldn't be zero or exceed
1076          * @active. An active internal node's inuse is solely determined by the
1077          * inuse to active ratio of its children regardless of @inuse.
1078          */
1079         if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1080                 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1081                                            iocg->child_active_sum);
1082         } else {
1083                 inuse = clamp_t(u32, inuse, 1, active);
1084         }
1085
1086         iocg->last_inuse = iocg->inuse;
1087         if (save)
1088                 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1089
1090         if (active == iocg->active && inuse == iocg->inuse)
1091                 return;
1092
1093         for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1094                 struct ioc_gq *parent = iocg->ancestors[lvl];
1095                 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1096                 u32 parent_active = 0, parent_inuse = 0;
1097
1098                 /* update the level sums */
1099                 parent->child_active_sum += (s32)(active - child->active);
1100                 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1101                 /* apply the updates */
1102                 child->active = active;
1103                 child->inuse = inuse;
1104
1105                 /*
1106                  * The delta between inuse and active sums indicates that
1107                  * much of weight is being given away.  Parent's inuse
1108                  * and active should reflect the ratio.
1109                  */
1110                 if (parent->child_active_sum) {
1111                         parent_active = parent->weight;
1112                         parent_inuse = DIV64_U64_ROUND_UP(
1113                                 parent_active * parent->child_inuse_sum,
1114                                 parent->child_active_sum);
1115                 }
1116
1117                 /* do we need to keep walking up? */
1118                 if (parent_active == parent->active &&
1119                     parent_inuse == parent->inuse)
1120                         break;
1121
1122                 active = parent_active;
1123                 inuse = parent_inuse;
1124         }
1125
1126         ioc->weights_updated = true;
1127 }
1128
1129 static void commit_weights(struct ioc *ioc)
1130 {
1131         lockdep_assert_held(&ioc->lock);
1132
1133         if (ioc->weights_updated) {
1134                 /* paired with rmb in current_hweight(), see there */
1135                 smp_wmb();
1136                 atomic_inc(&ioc->hweight_gen);
1137                 ioc->weights_updated = false;
1138         }
1139 }
1140
1141 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1142                               bool save, struct ioc_now *now)
1143 {
1144         __propagate_weights(iocg, active, inuse, save, now);
1145         commit_weights(iocg->ioc);
1146 }
1147
1148 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1149 {
1150         struct ioc *ioc = iocg->ioc;
1151         int lvl;
1152         u32 hwa, hwi;
1153         int ioc_gen;
1154
1155         /* hot path - if uptodate, use cached */
1156         ioc_gen = atomic_read(&ioc->hweight_gen);
1157         if (ioc_gen == iocg->hweight_gen)
1158                 goto out;
1159
1160         /*
1161          * Paired with wmb in commit_weights(). If we saw the updated
1162          * hweight_gen, all the weight updates from __propagate_weights() are
1163          * visible too.
1164          *
1165          * We can race with weight updates during calculation and get it
1166          * wrong.  However, hweight_gen would have changed and a future
1167          * reader will recalculate and we're guaranteed to discard the
1168          * wrong result soon.
1169          */
1170         smp_rmb();
1171
1172         hwa = hwi = WEIGHT_ONE;
1173         for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1174                 struct ioc_gq *parent = iocg->ancestors[lvl];
1175                 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1176                 u64 active_sum = READ_ONCE(parent->child_active_sum);
1177                 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1178                 u32 active = READ_ONCE(child->active);
1179                 u32 inuse = READ_ONCE(child->inuse);
1180
1181                 /* we can race with deactivations and either may read as zero */
1182                 if (!active_sum || !inuse_sum)
1183                         continue;
1184
1185                 active_sum = max_t(u64, active, active_sum);
1186                 hwa = div64_u64((u64)hwa * active, active_sum);
1187
1188                 inuse_sum = max_t(u64, inuse, inuse_sum);
1189                 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1190         }
1191
1192         iocg->hweight_active = max_t(u32, hwa, 1);
1193         iocg->hweight_inuse = max_t(u32, hwi, 1);
1194         iocg->hweight_gen = ioc_gen;
1195 out:
1196         if (hw_activep)
1197                 *hw_activep = iocg->hweight_active;
1198         if (hw_inusep)
1199                 *hw_inusep = iocg->hweight_inuse;
1200 }
1201
1202 /*
1203  * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1204  * other weights stay unchanged.
1205  */
1206 static u32 current_hweight_max(struct ioc_gq *iocg)
1207 {
1208         u32 hwm = WEIGHT_ONE;
1209         u32 inuse = iocg->active;
1210         u64 child_inuse_sum;
1211         int lvl;
1212
1213         lockdep_assert_held(&iocg->ioc->lock);
1214
1215         for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1216                 struct ioc_gq *parent = iocg->ancestors[lvl];
1217                 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1218
1219                 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1220                 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1221                 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1222                                            parent->child_active_sum);
1223         }
1224
1225         return max_t(u32, hwm, 1);
1226 }
1227
1228 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1229 {
1230         struct ioc *ioc = iocg->ioc;
1231         struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1232         struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1233         u32 weight;
1234
1235         lockdep_assert_held(&ioc->lock);
1236
1237         weight = iocg->cfg_weight ?: iocc->dfl_weight;
1238         if (weight != iocg->weight && iocg->active)
1239                 propagate_weights(iocg, weight, iocg->inuse, true, now);
1240         iocg->weight = weight;
1241 }
1242
1243 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1244 {
1245         struct ioc *ioc = iocg->ioc;
1246         u64 last_period, cur_period;
1247         u64 vtime, vtarget;
1248         int i;
1249
1250         /*
1251          * If seem to be already active, just update the stamp to tell the
1252          * timer that we're still active.  We don't mind occassional races.
1253          */
1254         if (!list_empty(&iocg->active_list)) {
1255                 ioc_now(ioc, now);
1256                 cur_period = atomic64_read(&ioc->cur_period);
1257                 if (atomic64_read(&iocg->active_period) != cur_period)
1258                         atomic64_set(&iocg->active_period, cur_period);
1259                 return true;
1260         }
1261
1262         /* racy check on internal node IOs, treat as root level IOs */
1263         if (iocg->child_active_sum)
1264                 return false;
1265
1266         spin_lock_irq(&ioc->lock);
1267
1268         ioc_now(ioc, now);
1269
1270         /* update period */
1271         cur_period = atomic64_read(&ioc->cur_period);
1272         last_period = atomic64_read(&iocg->active_period);
1273         atomic64_set(&iocg->active_period, cur_period);
1274
1275         /* already activated or breaking leaf-only constraint? */
1276         if (!list_empty(&iocg->active_list))
1277                 goto succeed_unlock;
1278         for (i = iocg->level - 1; i > 0; i--)
1279                 if (!list_empty(&iocg->ancestors[i]->active_list))
1280                         goto fail_unlock;
1281
1282         if (iocg->child_active_sum)
1283                 goto fail_unlock;
1284
1285         /*
1286          * Always start with the target budget. On deactivation, we throw away
1287          * anything above it.
1288          */
1289         vtarget = now->vnow - ioc->margins.target;
1290         vtime = atomic64_read(&iocg->vtime);
1291
1292         atomic64_add(vtarget - vtime, &iocg->vtime);
1293         atomic64_add(vtarget - vtime, &iocg->done_vtime);
1294         vtime = vtarget;
1295
1296         /*
1297          * Activate, propagate weight and start period timer if not
1298          * running.  Reset hweight_gen to avoid accidental match from
1299          * wrapping.
1300          */
1301         iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1302         list_add(&iocg->active_list, &ioc->active_iocgs);
1303
1304         propagate_weights(iocg, iocg->weight,
1305                           iocg->last_inuse ?: iocg->weight, true, now);
1306
1307         TRACE_IOCG_PATH(iocg_activate, iocg, now,
1308                         last_period, cur_period, vtime);
1309
1310         iocg->activated_at = now->now;
1311
1312         if (ioc->running == IOC_IDLE) {
1313                 ioc->running = IOC_RUNNING;
1314                 ioc->dfgv_period_at = now->now;
1315                 ioc->dfgv_period_rem = 0;
1316                 ioc_start_period(ioc, now);
1317         }
1318
1319 succeed_unlock:
1320         spin_unlock_irq(&ioc->lock);
1321         return true;
1322
1323 fail_unlock:
1324         spin_unlock_irq(&ioc->lock);
1325         return false;
1326 }
1327
1328 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1329 {
1330         struct ioc *ioc = iocg->ioc;
1331         struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1332         u64 tdelta, delay, new_delay;
1333         s64 vover, vover_pct;
1334         u32 hwa;
1335
1336         lockdep_assert_held(&iocg->waitq.lock);
1337
1338         /* calculate the current delay in effect - 1/2 every second */
1339         tdelta = now->now - iocg->delay_at;
1340         if (iocg->delay)
1341                 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1342         else
1343                 delay = 0;
1344
1345         /* calculate the new delay from the debt amount */
1346         current_hweight(iocg, &hwa, NULL);
1347         vover = atomic64_read(&iocg->vtime) +
1348                 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1349         vover_pct = div64_s64(100 * vover,
1350                               ioc->period_us * ioc->vtime_base_rate);
1351
1352         if (vover_pct <= MIN_DELAY_THR_PCT)
1353                 new_delay = 0;
1354         else if (vover_pct >= MAX_DELAY_THR_PCT)
1355                 new_delay = MAX_DELAY;
1356         else
1357                 new_delay = MIN_DELAY +
1358                         div_u64((MAX_DELAY - MIN_DELAY) *
1359                                 (vover_pct - MIN_DELAY_THR_PCT),
1360                                 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1361
1362         /* pick the higher one and apply */
1363         if (new_delay > delay) {
1364                 iocg->delay = new_delay;
1365                 iocg->delay_at = now->now;
1366                 delay = new_delay;
1367         }
1368
1369         if (delay >= MIN_DELAY) {
1370                 if (!iocg->indelay_since)
1371                         iocg->indelay_since = now->now;
1372                 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1373                 return true;
1374         } else {
1375                 if (iocg->indelay_since) {
1376                         iocg->stat.indelay_us += now->now - iocg->indelay_since;
1377                         iocg->indelay_since = 0;
1378                 }
1379                 iocg->delay = 0;
1380                 blkcg_clear_delay(blkg);
1381                 return false;
1382         }
1383 }
1384
1385 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1386                             struct ioc_now *now)
1387 {
1388         struct iocg_pcpu_stat *gcs;
1389
1390         lockdep_assert_held(&iocg->ioc->lock);
1391         lockdep_assert_held(&iocg->waitq.lock);
1392         WARN_ON_ONCE(list_empty(&iocg->active_list));
1393
1394         /*
1395          * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1396          * inuse donating all of it share to others until its debt is paid off.
1397          */
1398         if (!iocg->abs_vdebt && abs_cost) {
1399                 iocg->indebt_since = now->now;
1400                 propagate_weights(iocg, iocg->active, 0, false, now);
1401         }
1402
1403         iocg->abs_vdebt += abs_cost;
1404
1405         gcs = get_cpu_ptr(iocg->pcpu_stat);
1406         local64_add(abs_cost, &gcs->abs_vusage);
1407         put_cpu_ptr(gcs);
1408 }
1409
1410 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1411                           struct ioc_now *now)
1412 {
1413         lockdep_assert_held(&iocg->ioc->lock);
1414         lockdep_assert_held(&iocg->waitq.lock);
1415
1416         /* make sure that nobody messed with @iocg */
1417         WARN_ON_ONCE(list_empty(&iocg->active_list));
1418         WARN_ON_ONCE(iocg->inuse > 1);
1419
1420         iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1421
1422         /* if debt is paid in full, restore inuse */
1423         if (!iocg->abs_vdebt) {
1424                 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1425                 iocg->indebt_since = 0;
1426
1427                 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1428                                   false, now);
1429         }
1430 }
1431
1432 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1433                         int flags, void *key)
1434 {
1435         struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1436         struct iocg_wake_ctx *ctx = key;
1437         u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1438
1439         ctx->vbudget -= cost;
1440
1441         if (ctx->vbudget < 0)
1442                 return -1;
1443
1444         iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1445         wait->committed = true;
1446
1447         /*
1448          * autoremove_wake_function() removes the wait entry only when it
1449          * actually changed the task state. We want the wait always removed.
1450          * Remove explicitly and use default_wake_function(). Note that the
1451          * order of operations is important as finish_wait() tests whether
1452          * @wq_entry is removed without grabbing the lock.
1453          */
1454         default_wake_function(wq_entry, mode, flags, key);
1455         list_del_init_careful(&wq_entry->entry);
1456         return 0;
1457 }
1458
1459 /*
1460  * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1461  * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1462  * addition to iocg->waitq.lock.
1463  */
1464 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1465                             struct ioc_now *now)
1466 {
1467         struct ioc *ioc = iocg->ioc;
1468         struct iocg_wake_ctx ctx = { .iocg = iocg };
1469         u64 vshortage, expires, oexpires;
1470         s64 vbudget;
1471         u32 hwa;
1472
1473         lockdep_assert_held(&iocg->waitq.lock);
1474
1475         current_hweight(iocg, &hwa, NULL);
1476         vbudget = now->vnow - atomic64_read(&iocg->vtime);
1477
1478         /* pay off debt */
1479         if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1480                 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1481                 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1482                 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1483
1484                 lockdep_assert_held(&ioc->lock);
1485
1486                 atomic64_add(vpay, &iocg->vtime);
1487                 atomic64_add(vpay, &iocg->done_vtime);
1488                 iocg_pay_debt(iocg, abs_vpay, now);
1489                 vbudget -= vpay;
1490         }
1491
1492         if (iocg->abs_vdebt || iocg->delay)
1493                 iocg_kick_delay(iocg, now);
1494
1495         /*
1496          * Debt can still be outstanding if we haven't paid all yet or the
1497          * caller raced and called without @pay_debt. Shouldn't wake up waiters
1498          * under debt. Make sure @vbudget reflects the outstanding amount and is
1499          * not positive.
1500          */
1501         if (iocg->abs_vdebt) {
1502                 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1503                 vbudget = min_t(s64, 0, vbudget - vdebt);
1504         }
1505
1506         /*
1507          * Wake up the ones which are due and see how much vtime we'll need for
1508          * the next one. As paying off debt restores hw_inuse, it must be read
1509          * after the above debt payment.
1510          */
1511         ctx.vbudget = vbudget;
1512         current_hweight(iocg, NULL, &ctx.hw_inuse);
1513
1514         __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1515
1516         if (!waitqueue_active(&iocg->waitq)) {
1517                 if (iocg->wait_since) {
1518                         iocg->stat.wait_us += now->now - iocg->wait_since;
1519                         iocg->wait_since = 0;
1520                 }
1521                 return;
1522         }
1523
1524         if (!iocg->wait_since)
1525                 iocg->wait_since = now->now;
1526
1527         if (WARN_ON_ONCE(ctx.vbudget >= 0))
1528                 return;
1529
1530         /* determine next wakeup, add a timer margin to guarantee chunking */
1531         vshortage = -ctx.vbudget;
1532         expires = now->now_ns +
1533                 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1534                 NSEC_PER_USEC;
1535         expires += ioc->timer_slack_ns;
1536
1537         /* if already active and close enough, don't bother */
1538         oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1539         if (hrtimer_is_queued(&iocg->waitq_timer) &&
1540             abs(oexpires - expires) <= ioc->timer_slack_ns)
1541                 return;
1542
1543         hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1544                                ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1545 }
1546
1547 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1548 {
1549         struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1550         bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1551         struct ioc_now now;
1552         unsigned long flags;
1553
1554         ioc_now(iocg->ioc, &now);
1555
1556         iocg_lock(iocg, pay_debt, &flags);
1557         iocg_kick_waitq(iocg, pay_debt, &now);
1558         iocg_unlock(iocg, pay_debt, &flags);
1559
1560         return HRTIMER_NORESTART;
1561 }
1562
1563 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1564 {
1565         u32 nr_met[2] = { };
1566         u32 nr_missed[2] = { };
1567         u64 rq_wait_ns = 0;
1568         int cpu, rw;
1569
1570         for_each_online_cpu(cpu) {
1571                 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1572                 u64 this_rq_wait_ns;
1573
1574                 for (rw = READ; rw <= WRITE; rw++) {
1575                         u32 this_met = local_read(&stat->missed[rw].nr_met);
1576                         u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1577
1578                         nr_met[rw] += this_met - stat->missed[rw].last_met;
1579                         nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1580                         stat->missed[rw].last_met = this_met;
1581                         stat->missed[rw].last_missed = this_missed;
1582                 }
1583
1584                 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1585                 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1586                 stat->last_rq_wait_ns = this_rq_wait_ns;
1587         }
1588
1589         for (rw = READ; rw <= WRITE; rw++) {
1590                 if (nr_met[rw] + nr_missed[rw])
1591                         missed_ppm_ar[rw] =
1592                                 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1593                                                    nr_met[rw] + nr_missed[rw]);
1594                 else
1595                         missed_ppm_ar[rw] = 0;
1596         }
1597
1598         *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1599                                    ioc->period_us * NSEC_PER_USEC);
1600 }
1601
1602 /* was iocg idle this period? */
1603 static bool iocg_is_idle(struct ioc_gq *iocg)
1604 {
1605         struct ioc *ioc = iocg->ioc;
1606
1607         /* did something get issued this period? */
1608         if (atomic64_read(&iocg->active_period) ==
1609             atomic64_read(&ioc->cur_period))
1610                 return false;
1611
1612         /* is something in flight? */
1613         if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1614                 return false;
1615
1616         return true;
1617 }
1618
1619 /*
1620  * Call this function on the target leaf @iocg's to build pre-order traversal
1621  * list of all the ancestors in @inner_walk. The inner nodes are linked through
1622  * ->walk_list and the caller is responsible for dissolving the list after use.
1623  */
1624 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1625                                   struct list_head *inner_walk)
1626 {
1627         int lvl;
1628
1629         WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1630
1631         /* find the first ancestor which hasn't been visited yet */
1632         for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1633                 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1634                         break;
1635         }
1636
1637         /* walk down and visit the inner nodes to get pre-order traversal */
1638         while (++lvl <= iocg->level - 1) {
1639                 struct ioc_gq *inner = iocg->ancestors[lvl];
1640
1641                 /* record traversal order */
1642                 list_add_tail(&inner->walk_list, inner_walk);
1643         }
1644 }
1645
1646 /* propagate the deltas to the parent */
1647 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1648 {
1649         if (iocg->level > 0) {
1650                 struct iocg_stat *parent_stat =
1651                         &iocg->ancestors[iocg->level - 1]->stat;
1652
1653                 parent_stat->usage_us +=
1654                         iocg->stat.usage_us - iocg->last_stat.usage_us;
1655                 parent_stat->wait_us +=
1656                         iocg->stat.wait_us - iocg->last_stat.wait_us;
1657                 parent_stat->indebt_us +=
1658                         iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1659                 parent_stat->indelay_us +=
1660                         iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1661         }
1662
1663         iocg->last_stat = iocg->stat;
1664 }
1665
1666 /* collect per-cpu counters and propagate the deltas to the parent */
1667 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1668 {
1669         struct ioc *ioc = iocg->ioc;
1670         u64 abs_vusage = 0;
1671         u64 vusage_delta;
1672         int cpu;
1673
1674         lockdep_assert_held(&iocg->ioc->lock);
1675
1676         /* collect per-cpu counters */
1677         for_each_possible_cpu(cpu) {
1678                 abs_vusage += local64_read(
1679                                 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1680         }
1681         vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1682         iocg->last_stat_abs_vusage = abs_vusage;
1683
1684         iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1685         iocg->stat.usage_us += iocg->usage_delta_us;
1686
1687         iocg_flush_stat_upward(iocg);
1688 }
1689
1690 /* get stat counters ready for reading on all active iocgs */
1691 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1692 {
1693         LIST_HEAD(inner_walk);
1694         struct ioc_gq *iocg, *tiocg;
1695
1696         /* flush leaves and build inner node walk list */
1697         list_for_each_entry(iocg, target_iocgs, active_list) {
1698                 iocg_flush_stat_leaf(iocg, now);
1699                 iocg_build_inner_walk(iocg, &inner_walk);
1700         }
1701
1702         /* keep flushing upwards by walking the inner list backwards */
1703         list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1704                 iocg_flush_stat_upward(iocg);
1705                 list_del_init(&iocg->walk_list);
1706         }
1707 }
1708
1709 /*
1710  * Determine what @iocg's hweight_inuse should be after donating unused
1711  * capacity. @hwm is the upper bound and used to signal no donation. This
1712  * function also throws away @iocg's excess budget.
1713  */
1714 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1715                                   u32 usage, struct ioc_now *now)
1716 {
1717         struct ioc *ioc = iocg->ioc;
1718         u64 vtime = atomic64_read(&iocg->vtime);
1719         s64 excess, delta, target, new_hwi;
1720
1721         /* debt handling owns inuse for debtors */
1722         if (iocg->abs_vdebt)
1723                 return 1;
1724
1725         /* see whether minimum margin requirement is met */
1726         if (waitqueue_active(&iocg->waitq) ||
1727             time_after64(vtime, now->vnow - ioc->margins.min))
1728                 return hwm;
1729
1730         /* throw away excess above target */
1731         excess = now->vnow - vtime - ioc->margins.target;
1732         if (excess > 0) {
1733                 atomic64_add(excess, &iocg->vtime);
1734                 atomic64_add(excess, &iocg->done_vtime);
1735                 vtime += excess;
1736                 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1737         }
1738
1739         /*
1740          * Let's say the distance between iocg's and device's vtimes as a
1741          * fraction of period duration is delta. Assuming that the iocg will
1742          * consume the usage determined above, we want to determine new_hwi so
1743          * that delta equals MARGIN_TARGET at the end of the next period.
1744          *
1745          * We need to execute usage worth of IOs while spending the sum of the
1746          * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1747          * (delta):
1748          *
1749          *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
1750          *
1751          * Therefore, the new_hwi is:
1752          *
1753          *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
1754          */
1755         delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1756                           now->vnow - ioc->period_at_vtime);
1757         target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1758         new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1759
1760         return clamp_t(s64, new_hwi, 1, hwm);
1761 }
1762
1763 /*
1764  * For work-conservation, an iocg which isn't using all of its share should
1765  * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1766  * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1767  *
1768  * #1 is mathematically simpler but has the drawback of requiring synchronous
1769  * global hweight_inuse updates when idle iocg's get activated or inuse weights
1770  * change due to donation snapbacks as it has the possibility of grossly
1771  * overshooting what's allowed by the model and vrate.
1772  *
1773  * #2 is inherently safe with local operations. The donating iocg can easily
1774  * snap back to higher weights when needed without worrying about impacts on
1775  * other nodes as the impacts will be inherently correct. This also makes idle
1776  * iocg activations safe. The only effect activations have is decreasing
1777  * hweight_inuse of others, the right solution to which is for those iocgs to
1778  * snap back to higher weights.
1779  *
1780  * So, we go with #2. The challenge is calculating how each donating iocg's
1781  * inuse should be adjusted to achieve the target donation amounts. This is done
1782  * using Andy's method described in the following pdf.
1783  *
1784  *   https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1785  *
1786  * Given the weights and target after-donation hweight_inuse values, Andy's
1787  * method determines how the proportional distribution should look like at each
1788  * sibling level to maintain the relative relationship between all non-donating
1789  * pairs. To roughly summarize, it divides the tree into donating and
1790  * non-donating parts, calculates global donation rate which is used to
1791  * determine the target hweight_inuse for each node, and then derives per-level
1792  * proportions.
1793  *
1794  * The following pdf shows that global distribution calculated this way can be
1795  * achieved by scaling inuse weights of donating leaves and propagating the
1796  * adjustments upwards proportionally.
1797  *
1798  *   https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1799  *
1800  * Combining the above two, we can determine how each leaf iocg's inuse should
1801  * be adjusted to achieve the target donation.
1802  *
1803  *   https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1804  *
1805  * The inline comments use symbols from the last pdf.
1806  *
1807  *   b is the sum of the absolute budgets in the subtree. 1 for the root node.
1808  *   f is the sum of the absolute budgets of non-donating nodes in the subtree.
1809  *   t is the sum of the absolute budgets of donating nodes in the subtree.
1810  *   w is the weight of the node. w = w_f + w_t
1811  *   w_f is the non-donating portion of w. w_f = w * f / b
1812  *   w_b is the donating portion of w. w_t = w * t / b
1813  *   s is the sum of all sibling weights. s = Sum(w) for siblings
1814  *   s_f and s_t are the non-donating and donating portions of s.
1815  *
1816  * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1817  * w_pt is the donating portion of the parent's weight and w'_pt the same value
1818  * after adjustments. Subscript r denotes the root node's values.
1819  */
1820 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1821 {
1822         LIST_HEAD(over_hwa);
1823         LIST_HEAD(inner_walk);
1824         struct ioc_gq *iocg, *tiocg, *root_iocg;
1825         u32 after_sum, over_sum, over_target, gamma;
1826
1827         /*
1828          * It's pretty unlikely but possible for the total sum of
1829          * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1830          * confuse the following calculations. If such condition is detected,
1831          * scale down everyone over its full share equally to keep the sum below
1832          * WEIGHT_ONE.
1833          */
1834         after_sum = 0;
1835         over_sum = 0;
1836         list_for_each_entry(iocg, surpluses, surplus_list) {
1837                 u32 hwa;
1838
1839                 current_hweight(iocg, &hwa, NULL);
1840                 after_sum += iocg->hweight_after_donation;
1841
1842                 if (iocg->hweight_after_donation > hwa) {
1843                         over_sum += iocg->hweight_after_donation;
1844                         list_add(&iocg->walk_list, &over_hwa);
1845                 }
1846         }
1847
1848         if (after_sum >= WEIGHT_ONE) {
1849                 /*
1850                  * The delta should be deducted from the over_sum, calculate
1851                  * target over_sum value.
1852                  */
1853                 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1854                 WARN_ON_ONCE(over_sum <= over_delta);
1855                 over_target = over_sum - over_delta;
1856         } else {
1857                 over_target = 0;
1858         }
1859
1860         list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1861                 if (over_target)
1862                         iocg->hweight_after_donation =
1863                                 div_u64((u64)iocg->hweight_after_donation *
1864                                         over_target, over_sum);
1865                 list_del_init(&iocg->walk_list);
1866         }
1867
1868         /*
1869          * Build pre-order inner node walk list and prepare for donation
1870          * adjustment calculations.
1871          */
1872         list_for_each_entry(iocg, surpluses, surplus_list) {
1873                 iocg_build_inner_walk(iocg, &inner_walk);
1874         }
1875
1876         root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1877         WARN_ON_ONCE(root_iocg->level > 0);
1878
1879         list_for_each_entry(iocg, &inner_walk, walk_list) {
1880                 iocg->child_adjusted_sum = 0;
1881                 iocg->hweight_donating = 0;
1882                 iocg->hweight_after_donation = 0;
1883         }
1884
1885         /*
1886          * Propagate the donating budget (b_t) and after donation budget (b'_t)
1887          * up the hierarchy.
1888          */
1889         list_for_each_entry(iocg, surpluses, surplus_list) {
1890                 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1891
1892                 parent->hweight_donating += iocg->hweight_donating;
1893                 parent->hweight_after_donation += iocg->hweight_after_donation;
1894         }
1895
1896         list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1897                 if (iocg->level > 0) {
1898                         struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1899
1900                         parent->hweight_donating += iocg->hweight_donating;
1901                         parent->hweight_after_donation += iocg->hweight_after_donation;
1902                 }
1903         }
1904
1905         /*
1906          * Calculate inner hwa's (b) and make sure the donation values are
1907          * within the accepted ranges as we're doing low res calculations with
1908          * roundups.
1909          */
1910         list_for_each_entry(iocg, &inner_walk, walk_list) {
1911                 if (iocg->level) {
1912                         struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1913
1914                         iocg->hweight_active = DIV64_U64_ROUND_UP(
1915                                 (u64)parent->hweight_active * iocg->active,
1916                                 parent->child_active_sum);
1917
1918                 }
1919
1920                 iocg->hweight_donating = min(iocg->hweight_donating,
1921                                              iocg->hweight_active);
1922                 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1923                                                    iocg->hweight_donating - 1);
1924                 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1925                                  iocg->hweight_donating <= 1 ||
1926                                  iocg->hweight_after_donation == 0)) {
1927                         pr_warn("iocg: invalid donation weights in ");
1928                         pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1929                         pr_cont(": active=%u donating=%u after=%u\n",
1930                                 iocg->hweight_active, iocg->hweight_donating,
1931                                 iocg->hweight_after_donation);
1932                 }
1933         }
1934
1935         /*
1936          * Calculate the global donation rate (gamma) - the rate to adjust
1937          * non-donating budgets by.
1938          *
1939          * No need to use 64bit multiplication here as the first operand is
1940          * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1941          *
1942          * We know that there are beneficiary nodes and the sum of the donating
1943          * hweights can't be whole; however, due to the round-ups during hweight
1944          * calculations, root_iocg->hweight_donating might still end up equal to
1945          * or greater than whole. Limit the range when calculating the divider.
1946          *
1947          * gamma = (1 - t_r') / (1 - t_r)
1948          */
1949         gamma = DIV_ROUND_UP(
1950                 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1951                 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1952
1953         /*
1954          * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1955          * nodes.
1956          */
1957         list_for_each_entry(iocg, &inner_walk, walk_list) {
1958                 struct ioc_gq *parent;
1959                 u32 inuse, wpt, wptp;
1960                 u64 st, sf;
1961
1962                 if (iocg->level == 0) {
1963                         /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1964                         iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1965                                 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1966                                 WEIGHT_ONE - iocg->hweight_after_donation);
1967                         continue;
1968                 }
1969
1970                 parent = iocg->ancestors[iocg->level - 1];
1971
1972                 /* b' = gamma * b_f + b_t' */
1973                 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1974                         (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1975                         WEIGHT_ONE) + iocg->hweight_after_donation;
1976
1977                 /* w' = s' * b' / b'_p */
1978                 inuse = DIV64_U64_ROUND_UP(
1979                         (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1980                         parent->hweight_inuse);
1981
1982                 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1983                 st = DIV64_U64_ROUND_UP(
1984                         iocg->child_active_sum * iocg->hweight_donating,
1985                         iocg->hweight_active);
1986                 sf = iocg->child_active_sum - st;
1987                 wpt = DIV64_U64_ROUND_UP(
1988                         (u64)iocg->active * iocg->hweight_donating,
1989                         iocg->hweight_active);
1990                 wptp = DIV64_U64_ROUND_UP(
1991                         (u64)inuse * iocg->hweight_after_donation,
1992                         iocg->hweight_inuse);
1993
1994                 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1995         }
1996
1997         /*
1998          * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1999          * we can finally determine leaf adjustments.
2000          */
2001         list_for_each_entry(iocg, surpluses, surplus_list) {
2002                 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2003                 u32 inuse;
2004
2005                 /*
2006                  * In-debt iocgs participated in the donation calculation with
2007                  * the minimum target hweight_inuse. Configuring inuse
2008                  * accordingly would work fine but debt handling expects
2009                  * @iocg->inuse stay at the minimum and we don't wanna
2010                  * interfere.
2011                  */
2012                 if (iocg->abs_vdebt) {
2013                         WARN_ON_ONCE(iocg->inuse > 1);
2014                         continue;
2015                 }
2016
2017                 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2018                 inuse = DIV64_U64_ROUND_UP(
2019                         parent->child_adjusted_sum * iocg->hweight_after_donation,
2020                         parent->hweight_inuse);
2021
2022                 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2023                                 iocg->inuse, inuse,
2024                                 iocg->hweight_inuse,
2025                                 iocg->hweight_after_donation);
2026
2027                 __propagate_weights(iocg, iocg->active, inuse, true, now);
2028         }
2029
2030         /* walk list should be dissolved after use */
2031         list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2032                 list_del_init(&iocg->walk_list);
2033 }
2034
2035 /*
2036  * A low weight iocg can amass a large amount of debt, for example, when
2037  * anonymous memory gets reclaimed aggressively. If the system has a lot of
2038  * memory paired with a slow IO device, the debt can span multiple seconds or
2039  * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2040  * up blocked paying its debt while the IO device is idle.
2041  *
2042  * The following protects against such cases. If the device has been
2043  * sufficiently idle for a while, the debts are halved and delays are
2044  * recalculated.
2045  */
2046 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2047                               struct ioc_now *now)
2048 {
2049         struct ioc_gq *iocg;
2050         u64 dur, usage_pct, nr_cycles;
2051
2052         /* if no debtor, reset the cycle */
2053         if (!nr_debtors) {
2054                 ioc->dfgv_period_at = now->now;
2055                 ioc->dfgv_period_rem = 0;
2056                 ioc->dfgv_usage_us_sum = 0;
2057                 return;
2058         }
2059
2060         /*
2061          * Debtors can pass through a lot of writes choking the device and we
2062          * don't want to be forgiving debts while the device is struggling from
2063          * write bursts. If we're missing latency targets, consider the device
2064          * fully utilized.
2065          */
2066         if (ioc->busy_level > 0)
2067                 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2068
2069         ioc->dfgv_usage_us_sum += usage_us_sum;
2070         if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2071                 return;
2072
2073         /*
2074          * At least DFGV_PERIOD has passed since the last period. Calculate the
2075          * average usage and reset the period counters.
2076          */
2077         dur = now->now - ioc->dfgv_period_at;
2078         usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2079
2080         ioc->dfgv_period_at = now->now;
2081         ioc->dfgv_usage_us_sum = 0;
2082
2083         /* if was too busy, reset everything */
2084         if (usage_pct > DFGV_USAGE_PCT) {
2085                 ioc->dfgv_period_rem = 0;
2086                 return;
2087         }
2088
2089         /*
2090          * Usage is lower than threshold. Let's forgive some debts. Debt
2091          * forgiveness runs off of the usual ioc timer but its period usually
2092          * doesn't match ioc's. Compensate the difference by performing the
2093          * reduction as many times as would fit in the duration since the last
2094          * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2095          * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2096          * reductions is doubled.
2097          */
2098         nr_cycles = dur + ioc->dfgv_period_rem;
2099         ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2100
2101         list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2102                 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2103
2104                 if (!iocg->abs_vdebt && !iocg->delay)
2105                         continue;
2106
2107                 spin_lock(&iocg->waitq.lock);
2108
2109                 old_debt = iocg->abs_vdebt;
2110                 old_delay = iocg->delay;
2111
2112                 if (iocg->abs_vdebt)
2113                         iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2114                 if (iocg->delay)
2115                         iocg->delay = iocg->delay >> nr_cycles ?: 1;
2116
2117                 iocg_kick_waitq(iocg, true, now);
2118
2119                 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2120                                 old_debt, iocg->abs_vdebt,
2121                                 old_delay, iocg->delay);
2122
2123                 spin_unlock(&iocg->waitq.lock);
2124         }
2125 }
2126
2127 /*
2128  * Check the active iocgs' state to avoid oversleeping and deactive
2129  * idle iocgs.
2130  *
2131  * Since waiters determine the sleep durations based on the vrate
2132  * they saw at the time of sleep, if vrate has increased, some
2133  * waiters could be sleeping for too long. Wake up tardy waiters
2134  * which should have woken up in the last period and expire idle
2135  * iocgs.
2136  */
2137 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2138 {
2139         int nr_debtors = 0;
2140         struct ioc_gq *iocg, *tiocg;
2141
2142         list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2143                 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2144                     !iocg->delay && !iocg_is_idle(iocg))
2145                         continue;
2146
2147                 spin_lock(&iocg->waitq.lock);
2148
2149                 /* flush wait and indebt stat deltas */
2150                 if (iocg->wait_since) {
2151                         iocg->stat.wait_us += now->now - iocg->wait_since;
2152                         iocg->wait_since = now->now;
2153                 }
2154                 if (iocg->indebt_since) {
2155                         iocg->stat.indebt_us +=
2156                                 now->now - iocg->indebt_since;
2157                         iocg->indebt_since = now->now;
2158                 }
2159                 if (iocg->indelay_since) {
2160                         iocg->stat.indelay_us +=
2161                                 now->now - iocg->indelay_since;
2162                         iocg->indelay_since = now->now;
2163                 }
2164
2165                 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2166                     iocg->delay) {
2167                         /* might be oversleeping vtime / hweight changes, kick */
2168                         iocg_kick_waitq(iocg, true, now);
2169                         if (iocg->abs_vdebt || iocg->delay)
2170                                 nr_debtors++;
2171                 } else if (iocg_is_idle(iocg)) {
2172                         /* no waiter and idle, deactivate */
2173                         u64 vtime = atomic64_read(&iocg->vtime);
2174                         s64 excess;
2175
2176                         /*
2177                          * @iocg has been inactive for a full duration and will
2178                          * have a high budget. Account anything above target as
2179                          * error and throw away. On reactivation, it'll start
2180                          * with the target budget.
2181                          */
2182                         excess = now->vnow - vtime - ioc->margins.target;
2183                         if (excess > 0) {
2184                                 u32 old_hwi;
2185
2186                                 current_hweight(iocg, NULL, &old_hwi);
2187                                 ioc->vtime_err -= div64_u64(excess * old_hwi,
2188                                                             WEIGHT_ONE);
2189                         }
2190
2191                         TRACE_IOCG_PATH(iocg_idle, iocg, now,
2192                                         atomic64_read(&iocg->active_period),
2193                                         atomic64_read(&ioc->cur_period), vtime);
2194                         __propagate_weights(iocg, 0, 0, false, now);
2195                         list_del_init(&iocg->active_list);
2196                 }
2197
2198                 spin_unlock(&iocg->waitq.lock);
2199         }
2200
2201         commit_weights(ioc);
2202         return nr_debtors;
2203 }
2204
2205 static void ioc_timer_fn(struct timer_list *timer)
2206 {
2207         struct ioc *ioc = container_of(timer, struct ioc, timer);
2208         struct ioc_gq *iocg, *tiocg;
2209         struct ioc_now now;
2210         LIST_HEAD(surpluses);
2211         int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2212         u64 usage_us_sum = 0;
2213         u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2214         u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2215         u32 missed_ppm[2], rq_wait_pct;
2216         u64 period_vtime;
2217         int prev_busy_level;
2218
2219         /* how were the latencies during the period? */
2220         ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2221
2222         /* take care of active iocgs */
2223         spin_lock_irq(&ioc->lock);
2224
2225         ioc_now(ioc, &now);
2226
2227         period_vtime = now.vnow - ioc->period_at_vtime;
2228         if (WARN_ON_ONCE(!period_vtime)) {
2229                 spin_unlock_irq(&ioc->lock);
2230                 return;
2231         }
2232
2233         nr_debtors = ioc_check_iocgs(ioc, &now);
2234
2235         /*
2236          * Wait and indebt stat are flushed above and the donation calculation
2237          * below needs updated usage stat. Let's bring stat up-to-date.
2238          */
2239         iocg_flush_stat(&ioc->active_iocgs, &now);
2240
2241         /* calc usage and see whether some weights need to be moved around */
2242         list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2243                 u64 vdone, vtime, usage_us;
2244                 u32 hw_active, hw_inuse;
2245
2246                 /*
2247                  * Collect unused and wind vtime closer to vnow to prevent
2248                  * iocgs from accumulating a large amount of budget.
2249                  */
2250                 vdone = atomic64_read(&iocg->done_vtime);
2251                 vtime = atomic64_read(&iocg->vtime);
2252                 current_hweight(iocg, &hw_active, &hw_inuse);
2253
2254                 /*
2255                  * Latency QoS detection doesn't account for IOs which are
2256                  * in-flight for longer than a period.  Detect them by
2257                  * comparing vdone against period start.  If lagging behind
2258                  * IOs from past periods, don't increase vrate.
2259                  */
2260                 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2261                     !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2262                     time_after64(vtime, vdone) &&
2263                     time_after64(vtime, now.vnow -
2264                                  MAX_LAGGING_PERIODS * period_vtime) &&
2265                     time_before64(vdone, now.vnow - period_vtime))
2266                         nr_lagging++;
2267
2268                 /*
2269                  * Determine absolute usage factoring in in-flight IOs to avoid
2270                  * high-latency completions appearing as idle.
2271                  */
2272                 usage_us = iocg->usage_delta_us;
2273                 usage_us_sum += usage_us;
2274
2275                 /* see whether there's surplus vtime */
2276                 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2277                 if (hw_inuse < hw_active ||
2278                     (!waitqueue_active(&iocg->waitq) &&
2279                      time_before64(vtime, now.vnow - ioc->margins.low))) {
2280                         u32 hwa, old_hwi, hwm, new_hwi, usage;
2281                         u64 usage_dur;
2282
2283                         if (vdone != vtime) {
2284                                 u64 inflight_us = DIV64_U64_ROUND_UP(
2285                                         cost_to_abs_cost(vtime - vdone, hw_inuse),
2286                                         ioc->vtime_base_rate);
2287
2288                                 usage_us = max(usage_us, inflight_us);
2289                         }
2290
2291                         /* convert to hweight based usage ratio */
2292                         if (time_after64(iocg->activated_at, ioc->period_at))
2293                                 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2294                         else
2295                                 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2296
2297                         usage = clamp_t(u32,
2298                                 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2299                                                    usage_dur),
2300                                 1, WEIGHT_ONE);
2301
2302                         /*
2303                          * Already donating or accumulated enough to start.
2304                          * Determine the donation amount.
2305                          */
2306                         current_hweight(iocg, &hwa, &old_hwi);
2307                         hwm = current_hweight_max(iocg);
2308                         new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2309                                                          usage, &now);
2310                         /*
2311                          * Donation calculation assumes hweight_after_donation
2312                          * to be positive, a condition that a donor w/ hwa < 2
2313                          * can't meet. Don't bother with donation if hwa is
2314                          * below 2. It's not gonna make a meaningful difference
2315                          * anyway.
2316                          */
2317                         if (new_hwi < hwm && hwa >= 2) {
2318                                 iocg->hweight_donating = hwa;
2319                                 iocg->hweight_after_donation = new_hwi;
2320                                 list_add(&iocg->surplus_list, &surpluses);
2321                         } else if (!iocg->abs_vdebt) {
2322                                 /*
2323                                  * @iocg doesn't have enough to donate. Reset
2324                                  * its inuse to active.
2325                                  *
2326                                  * Don't reset debtors as their inuse's are
2327                                  * owned by debt handling. This shouldn't affect
2328                                  * donation calculuation in any meaningful way
2329                                  * as @iocg doesn't have a meaningful amount of
2330                                  * share anyway.
2331                                  */
2332                                 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2333                                                 iocg->inuse, iocg->active,
2334                                                 iocg->hweight_inuse, new_hwi);
2335
2336                                 __propagate_weights(iocg, iocg->active,
2337                                                     iocg->active, true, &now);
2338                                 nr_shortages++;
2339                         }
2340                 } else {
2341                         /* genuinely short on vtime */
2342                         nr_shortages++;
2343                 }
2344         }
2345
2346         if (!list_empty(&surpluses) && nr_shortages)
2347                 transfer_surpluses(&surpluses, &now);
2348
2349         commit_weights(ioc);
2350
2351         /* surplus list should be dissolved after use */
2352         list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2353                 list_del_init(&iocg->surplus_list);
2354
2355         /*
2356          * If q is getting clogged or we're missing too much, we're issuing
2357          * too much IO and should lower vtime rate.  If we're not missing
2358          * and experiencing shortages but not surpluses, we're too stingy
2359          * and should increase vtime rate.
2360          */
2361         prev_busy_level = ioc->busy_level;
2362         if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2363             missed_ppm[READ] > ppm_rthr ||
2364             missed_ppm[WRITE] > ppm_wthr) {
2365                 /* clearly missing QoS targets, slow down vrate */
2366                 ioc->busy_level = max(ioc->busy_level, 0);
2367                 ioc->busy_level++;
2368         } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2369                    missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2370                    missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2371                 /* QoS targets are being met with >25% margin */
2372                 if (nr_shortages) {
2373                         /*
2374                          * We're throttling while the device has spare
2375                          * capacity.  If vrate was being slowed down, stop.
2376                          */
2377                         ioc->busy_level = min(ioc->busy_level, 0);
2378
2379                         /*
2380                          * If there are IOs spanning multiple periods, wait
2381                          * them out before pushing the device harder.
2382                          */
2383                         if (!nr_lagging)
2384                                 ioc->busy_level--;
2385                 } else {
2386                         /*
2387                          * Nobody is being throttled and the users aren't
2388                          * issuing enough IOs to saturate the device.  We
2389                          * simply don't know how close the device is to
2390                          * saturation.  Coast.
2391                          */
2392                         ioc->busy_level = 0;
2393                 }
2394         } else {
2395                 /* inside the hysterisis margin, we're good */
2396                 ioc->busy_level = 0;
2397         }
2398
2399         ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2400
2401         ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2402                               prev_busy_level, missed_ppm);
2403
2404         ioc_refresh_params(ioc, false);
2405
2406         ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2407
2408         /*
2409          * This period is done.  Move onto the next one.  If nothing's
2410          * going on with the device, stop the timer.
2411          */
2412         atomic64_inc(&ioc->cur_period);
2413
2414         if (ioc->running != IOC_STOP) {
2415                 if (!list_empty(&ioc->active_iocgs)) {
2416                         ioc_start_period(ioc, &now);
2417                 } else {
2418                         ioc->busy_level = 0;
2419                         ioc->vtime_err = 0;
2420                         ioc->running = IOC_IDLE;
2421                 }
2422
2423                 ioc_refresh_vrate(ioc, &now);
2424         }
2425
2426         spin_unlock_irq(&ioc->lock);
2427 }
2428
2429 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2430                                       u64 abs_cost, struct ioc_now *now)
2431 {
2432         struct ioc *ioc = iocg->ioc;
2433         struct ioc_margins *margins = &ioc->margins;
2434         u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2435         u32 hwi, adj_step;
2436         s64 margin;
2437         u64 cost, new_inuse;
2438
2439         current_hweight(iocg, NULL, &hwi);
2440         old_hwi = hwi;
2441         cost = abs_cost_to_cost(abs_cost, hwi);
2442         margin = now->vnow - vtime - cost;
2443
2444         /* debt handling owns inuse for debtors */
2445         if (iocg->abs_vdebt)
2446                 return cost;
2447
2448         /*
2449          * We only increase inuse during period and do so if the margin has
2450          * deteriorated since the previous adjustment.
2451          */
2452         if (margin >= iocg->saved_margin || margin >= margins->low ||
2453             iocg->inuse == iocg->active)
2454                 return cost;
2455
2456         spin_lock_irq(&ioc->lock);
2457
2458         /* we own inuse only when @iocg is in the normal active state */
2459         if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2460                 spin_unlock_irq(&ioc->lock);
2461                 return cost;
2462         }
2463
2464         /*
2465          * Bump up inuse till @abs_cost fits in the existing budget.
2466          * adj_step must be determined after acquiring ioc->lock - we might
2467          * have raced and lost to another thread for activation and could
2468          * be reading 0 iocg->active before ioc->lock which will lead to
2469          * infinite loop.
2470          */
2471         new_inuse = iocg->inuse;
2472         adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2473         do {
2474                 new_inuse = new_inuse + adj_step;
2475                 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2476                 current_hweight(iocg, NULL, &hwi);
2477                 cost = abs_cost_to_cost(abs_cost, hwi);
2478         } while (time_after64(vtime + cost, now->vnow) &&
2479                  iocg->inuse != iocg->active);
2480
2481         spin_unlock_irq(&ioc->lock);
2482
2483         TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2484                         old_inuse, iocg->inuse, old_hwi, hwi);
2485
2486         return cost;
2487 }
2488
2489 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2490                                     bool is_merge, u64 *costp)
2491 {
2492         struct ioc *ioc = iocg->ioc;
2493         u64 coef_seqio, coef_randio, coef_page;
2494         u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2495         u64 seek_pages = 0;
2496         u64 cost = 0;
2497
2498         switch (bio_op(bio)) {
2499         case REQ_OP_READ:
2500                 coef_seqio      = ioc->params.lcoefs[LCOEF_RSEQIO];
2501                 coef_randio     = ioc->params.lcoefs[LCOEF_RRANDIO];
2502                 coef_page       = ioc->params.lcoefs[LCOEF_RPAGE];
2503                 break;
2504         case REQ_OP_WRITE:
2505                 coef_seqio      = ioc->params.lcoefs[LCOEF_WSEQIO];
2506                 coef_randio     = ioc->params.lcoefs[LCOEF_WRANDIO];
2507                 coef_page       = ioc->params.lcoefs[LCOEF_WPAGE];
2508                 break;
2509         default:
2510                 goto out;
2511         }
2512
2513         if (iocg->cursor) {
2514                 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2515                 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2516         }
2517
2518         if (!is_merge) {
2519                 if (seek_pages > LCOEF_RANDIO_PAGES) {
2520                         cost += coef_randio;
2521                 } else {
2522                         cost += coef_seqio;
2523                 }
2524         }
2525         cost += pages * coef_page;
2526 out:
2527         *costp = cost;
2528 }
2529
2530 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2531 {
2532         u64 cost;
2533
2534         calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2535         return cost;
2536 }
2537
2538 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2539                                          u64 *costp)
2540 {
2541         unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2542
2543         switch (req_op(rq)) {
2544         case REQ_OP_READ:
2545                 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2546                 break;
2547         case REQ_OP_WRITE:
2548                 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2549                 break;
2550         default:
2551                 *costp = 0;
2552         }
2553 }
2554
2555 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2556 {
2557         u64 cost;
2558
2559         calc_size_vtime_cost_builtin(rq, ioc, &cost);
2560         return cost;
2561 }
2562
2563 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2564 {
2565         struct blkcg_gq *blkg = bio->bi_blkg;
2566         struct ioc *ioc = rqos_to_ioc(rqos);
2567         struct ioc_gq *iocg = blkg_to_iocg(blkg);
2568         struct ioc_now now;
2569         struct iocg_wait wait;
2570         u64 abs_cost, cost, vtime;
2571         bool use_debt, ioc_locked;
2572         unsigned long flags;
2573
2574         /* bypass IOs if disabled, still initializing, or for root cgroup */
2575         if (!ioc->enabled || !iocg || !iocg->level)
2576                 return;
2577
2578         /* calculate the absolute vtime cost */
2579         abs_cost = calc_vtime_cost(bio, iocg, false);
2580         if (!abs_cost)
2581                 return;
2582
2583         if (!iocg_activate(iocg, &now))
2584                 return;
2585
2586         iocg->cursor = bio_end_sector(bio);
2587         vtime = atomic64_read(&iocg->vtime);
2588         cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2589
2590         /*
2591          * If no one's waiting and within budget, issue right away.  The
2592          * tests are racy but the races aren't systemic - we only miss once
2593          * in a while which is fine.
2594          */
2595         if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2596             time_before_eq64(vtime + cost, now.vnow)) {
2597                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2598                 return;
2599         }
2600
2601         /*
2602          * We're over budget. This can be handled in two ways. IOs which may
2603          * cause priority inversions are punted to @ioc->aux_iocg and charged as
2604          * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2605          * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2606          * whether debt handling is needed and acquire locks accordingly.
2607          */
2608         use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2609         ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2610 retry_lock:
2611         iocg_lock(iocg, ioc_locked, &flags);
2612
2613         /*
2614          * @iocg must stay activated for debt and waitq handling. Deactivation
2615          * is synchronized against both ioc->lock and waitq.lock and we won't
2616          * get deactivated as long as we're waiting or has debt, so we're good
2617          * if we're activated here. In the unlikely cases that we aren't, just
2618          * issue the IO.
2619          */
2620         if (unlikely(list_empty(&iocg->active_list))) {
2621                 iocg_unlock(iocg, ioc_locked, &flags);
2622                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2623                 return;
2624         }
2625
2626         /*
2627          * We're over budget. If @bio has to be issued regardless, remember
2628          * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2629          * off the debt before waking more IOs.
2630          *
2631          * This way, the debt is continuously paid off each period with the
2632          * actual budget available to the cgroup. If we just wound vtime, we
2633          * would incorrectly use the current hw_inuse for the entire amount
2634          * which, for example, can lead to the cgroup staying blocked for a
2635          * long time even with substantially raised hw_inuse.
2636          *
2637          * An iocg with vdebt should stay online so that the timer can keep
2638          * deducting its vdebt and [de]activate use_delay mechanism
2639          * accordingly. We don't want to race against the timer trying to
2640          * clear them and leave @iocg inactive w/ dangling use_delay heavily
2641          * penalizing the cgroup and its descendants.
2642          */
2643         if (use_debt) {
2644                 iocg_incur_debt(iocg, abs_cost, &now);
2645                 if (iocg_kick_delay(iocg, &now))
2646                         blkcg_schedule_throttle(rqos->q->disk,
2647                                         (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2648                 iocg_unlock(iocg, ioc_locked, &flags);
2649                 return;
2650         }
2651
2652         /* guarantee that iocgs w/ waiters have maximum inuse */
2653         if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2654                 if (!ioc_locked) {
2655                         iocg_unlock(iocg, false, &flags);
2656                         ioc_locked = true;
2657                         goto retry_lock;
2658                 }
2659                 propagate_weights(iocg, iocg->active, iocg->active, true,
2660                                   &now);
2661         }
2662
2663         /*
2664          * Append self to the waitq and schedule the wakeup timer if we're
2665          * the first waiter.  The timer duration is calculated based on the
2666          * current vrate.  vtime and hweight changes can make it too short
2667          * or too long.  Each wait entry records the absolute cost it's
2668          * waiting for to allow re-evaluation using a custom wait entry.
2669          *
2670          * If too short, the timer simply reschedules itself.  If too long,
2671          * the period timer will notice and trigger wakeups.
2672          *
2673          * All waiters are on iocg->waitq and the wait states are
2674          * synchronized using waitq.lock.
2675          */
2676         init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2677         wait.wait.private = current;
2678         wait.bio = bio;
2679         wait.abs_cost = abs_cost;
2680         wait.committed = false; /* will be set true by waker */
2681
2682         __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2683         iocg_kick_waitq(iocg, ioc_locked, &now);
2684
2685         iocg_unlock(iocg, ioc_locked, &flags);
2686
2687         while (true) {
2688                 set_current_state(TASK_UNINTERRUPTIBLE);
2689                 if (wait.committed)
2690                         break;
2691                 io_schedule();
2692         }
2693
2694         /* waker already committed us, proceed */
2695         finish_wait(&iocg->waitq, &wait.wait);
2696 }
2697
2698 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2699                            struct bio *bio)
2700 {
2701         struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2702         struct ioc *ioc = rqos_to_ioc(rqos);
2703         sector_t bio_end = bio_end_sector(bio);
2704         struct ioc_now now;
2705         u64 vtime, abs_cost, cost;
2706         unsigned long flags;
2707
2708         /* bypass if disabled, still initializing, or for root cgroup */
2709         if (!ioc->enabled || !iocg || !iocg->level)
2710                 return;
2711
2712         abs_cost = calc_vtime_cost(bio, iocg, true);
2713         if (!abs_cost)
2714                 return;
2715
2716         ioc_now(ioc, &now);
2717
2718         vtime = atomic64_read(&iocg->vtime);
2719         cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2720
2721         /* update cursor if backmerging into the request at the cursor */
2722         if (blk_rq_pos(rq) < bio_end &&
2723             blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2724                 iocg->cursor = bio_end;
2725
2726         /*
2727          * Charge if there's enough vtime budget and the existing request has
2728          * cost assigned.
2729          */
2730         if (rq->bio && rq->bio->bi_iocost_cost &&
2731             time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2732                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2733                 return;
2734         }
2735
2736         /*
2737          * Otherwise, account it as debt if @iocg is online, which it should
2738          * be for the vast majority of cases. See debt handling in
2739          * ioc_rqos_throttle() for details.
2740          */
2741         spin_lock_irqsave(&ioc->lock, flags);
2742         spin_lock(&iocg->waitq.lock);
2743
2744         if (likely(!list_empty(&iocg->active_list))) {
2745                 iocg_incur_debt(iocg, abs_cost, &now);
2746                 if (iocg_kick_delay(iocg, &now))
2747                         blkcg_schedule_throttle(rqos->q->disk,
2748                                         (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2749         } else {
2750                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2751         }
2752
2753         spin_unlock(&iocg->waitq.lock);
2754         spin_unlock_irqrestore(&ioc->lock, flags);
2755 }
2756
2757 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2758 {
2759         struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2760
2761         if (iocg && bio->bi_iocost_cost)
2762                 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2763 }
2764
2765 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2766 {
2767         struct ioc *ioc = rqos_to_ioc(rqos);
2768         struct ioc_pcpu_stat *ccs;
2769         u64 on_q_ns, rq_wait_ns, size_nsec;
2770         int pidx, rw;
2771
2772         if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2773                 return;
2774
2775         switch (req_op(rq)) {
2776         case REQ_OP_READ:
2777                 pidx = QOS_RLAT;
2778                 rw = READ;
2779                 break;
2780         case REQ_OP_WRITE:
2781                 pidx = QOS_WLAT;
2782                 rw = WRITE;
2783                 break;
2784         default:
2785                 return;
2786         }
2787
2788         on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2789         rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2790         size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2791
2792         ccs = get_cpu_ptr(ioc->pcpu_stat);
2793
2794         if (on_q_ns <= size_nsec ||
2795             on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2796                 local_inc(&ccs->missed[rw].nr_met);
2797         else
2798                 local_inc(&ccs->missed[rw].nr_missed);
2799
2800         local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2801
2802         put_cpu_ptr(ccs);
2803 }
2804
2805 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2806 {
2807         struct ioc *ioc = rqos_to_ioc(rqos);
2808
2809         spin_lock_irq(&ioc->lock);
2810         ioc_refresh_params(ioc, false);
2811         spin_unlock_irq(&ioc->lock);
2812 }
2813
2814 static void ioc_rqos_exit(struct rq_qos *rqos)
2815 {
2816         struct ioc *ioc = rqos_to_ioc(rqos);
2817
2818         blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2819
2820         spin_lock_irq(&ioc->lock);
2821         ioc->running = IOC_STOP;
2822         spin_unlock_irq(&ioc->lock);
2823
2824         del_timer_sync(&ioc->timer);
2825         free_percpu(ioc->pcpu_stat);
2826         kfree(ioc);
2827 }
2828
2829 static struct rq_qos_ops ioc_rqos_ops = {
2830         .throttle = ioc_rqos_throttle,
2831         .merge = ioc_rqos_merge,
2832         .done_bio = ioc_rqos_done_bio,
2833         .done = ioc_rqos_done,
2834         .queue_depth_changed = ioc_rqos_queue_depth_changed,
2835         .exit = ioc_rqos_exit,
2836 };
2837
2838 static int blk_iocost_init(struct gendisk *disk)
2839 {
2840         struct request_queue *q = disk->queue;
2841         struct ioc *ioc;
2842         struct rq_qos *rqos;
2843         int i, cpu, ret;
2844
2845         ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2846         if (!ioc)
2847                 return -ENOMEM;
2848
2849         ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2850         if (!ioc->pcpu_stat) {
2851                 kfree(ioc);
2852                 return -ENOMEM;
2853         }
2854
2855         for_each_possible_cpu(cpu) {
2856                 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2857
2858                 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2859                         local_set(&ccs->missed[i].nr_met, 0);
2860                         local_set(&ccs->missed[i].nr_missed, 0);
2861                 }
2862                 local64_set(&ccs->rq_wait_ns, 0);
2863         }
2864
2865         rqos = &ioc->rqos;
2866         rqos->id = RQ_QOS_COST;
2867         rqos->ops = &ioc_rqos_ops;
2868         rqos->q = q;
2869
2870         spin_lock_init(&ioc->lock);
2871         timer_setup(&ioc->timer, ioc_timer_fn, 0);
2872         INIT_LIST_HEAD(&ioc->active_iocgs);
2873
2874         ioc->running = IOC_IDLE;
2875         ioc->vtime_base_rate = VTIME_PER_USEC;
2876         atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2877         seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2878         ioc->period_at = ktime_to_us(ktime_get());
2879         atomic64_set(&ioc->cur_period, 0);
2880         atomic_set(&ioc->hweight_gen, 0);
2881
2882         spin_lock_irq(&ioc->lock);
2883         ioc->autop_idx = AUTOP_INVALID;
2884         ioc_refresh_params(ioc, true);
2885         spin_unlock_irq(&ioc->lock);
2886
2887         /*
2888          * rqos must be added before activation to allow iocg_pd_init() to
2889          * lookup the ioc from q. This means that the rqos methods may get
2890          * called before policy activation completion, can't assume that the
2891          * target bio has an iocg associated and need to test for NULL iocg.
2892          */
2893         ret = rq_qos_add(q, rqos);
2894         if (ret)
2895                 goto err_free_ioc;
2896
2897         ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2898         if (ret)
2899                 goto err_del_qos;
2900         return 0;
2901
2902 err_del_qos:
2903         rq_qos_del(q, rqos);
2904 err_free_ioc:
2905         free_percpu(ioc->pcpu_stat);
2906         kfree(ioc);
2907         return ret;
2908 }
2909
2910 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2911 {
2912         struct ioc_cgrp *iocc;
2913
2914         iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2915         if (!iocc)
2916                 return NULL;
2917
2918         iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2919         return &iocc->cpd;
2920 }
2921
2922 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2923 {
2924         kfree(container_of(cpd, struct ioc_cgrp, cpd));
2925 }
2926
2927 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2928                                              struct blkcg *blkcg)
2929 {
2930         int levels = blkcg->css.cgroup->level + 1;
2931         struct ioc_gq *iocg;
2932
2933         iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2934         if (!iocg)
2935                 return NULL;
2936
2937         iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2938         if (!iocg->pcpu_stat) {
2939                 kfree(iocg);
2940                 return NULL;
2941         }
2942
2943         return &iocg->pd;
2944 }
2945
2946 static void ioc_pd_init(struct blkg_policy_data *pd)
2947 {
2948         struct ioc_gq *iocg = pd_to_iocg(pd);
2949         struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2950         struct ioc *ioc = q_to_ioc(blkg->q);
2951         struct ioc_now now;
2952         struct blkcg_gq *tblkg;
2953         unsigned long flags;
2954
2955         ioc_now(ioc, &now);
2956
2957         iocg->ioc = ioc;
2958         atomic64_set(&iocg->vtime, now.vnow);
2959         atomic64_set(&iocg->done_vtime, now.vnow);
2960         atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2961         INIT_LIST_HEAD(&iocg->active_list);
2962         INIT_LIST_HEAD(&iocg->walk_list);
2963         INIT_LIST_HEAD(&iocg->surplus_list);
2964         iocg->hweight_active = WEIGHT_ONE;
2965         iocg->hweight_inuse = WEIGHT_ONE;
2966
2967         init_waitqueue_head(&iocg->waitq);
2968         hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2969         iocg->waitq_timer.function = iocg_waitq_timer_fn;
2970
2971         iocg->level = blkg->blkcg->css.cgroup->level;
2972
2973         for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2974                 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2975                 iocg->ancestors[tiocg->level] = tiocg;
2976         }
2977
2978         spin_lock_irqsave(&ioc->lock, flags);
2979         weight_updated(iocg, &now);
2980         spin_unlock_irqrestore(&ioc->lock, flags);
2981 }
2982
2983 static void ioc_pd_free(struct blkg_policy_data *pd)
2984 {
2985         struct ioc_gq *iocg = pd_to_iocg(pd);
2986         struct ioc *ioc = iocg->ioc;
2987         unsigned long flags;
2988
2989         if (ioc) {
2990                 spin_lock_irqsave(&ioc->lock, flags);
2991
2992                 if (!list_empty(&iocg->active_list)) {
2993                         struct ioc_now now;
2994
2995                         ioc_now(ioc, &now);
2996                         propagate_weights(iocg, 0, 0, false, &now);
2997                         list_del_init(&iocg->active_list);
2998                 }
2999
3000                 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
3001                 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3002
3003                 spin_unlock_irqrestore(&ioc->lock, flags);
3004
3005                 hrtimer_cancel(&iocg->waitq_timer);
3006         }
3007         free_percpu(iocg->pcpu_stat);
3008         kfree(iocg);
3009 }
3010
3011 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3012 {
3013         struct ioc_gq *iocg = pd_to_iocg(pd);
3014         struct ioc *ioc = iocg->ioc;
3015
3016         if (!ioc->enabled)
3017                 return;
3018
3019         if (iocg->level == 0) {
3020                 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3021                         ioc->vtime_base_rate * 10000,
3022                         VTIME_PER_USEC);
3023                 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3024         }
3025
3026         seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3027
3028         if (blkcg_debug_stats)
3029                 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3030                         iocg->last_stat.wait_us,
3031                         iocg->last_stat.indebt_us,
3032                         iocg->last_stat.indelay_us);
3033 }
3034
3035 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3036                              int off)
3037 {
3038         const char *dname = blkg_dev_name(pd->blkg);
3039         struct ioc_gq *iocg = pd_to_iocg(pd);
3040
3041         if (dname && iocg->cfg_weight)
3042                 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3043         return 0;
3044 }
3045
3046
3047 static int ioc_weight_show(struct seq_file *sf, void *v)
3048 {
3049         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3050         struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3051
3052         seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3053         blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3054                           &blkcg_policy_iocost, seq_cft(sf)->private, false);
3055         return 0;
3056 }
3057
3058 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3059                                 size_t nbytes, loff_t off)
3060 {
3061         struct blkcg *blkcg = css_to_blkcg(of_css(of));
3062         struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3063         struct blkg_conf_ctx ctx;
3064         struct ioc_now now;
3065         struct ioc_gq *iocg;
3066         u32 v;
3067         int ret;
3068
3069         if (!strchr(buf, ':')) {
3070                 struct blkcg_gq *blkg;
3071
3072                 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3073                         return -EINVAL;
3074
3075                 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3076                         return -EINVAL;
3077
3078                 spin_lock_irq(&blkcg->lock);
3079                 iocc->dfl_weight = v * WEIGHT_ONE;
3080                 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3081                         struct ioc_gq *iocg = blkg_to_iocg(blkg);
3082
3083                         if (iocg) {
3084                                 spin_lock(&iocg->ioc->lock);
3085                                 ioc_now(iocg->ioc, &now);
3086                                 weight_updated(iocg, &now);
3087                                 spin_unlock(&iocg->ioc->lock);
3088                         }
3089                 }
3090                 spin_unlock_irq(&blkcg->lock);
3091
3092                 return nbytes;
3093         }
3094
3095         ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3096         if (ret)
3097                 return ret;
3098
3099         iocg = blkg_to_iocg(ctx.blkg);
3100
3101         if (!strncmp(ctx.body, "default", 7)) {
3102                 v = 0;
3103         } else {
3104                 if (!sscanf(ctx.body, "%u", &v))
3105                         goto einval;
3106                 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3107                         goto einval;
3108         }
3109
3110         spin_lock(&iocg->ioc->lock);
3111         iocg->cfg_weight = v * WEIGHT_ONE;
3112         ioc_now(iocg->ioc, &now);
3113         weight_updated(iocg, &now);
3114         spin_unlock(&iocg->ioc->lock);
3115
3116         blkg_conf_finish(&ctx);
3117         return nbytes;
3118
3119 einval:
3120         blkg_conf_finish(&ctx);
3121         return -EINVAL;
3122 }
3123
3124 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3125                           int off)
3126 {
3127         const char *dname = blkg_dev_name(pd->blkg);
3128         struct ioc *ioc = pd_to_iocg(pd)->ioc;
3129
3130         if (!dname)
3131                 return 0;
3132
3133         seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3134                    dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3135                    ioc->params.qos[QOS_RPPM] / 10000,
3136                    ioc->params.qos[QOS_RPPM] % 10000 / 100,
3137                    ioc->params.qos[QOS_RLAT],
3138                    ioc->params.qos[QOS_WPPM] / 10000,
3139                    ioc->params.qos[QOS_WPPM] % 10000 / 100,
3140                    ioc->params.qos[QOS_WLAT],
3141                    ioc->params.qos[QOS_MIN] / 10000,
3142                    ioc->params.qos[QOS_MIN] % 10000 / 100,
3143                    ioc->params.qos[QOS_MAX] / 10000,
3144                    ioc->params.qos[QOS_MAX] % 10000 / 100);
3145         return 0;
3146 }
3147
3148 static int ioc_qos_show(struct seq_file *sf, void *v)
3149 {
3150         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3151
3152         blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3153                           &blkcg_policy_iocost, seq_cft(sf)->private, false);
3154         return 0;
3155 }
3156
3157 static const match_table_t qos_ctrl_tokens = {
3158         { QOS_ENABLE,           "enable=%u"     },
3159         { QOS_CTRL,             "ctrl=%s"       },
3160         { NR_QOS_CTRL_PARAMS,   NULL            },
3161 };
3162
3163 static const match_table_t qos_tokens = {
3164         { QOS_RPPM,             "rpct=%s"       },
3165         { QOS_RLAT,             "rlat=%u"       },
3166         { QOS_WPPM,             "wpct=%s"       },
3167         { QOS_WLAT,             "wlat=%u"       },
3168         { QOS_MIN,              "min=%s"        },
3169         { QOS_MAX,              "max=%s"        },
3170         { NR_QOS_PARAMS,        NULL            },
3171 };
3172
3173 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3174                              size_t nbytes, loff_t off)
3175 {
3176         struct block_device *bdev;
3177         struct gendisk *disk;
3178         struct ioc *ioc;
3179         u32 qos[NR_QOS_PARAMS];
3180         bool enable, user;
3181         char *p;
3182         int ret;
3183
3184         bdev = blkcg_conf_open_bdev(&input);
3185         if (IS_ERR(bdev))
3186                 return PTR_ERR(bdev);
3187
3188         disk = bdev->bd_disk;
3189         ioc = q_to_ioc(disk->queue);
3190         if (!ioc) {
3191                 ret = blk_iocost_init(disk);
3192                 if (ret)
3193                         goto err;
3194                 ioc = q_to_ioc(disk->queue);
3195         }
3196
3197         spin_lock_irq(&ioc->lock);
3198         memcpy(qos, ioc->params.qos, sizeof(qos));
3199         enable = ioc->enabled;
3200         user = ioc->user_qos_params;
3201         spin_unlock_irq(&ioc->lock);
3202
3203         while ((p = strsep(&input, " \t\n"))) {
3204                 substring_t args[MAX_OPT_ARGS];
3205                 char buf[32];
3206                 int tok;
3207                 s64 v;
3208
3209                 if (!*p)
3210                         continue;
3211
3212                 switch (match_token(p, qos_ctrl_tokens, args)) {
3213                 case QOS_ENABLE:
3214                         match_u64(&args[0], &v);
3215                         enable = v;
3216                         continue;
3217                 case QOS_CTRL:
3218                         match_strlcpy(buf, &args[0], sizeof(buf));
3219                         if (!strcmp(buf, "auto"))
3220                                 user = false;
3221                         else if (!strcmp(buf, "user"))
3222                                 user = true;
3223                         else
3224                                 goto einval;
3225                         continue;
3226                 }
3227
3228                 tok = match_token(p, qos_tokens, args);
3229                 switch (tok) {
3230                 case QOS_RPPM:
3231                 case QOS_WPPM:
3232                         if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3233                             sizeof(buf))
3234                                 goto einval;
3235                         if (cgroup_parse_float(buf, 2, &v))
3236                                 goto einval;
3237                         if (v < 0 || v > 10000)
3238                                 goto einval;
3239                         qos[tok] = v * 100;
3240                         break;
3241                 case QOS_RLAT:
3242                 case QOS_WLAT:
3243                         if (match_u64(&args[0], &v))
3244                                 goto einval;
3245                         qos[tok] = v;
3246                         break;
3247                 case QOS_MIN:
3248                 case QOS_MAX:
3249                         if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3250                             sizeof(buf))
3251                                 goto einval;
3252                         if (cgroup_parse_float(buf, 2, &v))
3253                                 goto einval;
3254                         if (v < 0)
3255                                 goto einval;
3256                         qos[tok] = clamp_t(s64, v * 100,
3257                                            VRATE_MIN_PPM, VRATE_MAX_PPM);
3258                         break;
3259                 default:
3260                         goto einval;
3261                 }
3262                 user = true;
3263         }
3264
3265         if (qos[QOS_MIN] > qos[QOS_MAX])
3266                 goto einval;
3267
3268         spin_lock_irq(&ioc->lock);
3269
3270         if (enable) {
3271                 blk_stat_enable_accounting(disk->queue);
3272                 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3273                 ioc->enabled = true;
3274         } else {
3275                 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3276                 ioc->enabled = false;
3277         }
3278
3279         if (user) {
3280                 memcpy(ioc->params.qos, qos, sizeof(qos));
3281                 ioc->user_qos_params = true;
3282         } else {
3283                 ioc->user_qos_params = false;
3284         }
3285
3286         ioc_refresh_params(ioc, true);
3287         spin_unlock_irq(&ioc->lock);
3288
3289         blkdev_put_no_open(bdev);
3290         return nbytes;
3291 einval:
3292         ret = -EINVAL;
3293 err:
3294         blkdev_put_no_open(bdev);
3295         return ret;
3296 }
3297
3298 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3299                                  struct blkg_policy_data *pd, int off)
3300 {
3301         const char *dname = blkg_dev_name(pd->blkg);
3302         struct ioc *ioc = pd_to_iocg(pd)->ioc;
3303         u64 *u = ioc->params.i_lcoefs;
3304
3305         if (!dname)
3306                 return 0;
3307
3308         seq_printf(sf, "%s ctrl=%s model=linear "
3309                    "rbps=%llu rseqiops=%llu rrandiops=%llu "
3310                    "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3311                    dname, ioc->user_cost_model ? "user" : "auto",
3312                    u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3313                    u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3314         return 0;
3315 }
3316
3317 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3318 {
3319         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3320
3321         blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3322                           &blkcg_policy_iocost, seq_cft(sf)->private, false);
3323         return 0;
3324 }
3325
3326 static const match_table_t cost_ctrl_tokens = {
3327         { COST_CTRL,            "ctrl=%s"       },
3328         { COST_MODEL,           "model=%s"      },
3329         { NR_COST_CTRL_PARAMS,  NULL            },
3330 };
3331
3332 static const match_table_t i_lcoef_tokens = {
3333         { I_LCOEF_RBPS,         "rbps=%u"       },
3334         { I_LCOEF_RSEQIOPS,     "rseqiops=%u"   },
3335         { I_LCOEF_RRANDIOPS,    "rrandiops=%u"  },
3336         { I_LCOEF_WBPS,         "wbps=%u"       },
3337         { I_LCOEF_WSEQIOPS,     "wseqiops=%u"   },
3338         { I_LCOEF_WRANDIOPS,    "wrandiops=%u"  },
3339         { NR_I_LCOEFS,          NULL            },
3340 };
3341
3342 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3343                                     size_t nbytes, loff_t off)
3344 {
3345         struct block_device *bdev;
3346         struct ioc *ioc;
3347         u64 u[NR_I_LCOEFS];
3348         bool user;
3349         char *p;
3350         int ret;
3351
3352         bdev = blkcg_conf_open_bdev(&input);
3353         if (IS_ERR(bdev))
3354                 return PTR_ERR(bdev);
3355
3356         ioc = q_to_ioc(bdev_get_queue(bdev));
3357         if (!ioc) {
3358                 ret = blk_iocost_init(bdev->bd_disk);
3359                 if (ret)
3360                         goto err;
3361                 ioc = q_to_ioc(bdev_get_queue(bdev));
3362         }
3363
3364         spin_lock_irq(&ioc->lock);
3365         memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3366         user = ioc->user_cost_model;
3367         spin_unlock_irq(&ioc->lock);
3368
3369         while ((p = strsep(&input, " \t\n"))) {
3370                 substring_t args[MAX_OPT_ARGS];
3371                 char buf[32];
3372                 int tok;
3373                 u64 v;
3374
3375                 if (!*p)
3376                         continue;
3377
3378                 switch (match_token(p, cost_ctrl_tokens, args)) {
3379                 case COST_CTRL:
3380                         match_strlcpy(buf, &args[0], sizeof(buf));
3381                         if (!strcmp(buf, "auto"))
3382                                 user = false;
3383                         else if (!strcmp(buf, "user"))
3384                                 user = true;
3385                         else
3386                                 goto einval;
3387                         continue;
3388                 case COST_MODEL:
3389                         match_strlcpy(buf, &args[0], sizeof(buf));
3390                         if (strcmp(buf, "linear"))
3391                                 goto einval;
3392                         continue;
3393                 }
3394
3395                 tok = match_token(p, i_lcoef_tokens, args);
3396                 if (tok == NR_I_LCOEFS)
3397                         goto einval;
3398                 if (match_u64(&args[0], &v))
3399                         goto einval;
3400                 u[tok] = v;
3401                 user = true;
3402         }
3403
3404         spin_lock_irq(&ioc->lock);
3405         if (user) {
3406                 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3407                 ioc->user_cost_model = true;
3408         } else {
3409                 ioc->user_cost_model = false;
3410         }
3411         ioc_refresh_params(ioc, true);
3412         spin_unlock_irq(&ioc->lock);
3413
3414         blkdev_put_no_open(bdev);
3415         return nbytes;
3416
3417 einval:
3418         ret = -EINVAL;
3419 err:
3420         blkdev_put_no_open(bdev);
3421         return ret;
3422 }
3423
3424 static struct cftype ioc_files[] = {
3425         {
3426                 .name = "weight",
3427                 .flags = CFTYPE_NOT_ON_ROOT,
3428                 .seq_show = ioc_weight_show,
3429                 .write = ioc_weight_write,
3430         },
3431         {
3432                 .name = "cost.qos",
3433                 .flags = CFTYPE_ONLY_ON_ROOT,
3434                 .seq_show = ioc_qos_show,
3435                 .write = ioc_qos_write,
3436         },
3437         {
3438                 .name = "cost.model",
3439                 .flags = CFTYPE_ONLY_ON_ROOT,
3440                 .seq_show = ioc_cost_model_show,
3441                 .write = ioc_cost_model_write,
3442         },
3443         {}
3444 };
3445
3446 static struct blkcg_policy blkcg_policy_iocost = {
3447         .dfl_cftypes    = ioc_files,
3448         .cpd_alloc_fn   = ioc_cpd_alloc,
3449         .cpd_free_fn    = ioc_cpd_free,
3450         .pd_alloc_fn    = ioc_pd_alloc,
3451         .pd_init_fn     = ioc_pd_init,
3452         .pd_free_fn     = ioc_pd_free,
3453         .pd_stat_fn     = ioc_pd_stat,
3454 };
3455
3456 static int __init ioc_init(void)
3457 {
3458         return blkcg_policy_register(&blkcg_policy_iocost);
3459 }
3460
3461 static void __exit ioc_exit(void)
3462 {
3463         blkcg_policy_unregister(&blkcg_policy_iocost);
3464 }
3465
3466 module_init(ioc_init);
3467 module_exit(ioc_exit);