block/blk-iolatency.c
1 /*
2  * Block rq-qos base io controller
3  *
4  * This works similar to wbt with a few exceptions
5  *
6  * - It's bio based, so the latency covers the whole block layer in addition to
7  *   the actual io.
8  * - We will throttle all IO that comes in here if we need to.
9  * - We use the mean latency over the 100ms window.  This is because writes can
10  *   be particularly fast, which could give us a false sense of the impact of
11  *   other workloads on our protected workload.
12  * - By default there's no throttling, we set the queue_depth to UINT_MAX so
13  *   that we can have as many outstanding bio's as we're allowed to.  Only at
14  *   throttle time do we pay attention to the actual queue depth.
15  *
16  * The hierarchy works like the cpu controller does, we track the latency at
17  * every configured node, and each configured node has its own independent
18  * queue depth.  This means that we only care about our latency targets at the
19  * peer level.  Some group at the bottom of the hierarchy isn't going to affect
20  * a group at the end of some other path if we're only configured at leaf level.
21  *
22  * Consider the following
23  *
24  *                   root blkg
25  *             /                     \
26  *        fast (target=5ms)     slow (target=10ms)
27  *         /     \                  /        \
28  *       a        b          normal(15ms)   unloved
29  *
30  * "a" and "b" have no target, but their combined io under "fast" cannot exceed
31  * an average latency of 5ms.  If it does then we will throttle the "slow"
32  * group.  In the case of "normal", if it exceeds its 15ms target, we will
33  * throttle "unloved", but nobody else.
34  *
35  * In this example "fast", "slow", and "normal" will be the only groups actually
36  * accounting their io latencies.  We have to walk up the hierarchy to the root
37  * on every submit and complete so we can do the appropriate stat recording and
38  * adjust the queue depth of ourselves if needed.
39  *
40  * There are 2 ways we throttle IO.
41  *
42  * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
43  * number of IO's we're allowed to have in flight.  This starts at (u64)-1 down
44  * to 1.  If the group is only ever submitting IO for itself then this is the
45  * only way we throttle.
46  *
47  * 2) Induced delay throttling.  This is for the case that a group is generating
48  * IO that has to be issued by the root cg to avoid priority inversion. So think
49  * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
50  * of work done for us on behalf of the root cg and are being asked to scale
51  * down more, then we induce a latency at userspace return.  We accumulate the
52  * total amount of time we need to be punished by doing
53  *
54  * total_time += min_lat_nsec - actual_io_completion
55  *
56  * and then at throttle time will do
57  *
58  * throttle_time = min(total_time, NSEC_PER_SEC)
59  *
60  * This induced delay will throttle back the activity that is generating the
61  * root cg issued io's, whether that's some metadata intensive operation or the
62  * group is using so much memory that it is pushing us into swap.
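 *
 * As an example, with purely illustrative numbers: if min_lat_nsec is 10ms and
 * a REQ_META bio issued on our behalf by the root cg completes in 2ms, we add
 * 8ms to total_time; when the task next returns to userspace it is put to
 * sleep for min(total_time, 1s).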
63  *
64  * Copyright (C) 2018 Josef Bacik
65  */
66 #include <linux/kernel.h>
67 #include <linux/blk_types.h>
68 #include <linux/backing-dev.h>
69 #include <linux/module.h>
70 #include <linux/timer.h>
71 #include <linux/memcontrol.h>
72 #include <linux/sched/loadavg.h>
73 #include <linux/sched/signal.h>
74 #include <trace/events/block.h>
75 #include <linux/blk-mq.h>
76 #include "blk-rq-qos.h"
77 #include "blk-stat.h"
78 #include "blk.h"
79
80 #define DEFAULT_SCALE_COOKIE 1000000U
81
82 static struct blkcg_policy blkcg_policy_iolatency;
83 struct iolatency_grp;
84
85 struct blk_iolatency {
86         struct rq_qos rqos;
87         struct timer_list timer;
88         atomic_t enabled;
89 };
90
91 static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
92 {
93         return container_of(rqos, struct blk_iolatency, rqos);
94 }
95
96 static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
97 {
98         return atomic_read(&blkiolat->enabled) > 0;
99 }
100
101 struct child_latency_info {
102         spinlock_t lock;
103
104         /* Last time we adjusted the scale of everybody. */
105         u64 last_scale_event;
106
107         /* The latency that we missed. */
108         u64 scale_lat;
109
110         /* Total io's from all of our children for the last summation. */
111         u64 nr_samples;
112
113         /* The guy who actually changed the latency numbers. */
114         struct iolatency_grp *scale_grp;
115
116         /* Cookie to tell if we need to scale up or down. */
117         atomic_t scale_cookie;
118 };
119
120 struct iolatency_grp {
121         struct blkg_policy_data pd;
122         struct blk_rq_stat __percpu *stats;
123         struct blk_iolatency *blkiolat;
124         struct rq_depth rq_depth;
125         struct rq_wait rq_wait;
126         atomic64_t window_start;
127         atomic_t scale_cookie;
128         u64 min_lat_nsec;
129         u64 cur_win_nsec;
130
131         /* total running average of our io latency. */
132         u64 lat_avg;
133
134         /* Our current number of IO's for the last summation. */
135         u64 nr_samples;
136
137         struct child_latency_info child_lat;
138 };
139
140 #define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
141 #define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
142 /*
143  * These are the constants used to fake the fixed-point moving average
144  * calculation just like load average.  The call to CALC_LOAD folds
145  * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
146  * window size is bucketed to try to approximately calculate average
147  * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
148  * elapse immediately.  Note, windows only elapse with IO activity.  Idle
149  * periods extend the most recent window.
150  */
151 #define BLKIOLATENCY_NR_EXP_FACTORS 5
152 #define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
153                                       (BLKIOLATENCY_NR_EXP_FACTORS - 1))
154 static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
155         2045, // exp(1/600) - 600 samples
156         2039, // exp(1/240) - 240 samples
157         2031, // exp(1/120) - 120 samples
158         2023, // exp(1/80)  - 80 samples
159         2014, // exp(1/60)  - 60 samples
160 };
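/*
 * Worked example (illustrative): BLKIOLATENCY_EXP_BUCKET_SIZE comes out to
 * 250ms, so a 100ms window maps to index 0 (factor 2045, ~600-window decay)
 * while the maximum 1s window maps to index 4 (factor 2014, ~60-window decay).
 */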
161
162 static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
163 {
164         return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
165 }
166
167 static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
168 {
169         return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
170 }
171
172 static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
173 {
174         return pd_to_blkg(&iolat->pd);
175 }
176
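/*
 * A bio may take a slot only if the group's inflight count is still below
 * rq_depth.max_depth; on its first attempt it also refuses to jump ahead of
 * sleepers already on the wait queue, keeping the ordering roughly FIFO.
 */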
177 static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
178                                        wait_queue_entry_t *wait,
179                                        bool first_block)
180 {
181         struct rq_wait *rqw = &iolat->rq_wait;
182
183         if (first_block && waitqueue_active(&rqw->wait) &&
184             rqw->wait.head.next != &wait->entry)
185                 return false;
186         return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
187 }
188
189 static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
190                                        struct iolatency_grp *iolat,
191                                        spinlock_t *lock, bool issue_as_root,
192                                        bool use_memdelay)
193         __releases(lock)
194         __acquires(lock)
195 {
196         struct rq_wait *rqw = &iolat->rq_wait;
197         unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
198         DEFINE_WAIT(wait);
199         bool first_block = true;
200
201         if (use_delay)
202                 blkcg_schedule_throttle(rqos->q, use_memdelay);
203
204         /*
205          * To avoid priority inversions we want to just take a slot if we are
206          * issuing as root.  If we're being killed off there's no point in
207          * delaying things, we may have been killed by OOM so throttling may
208          * make recovery take even longer, so just let the IO's through so the
209          * task can go away.
210          */
211         if (issue_as_root || fatal_signal_pending(current)) {
212                 atomic_inc(&rqw->inflight);
213                 return;
214         }
215
216         if (iolatency_may_queue(iolat, &wait, first_block))
217                 return;
218
219         do {
220                 prepare_to_wait_exclusive(&rqw->wait, &wait,
221                                           TASK_UNINTERRUPTIBLE);
222
223                 if (iolatency_may_queue(iolat, &wait, first_block))
224                         break;
225                 first_block = false;
226
227                 if (lock) {
228                         spin_unlock_irq(lock);
229                         io_schedule();
230                         spin_lock_irq(lock);
231                 } else {
232                         io_schedule();
233                 }
234         } while (1);
235
236         finish_wait(&rqw->wait, &wait);
237 }
238
239 #define SCALE_DOWN_FACTOR 2
240 #define SCALE_UP_FACTOR 4
241
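/*
 * Step size for queue depth / scale cookie adjustments: 1/16th of the device
 * queue depth when scaling up, 1/4th when scaling down, never less than 1.
 * E.g. (illustrative) qd == 128 gives steps of 8 up and 32 down.
 */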
242 static inline unsigned long scale_amount(unsigned long qd, bool up)
243 {
244         return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
245 }
246
247 /*
248  * We scale the qd down faster than we scale up, so we need to use this helper
249  * to adjust the scale_cookie accordingly so we don't prematurely get
250  * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
251  *
252  * Each group has their own local copy of the last scale cookie they saw, so if
253  * the global scale cookie goes up or down they know which way they need to go
254  * based on their last knowledge of it.
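 *
 * For example (illustrative): with qd == 128 the cookie normally moves in
 * steps of 8 up and 32 down, but once it has fallen more than qd below
 * DEFAULT_SCALE_COOKIE it only creeps by 1 per event and never drops more
 * than 2 * qd below the default.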
255  */
256 static void scale_cookie_change(struct blk_iolatency *blkiolat,
257                                 struct child_latency_info *lat_info,
258                                 bool up)
259 {
260         unsigned long qd = blk_queue_depth(blkiolat->rqos.q);
261         unsigned long scale = scale_amount(qd, up);
262         unsigned long old = atomic_read(&lat_info->scale_cookie);
263         unsigned long max_scale = qd << 1;
264         unsigned long diff = 0;
265
266         if (old < DEFAULT_SCALE_COOKIE)
267                 diff = DEFAULT_SCALE_COOKIE - old;
268
269         if (up) {
270                 if (scale + old > DEFAULT_SCALE_COOKIE)
271                         atomic_set(&lat_info->scale_cookie,
272                                    DEFAULT_SCALE_COOKIE);
273                 else if (diff > qd)
274                         atomic_inc(&lat_info->scale_cookie);
275                 else
276                         atomic_add(scale, &lat_info->scale_cookie);
277         } else {
278                 /*
279                  * We don't want to dig a hole so deep that it takes us hours to
280                  * dig out of it.  Just enough that we don't throttle/unthrottle
281                  * with jagged workloads but can still unthrottle once pressure
282                  * has sufficiently dissipated.
283                  */
284                 if (diff > qd) {
285                         if (diff < max_scale)
286                                 atomic_dec(&lat_info->scale_cookie);
287                 } else {
288                         atomic_sub(scale, &lat_info->scale_cookie);
289                 }
290         }
291 }
292
293 /*
294  * Change the queue depth of the iolatency_grp.  We add 1/16th of the queue
295  * depth at a time when scaling up and halve the limit when scaling down, so
296  * we don't get wild swings and hopefully dial in to a fairer distribution.
297  */
298 static void scale_change(struct iolatency_grp *iolat, bool up)
299 {
300         unsigned long qd = blk_queue_depth(iolat->blkiolat->rqos.q);
301         unsigned long scale = scale_amount(qd, up);
302         unsigned long old = iolat->rq_depth.max_depth;
303         bool changed = false;
304
305         if (old > qd)
306                 old = qd;
307
308         if (up) {
309                 if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
310                         return;
311
312                 if (old < qd) {
313                         changed = true;
314                         old += scale;
315                         old = min(old, qd);
316                         iolat->rq_depth.max_depth = old;
317                         wake_up_all(&iolat->rq_wait.wait);
318                 }
319         } else if (old > 1) {
320                 old >>= 1;
321                 changed = true;
322                 iolat->rq_depth.max_depth = max(old, 1UL);
323         }
324 }
325
326 /* Check our parent and see if the scale cookie has changed. */
327 static void check_scale_change(struct iolatency_grp *iolat)
328 {
329         struct iolatency_grp *parent;
330         struct child_latency_info *lat_info;
331         unsigned int cur_cookie;
332         unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
333         u64 scale_lat;
334         unsigned int old;
335         int direction = 0;
336
337         if (lat_to_blkg(iolat)->parent == NULL)
338                 return;
339
340         parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
341         if (!parent)
342                 return;
343
344         lat_info = &parent->child_lat;
345         cur_cookie = atomic_read(&lat_info->scale_cookie);
346         scale_lat = READ_ONCE(lat_info->scale_lat);
347
348         if (cur_cookie < our_cookie)
349                 direction = -1;
350         else if (cur_cookie > our_cookie)
351                 direction = 1;
352         else
353                 return;
354
355         old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
356
357         /* Somebody beat us to the punch, just bail. */
358         if (old != our_cookie)
359                 return;
360
361         if (direction < 0 && iolat->min_lat_nsec) {
362                 u64 samples_thresh;
363
364                 if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
365                         return;
366
367                 /*
368                  * Sometimes high priority groups are their own worst enemy, so
369                  * instead of taking it out on some poor other group that did 5%
370                  * or less of the IO's for the last summation just skip this
371                  * scale down event.
372                  */
373                 samples_thresh = lat_info->nr_samples * 5;
374                 samples_thresh = div64_u64(samples_thresh, 100);
375                 if (iolat->nr_samples <= samples_thresh)
376                         return;
377         }
378
379         /* We're as low as we can go. */
380         if (iolat->rq_depth.max_depth == 1 && direction < 0) {
381                 blkcg_use_delay(lat_to_blkg(iolat));
382                 return;
383         }
384
385         /* We're back to the default cookie, unthrottle all the things. */
386         if (cur_cookie == DEFAULT_SCALE_COOKIE) {
387                 blkcg_clear_delay(lat_to_blkg(iolat));
388                 iolat->rq_depth.max_depth = UINT_MAX;
389                 wake_up_all(&iolat->rq_wait.wait);
390                 return;
391         }
392
393         scale_change(iolat, direction > 0);
394 }
395
396 static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
397                                      spinlock_t *lock)
398 {
399         struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
400         struct blkcg *blkcg;
401         struct blkcg_gq *blkg;
402         struct request_queue *q = rqos->q;
403         bool issue_as_root = bio_issue_as_root_blkg(bio);
404
405         if (!blk_iolatency_enabled(blkiolat))
406                 return;
407
408         rcu_read_lock();
409         blkcg = bio_blkcg(bio);
410         bio_associate_blkcg(bio, &blkcg->css);
411         blkg = blkg_lookup(blkcg, q);
412         if (unlikely(!blkg)) {
413                 if (!lock)
414                         spin_lock_irq(q->queue_lock);
415                 blkg = blkg_lookup_create(blkcg, q);
416                 if (IS_ERR(blkg))
417                         blkg = NULL;
418                 if (!lock)
419                         spin_unlock_irq(q->queue_lock);
420         }
421         if (!blkg)
422                 goto out;
423
424         bio_issue_init(&bio->bi_issue, bio_sectors(bio));
425         bio_associate_blkg(bio, blkg);
426 out:
427         rcu_read_unlock();
428         while (blkg && blkg->parent) {
429                 struct iolatency_grp *iolat = blkg_to_lat(blkg);
430                 if (!iolat) {
431                         blkg = blkg->parent;
432                         continue;
433                 }
434
435                 check_scale_change(iolat);
436                 __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
437                                      (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
438                 blkg = blkg->parent;
439         }
440         if (!timer_pending(&blkiolat->timer))
441                 mod_timer(&blkiolat->timer, jiffies + HZ);
442 }
443
444 static void iolatency_record_time(struct iolatency_grp *iolat,
445                                   struct bio_issue *issue, u64 now,
446                                   bool issue_as_root)
447 {
448         struct blk_rq_stat *rq_stat;
449         u64 start = bio_issue_time(issue);
450         u64 req_time;
451
452         /*
453          * Truncate "now" to the same granularity that the issue time was
454          * truncated to, so the two timestamps are comparable.
455          */
456         now = __bio_issue_time(now);
457
458         if (now <= start)
459                 return;
460
461         req_time = now - start;
462
463         /*
464          * We don't want to count issue_as_root bio's in the cgroups latency
465          * statistics as it could skew the numbers downwards.
466          */
467         if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
468                 u64 sub = iolat->min_lat_nsec;
469                 if (req_time < sub)
470                         blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
471                 return;
472         }
473
474         rq_stat = get_cpu_ptr(iolat->stats);
475         blk_rq_stat_add(rq_stat, req_time);
476         put_cpu_ptr(rq_stat);
477 }
478
479 #define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
480 #define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
481
482 static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
483 {
484         struct blkcg_gq *blkg = lat_to_blkg(iolat);
485         struct iolatency_grp *parent;
486         struct child_latency_info *lat_info;
487         struct blk_rq_stat stat;
488         unsigned long flags;
489         int cpu, exp_idx;
490
491         blk_rq_stat_init(&stat);
492         preempt_disable();
493         for_each_online_cpu(cpu) {
494                 struct blk_rq_stat *s;
495                 s = per_cpu_ptr(iolat->stats, cpu);
496                 blk_rq_stat_sum(&stat, s);
497                 blk_rq_stat_init(s);
498         }
499         preempt_enable();
500
501         parent = blkg_to_lat(blkg->parent);
502         if (!parent)
503                 return;
504
505         lat_info = &parent->child_lat;
506
507         /*
508          * CALC_LOAD takes in a number stored in fixed point representation.
509          * Because we are using this for IO time in ns, the values stored
510          * are significantly larger than the FIXED_1 denominator (2048).
511          * Therefore, rounding errors in the calculation are negligible and
512          * can be ignored.
513          */
514         exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
515                         div64_u64(iolat->cur_win_nsec,
516                                   BLKIOLATENCY_EXP_BUCKET_SIZE));
517         CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat.mean);
518
519         /* Everything is ok and we don't need to adjust the scale. */
520         if (stat.mean <= iolat->min_lat_nsec &&
521             atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
522                 return;
523
524         /* Fold this window's samples into the parent under its lock. */
525         spin_lock_irqsave(&lat_info->lock, flags);
526         lat_info->nr_samples -= iolat->nr_samples;
527         lat_info->nr_samples += stat.nr_samples;
528         iolat->nr_samples = stat.nr_samples;
529
530         if ((lat_info->last_scale_event >= now ||
531             now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME) &&
532             lat_info->scale_lat <= iolat->min_lat_nsec)
533                 goto out;
534
535         if (stat.mean <= iolat->min_lat_nsec &&
536             stat.nr_samples >= BLKIOLATENCY_MIN_GOOD_SAMPLES) {
537                 if (lat_info->scale_grp == iolat) {
538                         lat_info->last_scale_event = now;
539                         scale_cookie_change(iolat->blkiolat, lat_info, true);
540                 }
541         } else if (stat.mean > iolat->min_lat_nsec) {
542                 lat_info->last_scale_event = now;
543                 if (!lat_info->scale_grp ||
544                     lat_info->scale_lat > iolat->min_lat_nsec) {
545                         WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
546                         lat_info->scale_grp = iolat;
547                 }
548                 scale_cookie_change(iolat->blkiolat, lat_info, false);
549         }
550 out:
551         spin_unlock_irqrestore(&lat_info->lock, flags);
552 }
553
554 static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
555 {
556         struct blkcg_gq *blkg;
557         struct rq_wait *rqw;
558         struct iolatency_grp *iolat;
559         u64 window_start;
560         u64 now = ktime_to_ns(ktime_get());
561         bool issue_as_root = bio_issue_as_root_blkg(bio);
562         bool enabled = false;
563
564         blkg = bio->bi_blkg;
565         if (!blkg)
566                 return;
567
568         iolat = blkg_to_lat(bio->bi_blkg);
569         if (!iolat)
570                 return;
571
572         enabled = blk_iolatency_enabled(iolat->blkiolat);
573         if (!enabled)
574                 return;
575
576         while (blkg && blkg->parent) {
577                 iolat = blkg_to_lat(blkg);
578                 if (!iolat) {
579                         blkg = blkg->parent;
580                         continue;
581                 }
582                 rqw = &iolat->rq_wait;
583
584                 atomic_dec(&rqw->inflight);
585                 if (iolat->min_lat_nsec == 0)
586                         goto next;
587                 iolatency_record_time(iolat, &bio->bi_issue, now,
588                                       issue_as_root);
589                 window_start = atomic64_read(&iolat->window_start);
590                 if (now > window_start &&
591                     (now - window_start) >= iolat->cur_win_nsec) {
592                         if (atomic64_cmpxchg(&iolat->window_start,
593                                         window_start, now) == window_start)
594                                 iolatency_check_latencies(iolat, now);
595                 }
596 next:
597                 wake_up(&rqw->wait);
598                 blkg = blkg->parent;
599         }
600 }
601
602 static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
603 {
604         struct blkcg_gq *blkg;
605
606         blkg = bio->bi_blkg;
607         while (blkg && blkg->parent) {
608                 struct rq_wait *rqw;
609                 struct iolatency_grp *iolat;
610
611                 iolat = blkg_to_lat(blkg);
612                 if (!iolat)
613                         goto next;
614
615                 rqw = &iolat->rq_wait;
616                 atomic_dec(&rqw->inflight);
617                 wake_up(&rqw->wait);
618 next:
619                 blkg = blkg->parent;
620         }
621 }
622
623 static void blkcg_iolatency_exit(struct rq_qos *rqos)
624 {
625         struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
626
627         del_timer_sync(&blkiolat->timer);
628         blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
629         kfree(blkiolat);
630 }
631
632 static struct rq_qos_ops blkcg_iolatency_ops = {
633         .throttle = blkcg_iolatency_throttle,
634         .cleanup = blkcg_iolatency_cleanup,
635         .done_bio = blkcg_iolatency_done_bio,
636         .exit = blkcg_iolatency_exit,
637 };
638
639 static void blkiolatency_timer_fn(struct timer_list *t)
640 {
641         struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
642         struct blkcg_gq *blkg;
643         struct cgroup_subsys_state *pos_css;
644         u64 now = ktime_to_ns(ktime_get());
645
646         rcu_read_lock();
647         blkg_for_each_descendant_pre(blkg, pos_css,
648                                      blkiolat->rqos.q->root_blkg) {
649                 struct iolatency_grp *iolat;
650                 struct child_latency_info *lat_info;
651                 unsigned long flags;
652                 u64 cookie;
653
654                 /*
655                  * We could be exiting, don't access the pd unless we have a
656                  * ref on the blkg.
657                  */
658                 if (!blkg_try_get(blkg))
659                         continue;
660
661                 iolat = blkg_to_lat(blkg);
662                 if (!iolat)
663                         goto next;
664
665                 lat_info = &iolat->child_lat;
666                 cookie = atomic_read(&lat_info->scale_cookie);
667
668                 if (cookie >= DEFAULT_SCALE_COOKIE)
669                         goto next;
670
671                 spin_lock_irqsave(&lat_info->lock, flags);
672                 if (lat_info->last_scale_event >= now)
673                         goto next_lock;
674
675                 /*
676                  * We scaled down but don't have a scale_grp, scale up and carry
677                  * on.
678                  */
679                 if (lat_info->scale_grp == NULL) {
680                         scale_cookie_change(iolat->blkiolat, lat_info, true);
681                         goto next_lock;
682                 }
683
684                 /*
685                  * It's been 5 seconds since our last scale event, clear the
686                  * scale grp in case the group that needed the scale down isn't
687                  * doing any IO currently.
688                  */
689                 if (now - lat_info->last_scale_event >=
690                     ((u64)NSEC_PER_SEC * 5))
691                         lat_info->scale_grp = NULL;
692 next_lock:
693                 spin_unlock_irqrestore(&lat_info->lock, flags);
694 next:
695                 blkg_put(blkg);
696         }
697         rcu_read_unlock();
698 }
699
700 int blk_iolatency_init(struct request_queue *q)
701 {
702         struct blk_iolatency *blkiolat;
703         struct rq_qos *rqos;
704         int ret;
705
706         blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
707         if (!blkiolat)
708                 return -ENOMEM;
709
710         rqos = &blkiolat->rqos;
711         rqos->id = RQ_QOS_CGROUP;
712         rqos->ops = &blkcg_iolatency_ops;
713         rqos->q = q;
714
715         rq_qos_add(q, rqos);
716
717         ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
718         if (ret) {
719                 rq_qos_del(q, rqos);
720                 kfree(blkiolat);
721                 return ret;
722         }
723
724         timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
725
726         return 0;
727 }
728
729 /*
730  * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
731  * return 0.
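 *
 * The sampling window is also resized to 16 * val, clamped to [100ms, 1s];
 * e.g. (illustrative) a 5ms target yields 80ms, which is raised to the 100ms
 * minimum.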
732  */
733 static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
734 {
735         struct iolatency_grp *iolat = blkg_to_lat(blkg);
736         u64 oldval = iolat->min_lat_nsec;
737
738         iolat->min_lat_nsec = val;
739         iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
740         iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
741                                     BLKIOLATENCY_MAX_WIN_SIZE);
742
743         if (!oldval && val)
744                 return 1;
745         if (oldval && !val)
746                 return -1;
747         return 0;
748 }
749
750 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
751 {
752         if (blkg->parent) {
753                 struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
754                 struct child_latency_info *lat_info;
755                 if (!iolat)
756                         return;
757
758                 lat_info = &iolat->child_lat;
759                 spin_lock(&lat_info->lock);
760                 atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
761                 lat_info->last_scale_event = 0;
762                 lat_info->scale_grp = NULL;
763                 lat_info->scale_lat = 0;
764                 spin_unlock(&lat_info->lock);
765         }
766 }
767
768 static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
769                              size_t nbytes, loff_t off)
770 {
771         struct blkcg *blkcg = css_to_blkcg(of_css(of));
772         struct blkcg_gq *blkg;
773         struct blk_iolatency *blkiolat;
774         struct blkg_conf_ctx ctx;
775         struct iolatency_grp *iolat;
776         char *p, *tok;
777         u64 lat_val = 0;
778         u64 oldval;
779         int ret;
780         int enable = 0;
781
782         ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
783         if (ret)
784                 return ret;
785
786         iolat = blkg_to_lat(ctx.blkg);
787         blkiolat = iolat->blkiolat;
788         p = ctx.body;
789
790         ret = -EINVAL;
791         while ((tok = strsep(&p, " "))) {
792                 char key[16];
793                 char val[21];   /* 18446744073709551616 */
794
795                 if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
796                         goto out;
797
798                 if (!strcmp(key, "target")) {
799                         u64 v;
800
801                         if (!strcmp(val, "max"))
802                                 lat_val = 0;
803                         else if (sscanf(val, "%llu", &v) == 1)
804                                 lat_val = v * NSEC_PER_USEC;
805                         else
806                                 goto out;
807                 } else {
808                         goto out;
809                 }
810         }
811
812         /* Walk up the tree to see if our new val is lower than it should be. */
813         blkg = ctx.blkg;
814         oldval = iolat->min_lat_nsec;
815
816         enable = iolatency_set_min_lat_nsec(blkg, lat_val);
817         if (enable) {
818                 WARN_ON_ONCE(!blk_get_queue(blkg->q));
819                 blkg_get(blkg);
820         }
821
822         if (oldval != iolat->min_lat_nsec) {
823                 iolatency_clear_scaling(blkg);
824         }
825
826         ret = 0;
827 out:
828         blkg_conf_finish(&ctx);
829         if (ret == 0 && enable) {
830                 struct iolatency_grp *tmp = blkg_to_lat(blkg);
831                 struct blk_iolatency *blkiolat = tmp->blkiolat;
832
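		/*
		 * Presumably the queue is frozen here so that the enabled
		 * count cannot change while bios are in flight with the old
		 * state.
		 */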
833                 blk_mq_freeze_queue(blkg->q);
834
835                 if (enable == 1)
836                         atomic_inc(&blkiolat->enabled);
837                 else if (enable == -1)
838                         atomic_dec(&blkiolat->enabled);
839                 else
840                         WARN_ON_ONCE(1);
841
842                 blk_mq_unfreeze_queue(blkg->q);
843
844                 blkg_put(blkg);
845                 blk_put_queue(blkg->q);
846         }
847         return ret ?: nbytes;
848 }
849
850 static u64 iolatency_prfill_limit(struct seq_file *sf,
851                                   struct blkg_policy_data *pd, int off)
852 {
853         struct iolatency_grp *iolat = pd_to_lat(pd);
854         const char *dname = blkg_dev_name(pd->blkg);
855
856         if (!dname || !iolat->min_lat_nsec)
857                 return 0;
858         seq_printf(sf, "%s target=%llu\n",
859                    dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
860         return 0;
861 }
862
863 static int iolatency_print_limit(struct seq_file *sf, void *v)
864 {
865         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
866                           iolatency_prfill_limit,
867                           &blkcg_policy_iolatency, seq_cft(sf)->private, false);
868         return 0;
869 }
870
871 static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
872                                 size_t size)
873 {
874         struct iolatency_grp *iolat = pd_to_lat(pd);
875         unsigned long long avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
876         unsigned long long cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
877
878         if (iolat->rq_depth.max_depth == UINT_MAX)
879                 return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
880                                  avg_lat, cur_win);
881
882         return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
883                          iolat->rq_depth.max_depth, avg_lat, cur_win);
884 }
885
886
887 static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
888 {
889         struct iolatency_grp *iolat;
890
891         iolat = kzalloc_node(sizeof(*iolat), gfp, node);
892         if (!iolat)
893                 return NULL;
894         iolat->stats = __alloc_percpu_gfp(sizeof(struct blk_rq_stat),
895                                        __alignof__(struct blk_rq_stat), gfp);
896         if (!iolat->stats) {
897                 kfree(iolat);
898                 return NULL;
899         }
900         return &iolat->pd;
901 }
902
903 static void iolatency_pd_init(struct blkg_policy_data *pd)
904 {
905         struct iolatency_grp *iolat = pd_to_lat(pd);
906         struct blkcg_gq *blkg = lat_to_blkg(iolat);
907         struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
908         struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
909         u64 now = ktime_to_ns(ktime_get());
910         int cpu;
911
912         for_each_possible_cpu(cpu) {
913                 struct blk_rq_stat *stat;
914                 stat = per_cpu_ptr(iolat->stats, cpu);
915                 blk_rq_stat_init(stat);
916         }
917
918         rq_wait_init(&iolat->rq_wait);
919         spin_lock_init(&iolat->child_lat.lock);
920         iolat->rq_depth.queue_depth = blk_queue_depth(blkg->q);
921         iolat->rq_depth.max_depth = UINT_MAX;
922         iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
923         iolat->blkiolat = blkiolat;
924         iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
925         atomic64_set(&iolat->window_start, now);
926
927         /*
928          * We init things in list order, so the pd for the parent may not be
929          * init'ed yet for whatever reason.
930          */
931         if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
932                 struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
933                 atomic_set(&iolat->scale_cookie,
934                            atomic_read(&parent->child_lat.scale_cookie));
935         } else {
936                 atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
937         }
938
939         atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
940 }
941
942 static void iolatency_pd_offline(struct blkg_policy_data *pd)
943 {
944         struct iolatency_grp *iolat = pd_to_lat(pd);
945         struct blkcg_gq *blkg = lat_to_blkg(iolat);
946         struct blk_iolatency *blkiolat = iolat->blkiolat;
947         int ret;
948
949         ret = iolatency_set_min_lat_nsec(blkg, 0);
950         if (ret == 1)
951                 atomic_inc(&blkiolat->enabled);
952         if (ret == -1)
953                 atomic_dec(&blkiolat->enabled);
954         iolatency_clear_scaling(blkg);
955 }
956
957 static void iolatency_pd_free(struct blkg_policy_data *pd)
958 {
959         struct iolatency_grp *iolat = pd_to_lat(pd);
960         free_percpu(iolat->stats);
961         kfree(iolat);
962 }
963
964 static struct cftype iolatency_files[] = {
965         {
966                 .name = "latency",
967                 .flags = CFTYPE_NOT_ON_ROOT,
968                 .seq_show = iolatency_print_limit,
969                 .write = iolatency_set_limit,
970         },
971         {}
972 };
973
974 static struct blkcg_policy blkcg_policy_iolatency = {
975         .dfl_cftypes    = iolatency_files,
976         .pd_alloc_fn    = iolatency_pd_alloc,
977         .pd_init_fn     = iolatency_pd_init,
978         .pd_offline_fn  = iolatency_pd_offline,
979         .pd_free_fn     = iolatency_pd_free,
980         .pd_stat_fn     = iolatency_pd_stat,
981 };
982
983 static int __init iolatency_init(void)
984 {
985         return blkcg_policy_register(&blkcg_policy_iolatency);
986 }
987
988 static void __exit iolatency_exit(void)
989 {
990         return blkcg_policy_unregister(&blkcg_policy_iolatency);
991 }
992
993 module_init(iolatency_init);
994 module_exit(iolatency_exit);