1 // SPDX-License-Identifier: GPL-2.0
3 * Interface for controlling IO bandwidth on a request queue
5 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include <linux/blk-cgroup.h>
15 #include "blk-cgroup-rwstat.h"
16 #include "blk-throttle.h"
18 /* Max dispatch from a group in 1 round */
19 #define THROTL_GRP_QUANTUM 8
21 /* Total max dispatch from all groups in one round */
22 #define THROTL_QUANTUM 32
24 /* Throttling is performed over a time slice; after it expires the slice is renewed */
25 #define DFL_THROTL_SLICE_HD (HZ / 10)
26 #define DFL_THROTL_SLICE_SSD (HZ / 50)
27 #define MAX_THROTL_SLICE (HZ)
28 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
29 #define MIN_THROTL_BPS (320 * 1024)
30 #define MIN_THROTL_IOPS (10)
31 #define DFL_LATENCY_TARGET (-1L)
32 #define DFL_IDLE_THRESHOLD (0)
33 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
34 #define LATENCY_FILTERED_SSD (0)
36 * For HD, very small latencies come from sequential IO. Such IO tells us little
37 * about whether a cgroup's IO is impacted by others, hence we ignore it
39 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
41 /* A workqueue to queue throttle related work */
42 static struct workqueue_struct *kthrotld_workqueue;
45 THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
46 THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
49 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
51 /* We measure latency for request sizes from <= 4k to >= 1M */
52 #define LATENCY_BUCKET_SIZE 9
54 struct latency_bucket {
55 unsigned long total_latency; /* ns / 1024 */
59 struct avg_latency_bucket {
60 unsigned long latency; /* ns / 1024 */
66 /* service tree for active throtl groups */
67 struct throtl_service_queue service_queue;
69 struct request_queue *queue;
71 /* Total number of queued bios on the READ and WRITE lists */
72 unsigned int nr_queued[2];
74 unsigned int throtl_slice;
76 /* Work for dispatching throttled bios */
77 struct work_struct dispatch_work;
78 unsigned int limit_index;
79 bool limit_valid[LIMIT_CNT];
81 unsigned long low_upgrade_time;
82 unsigned long low_downgrade_time;
86 struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
87 struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
88 struct latency_bucket __percpu *latency_buckets[2];
89 unsigned long last_calculate_time;
90 unsigned long filtered_latency;
92 bool track_bio_latency;
95 static void throtl_pending_timer_fn(struct timer_list *t);
97 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
99 return pd_to_blkg(&tg->pd);
103 * sq_to_tg - return the throtl_grp the specified service queue belongs to
104 * @sq: the throtl_service_queue of interest
106 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
107 * embedded in throtl_data, %NULL is returned.
109 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
111 if (sq && sq->parent_sq)
112 return container_of(sq, struct throtl_grp, service_queue);
118 * sq_to_td - return throtl_data the specified service queue belongs to
119 * @sq: the throtl_service_queue of interest
121 * A service_queue can be embedded in either a throtl_grp or throtl_data.
122 * Determine the associated throtl_data accordingly and return it.
124 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
126 struct throtl_grp *tg = sq_to_tg(sq);
131 return container_of(sq, struct throtl_data, service_queue);
135 * A cgroup's LIMIT_MAX limit is scaled if a low limit is set. The scaling is
136 * meant to make IO dispatch smoother.
137 * Scale up: scale up linearly with the time elapsed since the upgrade. For
138 * every throtl_slice, the limit scales up by 1/2 of the .low limit until the
139 * limit hits the .max limit.
140 * Scale down: scale down exponentially if a cgroup doesn't hit its .low limit
142 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
144 /* arbitrary value to avoid too big scale */
145 if (td->scale < 4096 && time_after_eq(jiffies,
146 td->low_upgrade_time + td->scale * td->throtl_slice))
147 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
149 return low + (low >> 1) * td->scale;
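/*
 * Worked example of the scale-up above (illustrative numbers only, assuming
 * the default HD slice of HZ/10, i.e. 100ms): with a .low limit of 10MB/s,
 * 400ms after an upgrade td->scale becomes 4, so the adjusted limit is
 * 10MB/s + (10MB/s >> 1) * 4 = 30MB/s. The callers below then clamp the
 * result with min() against the configured .max limit.
 */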
152 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
154 struct blkcg_gq *blkg = tg_to_blkg(tg);
155 struct throtl_data *td;
158 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
162 ret = tg->bps[rw][td->limit_index];
163 if (ret == 0 && td->limit_index == LIMIT_LOW) {
164 /* intermediate node or iops isn't 0 */
165 if (!list_empty(&blkg->blkcg->css.children) ||
166 tg->iops[rw][td->limit_index])
169 return MIN_THROTL_BPS;
172 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
173 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
176 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
177 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
182 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
184 struct blkcg_gq *blkg = tg_to_blkg(tg);
185 struct throtl_data *td;
188 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
192 ret = tg->iops[rw][td->limit_index];
193 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
194 /* intermediate node or bps isn't 0 */
195 if (!list_empty(&blkg->blkcg->css.children) ||
196 tg->bps[rw][td->limit_index])
199 return MIN_THROTL_IOPS;
202 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
203 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
206 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
207 if (adjusted > UINT_MAX)
209 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
214 #define request_bucket_index(sectors) \
215 clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
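/*
 * Example bucket mapping (sizes given in sectors, power-of-two sizes for
 * clarity): a 4KiB request (8 sectors) maps to bucket 0, 64KiB (128 sectors)
 * to bucket 4, and anything of 1MiB (2048 sectors) or larger clamps to
 * bucket 8 (LATENCY_BUCKET_SIZE - 1).
 */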
218 * throtl_log - log debug message via blktrace
219 * @sq: the service_queue being reported
220 * @fmt: printf format string
223 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
224 * throtl_grp; otherwise, just "throtl".
226 #define throtl_log(sq, fmt, args...) do { \
227 struct throtl_grp *__tg = sq_to_tg((sq)); \
228 struct throtl_data *__td = sq_to_td((sq)); \
231 if (likely(!blk_trace_note_message_enabled(__td->queue))) \
234 blk_add_cgroup_trace_msg(__td->queue, \
235 tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
237 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
241 static inline unsigned int throtl_bio_data_size(struct bio *bio)
243 /* assume it's one sector */
244 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
246 return bio->bi_iter.bi_size;
249 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
251 INIT_LIST_HEAD(&qn->node);
252 bio_list_init(&qn->bios);
257 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
258 * @bio: bio being added
259 * @qn: qnode to add bio to
260 * @queued: the service_queue->queued[] list @qn belongs to
262 * Add @bio to @qn and put @qn on @queued if it's not already there.
263 * @qn->tg's reference count is bumped when @qn is activated. See the
264 * comment on top of throtl_qnode definition for details.
266 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
267 struct list_head *queued)
269 bio_list_add(&qn->bios, bio);
270 if (list_empty(&qn->node)) {
271 list_add_tail(&qn->node, queued);
272 blkg_get(tg_to_blkg(qn->tg));
277 * throtl_peek_queued - peek the first bio on a qnode list
278 * @queued: the qnode list to peek
280 static struct bio *throtl_peek_queued(struct list_head *queued)
282 struct throtl_qnode *qn;
285 if (list_empty(queued))
288 qn = list_first_entry(queued, struct throtl_qnode, node);
289 bio = bio_list_peek(&qn->bios);
295 * throtl_pop_queued - pop the first bio from a qnode list
296 * @queued: the qnode list to pop a bio from
297 * @tg_to_put: optional out argument for throtl_grp to put
299 * Pop the first bio from the qnode list @queued. After popping, the first
300 * qnode is removed from @queued if empty or moved to the end of @queued so
301 * that the popping order is round-robin.
303 * When the first qnode is removed, its associated throtl_grp should be put
304 * too. If @tg_to_put is NULL, this function automatically puts it;
305 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
306 * responsible for putting it.
308 static struct bio *throtl_pop_queued(struct list_head *queued,
309 struct throtl_grp **tg_to_put)
311 struct throtl_qnode *qn;
314 if (list_empty(queued))
317 qn = list_first_entry(queued, struct throtl_qnode, node);
318 bio = bio_list_pop(&qn->bios);
321 if (bio_list_empty(&qn->bios)) {
322 list_del_init(&qn->node);
326 blkg_put(tg_to_blkg(qn->tg));
328 list_move_tail(&qn->node, queued);
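/*
 * Illustration of the round-robin behaviour (hypothetical qnodes): if @queued
 * holds qnode A with bios {a1, a2} followed by qnode B with {b1}, successive
 * calls return a1 (A is moved to the tail), then b1 (B becomes empty and is
 * removed, releasing or handing out its tg reference), then a2.
 */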
334 /* init a service_queue, assumes the caller zeroed it */
335 static void throtl_service_queue_init(struct throtl_service_queue *sq)
337 INIT_LIST_HEAD(&sq->queued[0]);
338 INIT_LIST_HEAD(&sq->queued[1]);
339 sq->pending_tree = RB_ROOT_CACHED;
340 timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
343 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
344 struct request_queue *q,
347 struct throtl_grp *tg;
350 tg = kzalloc_node(sizeof(*tg), gfp, q->node);
354 if (blkg_rwstat_init(&tg->stat_bytes, gfp))
357 if (blkg_rwstat_init(&tg->stat_ios, gfp))
358 goto err_exit_stat_bytes;
360 throtl_service_queue_init(&tg->service_queue);
362 for (rw = READ; rw <= WRITE; rw++) {
363 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
364 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
367 RB_CLEAR_NODE(&tg->rb_node);
368 tg->bps[READ][LIMIT_MAX] = U64_MAX;
369 tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
370 tg->iops[READ][LIMIT_MAX] = UINT_MAX;
371 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
372 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
373 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
374 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
375 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
376 /* LIMIT_LOW will have default value 0 */
378 tg->latency_target = DFL_LATENCY_TARGET;
379 tg->latency_target_conf = DFL_LATENCY_TARGET;
380 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
381 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
386 blkg_rwstat_exit(&tg->stat_bytes);
392 static void throtl_pd_init(struct blkg_policy_data *pd)
394 struct throtl_grp *tg = pd_to_tg(pd);
395 struct blkcg_gq *blkg = tg_to_blkg(tg);
396 struct throtl_data *td = blkg->q->td;
397 struct throtl_service_queue *sq = &tg->service_queue;
400 * If on the default hierarchy, we switch to properly hierarchical
401 * behavior where limits on a given throtl_grp are applied to the
402 * whole subtree rather than just the group itself. e.g. if a 16M
403 * read_bps limit is set on the root group, the whole system can't
404 * exceed 16M for the device.
406 * If not on the default hierarchy, the broken flat hierarchy
407 * behavior is retained where all throtl_grps are treated as if
408 * they're all separate root groups right below throtl_data.
409 * Limits of a group don't interact with limits of other groups
410 * regardless of the position of the group in the hierarchy.
412 sq->parent_sq = &td->service_queue;
413 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
414 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
419 * Set has_rules[] if @tg or any of its parents have limits configured.
420 * This doesn't require walking up to the top of the hierarchy as the
421 * parent's has_rules[] is guaranteed to be correct.
423 static void tg_update_has_rules(struct throtl_grp *tg)
425 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
426 struct throtl_data *td = tg->td;
429 for (rw = READ; rw <= WRITE; rw++)
430 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
431 (td->limit_valid[td->limit_index] &&
432 (tg_bps_limit(tg, rw) != U64_MAX ||
433 tg_iops_limit(tg, rw) != UINT_MAX));
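/*
 * For example, if an ancestor group has a read bps limit configured, every
 * descendant ends up with has_rules[READ] set even without a limit of its
 * own, so its reads still enter the throttling path and are accounted
 * against the ancestor's limit.
 */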
436 static void throtl_pd_online(struct blkg_policy_data *pd)
438 struct throtl_grp *tg = pd_to_tg(pd);
440 * We don't want new groups to escape the limits of their ancestors.
441 * Update has_rules[] after a new group is brought online.
443 tg_update_has_rules(tg);
446 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
447 static void blk_throtl_update_limit_valid(struct throtl_data *td)
449 struct cgroup_subsys_state *pos_css;
450 struct blkcg_gq *blkg;
451 bool low_valid = false;
454 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
455 struct throtl_grp *tg = blkg_to_tg(blkg);
457 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
458 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
465 td->limit_valid[LIMIT_LOW] = low_valid;
468 static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
473 static void throtl_upgrade_state(struct throtl_data *td);
474 static void throtl_pd_offline(struct blkg_policy_data *pd)
476 struct throtl_grp *tg = pd_to_tg(pd);
478 tg->bps[READ][LIMIT_LOW] = 0;
479 tg->bps[WRITE][LIMIT_LOW] = 0;
480 tg->iops[READ][LIMIT_LOW] = 0;
481 tg->iops[WRITE][LIMIT_LOW] = 0;
483 blk_throtl_update_limit_valid(tg->td);
485 if (!tg->td->limit_valid[tg->td->limit_index])
486 throtl_upgrade_state(tg->td);
489 static void throtl_pd_free(struct blkg_policy_data *pd)
491 struct throtl_grp *tg = pd_to_tg(pd);
493 del_timer_sync(&tg->service_queue.pending_timer);
494 blkg_rwstat_exit(&tg->stat_bytes);
495 blkg_rwstat_exit(&tg->stat_ios);
499 static struct throtl_grp *
500 throtl_rb_first(struct throtl_service_queue *parent_sq)
504 n = rb_first_cached(&parent_sq->pending_tree);
508 return rb_entry_tg(n);
511 static void throtl_rb_erase(struct rb_node *n,
512 struct throtl_service_queue *parent_sq)
514 rb_erase_cached(n, &parent_sq->pending_tree);
516 --parent_sq->nr_pending;
519 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
521 struct throtl_grp *tg;
523 tg = throtl_rb_first(parent_sq);
527 parent_sq->first_pending_disptime = tg->disptime;
530 static void tg_service_queue_add(struct throtl_grp *tg)
532 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
533 struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
534 struct rb_node *parent = NULL;
535 struct throtl_grp *__tg;
536 unsigned long key = tg->disptime;
537 bool leftmost = true;
539 while (*node != NULL) {
541 __tg = rb_entry_tg(parent);
543 if (time_before(key, __tg->disptime))
544 node = &parent->rb_left;
546 node = &parent->rb_right;
551 rb_link_node(&tg->rb_node, parent, node);
552 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
556 static void throtl_enqueue_tg(struct throtl_grp *tg)
558 if (!(tg->flags & THROTL_TG_PENDING)) {
559 tg_service_queue_add(tg);
560 tg->flags |= THROTL_TG_PENDING;
561 tg->service_queue.parent_sq->nr_pending++;
565 static void throtl_dequeue_tg(struct throtl_grp *tg)
567 if (tg->flags & THROTL_TG_PENDING) {
568 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
569 tg->flags &= ~THROTL_TG_PENDING;
573 /* Call with queue lock held */
574 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
575 unsigned long expires)
577 unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
580 * Since we are adjusting the throttle limit dynamically, the sleep
581 * time calculated from a previous limit might be invalid. It's
582 * possible the cgroup would otherwise sleep for a very long time while
583 * no other cgroup has IO running, so cap the sleep to make sure the
584 * cgroup notices limit changes instead of missing the notification.
586 if (time_after(expires, max_expire))
587 expires = max_expire;
588 mod_timer(&sq->pending_timer, expires);
589 throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
590 expires - jiffies, jiffies);
594 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
595 * @sq: the service_queue to schedule dispatch for
596 * @force: force scheduling
598 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
599 * dispatch time of the first pending child. Returns %true if the timer
600 * is armed or there's no pending child left. Returns %false if the current
601 * dispatch window is still open and the caller should continue dispatching.
604 * If @force is %true, the dispatch timer is always scheduled and this
605 * function is guaranteed to return %true. This is to be used when the
606 * caller can't dispatch itself and needs to invoke pending_timer
607 * unconditionally. Note that forced scheduling is likely to induce short
608 * delay before dispatch starts even if @sq->first_pending_disptime is not
609 * in the future and thus shouldn't be used in hot paths.
611 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
614 /* any pending children left? */
618 update_min_dispatch_time(sq);
620 /* is the next dispatch time in the future? */
621 if (force || time_after(sq->first_pending_disptime, jiffies)) {
622 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
626 /* tell the caller to continue dispatching */
630 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
631 bool rw, unsigned long start)
633 tg->bytes_disp[rw] = 0;
636 atomic_set(&tg->io_split_cnt[rw], 0);
639 * The previous slice has expired. We must have trimmed it after the last
640 * bio dispatch. That means that since the start of the last slice, we
641 * never used that bandwidth. Do try to make use of it while giving credit.
644 if (time_after_eq(start, tg->slice_start[rw]))
645 tg->slice_start[rw] = start;
647 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
648 throtl_log(&tg->service_queue,
649 "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
650 rw == READ ? 'R' : 'W', tg->slice_start[rw],
651 tg->slice_end[rw], jiffies);
654 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
656 tg->bytes_disp[rw] = 0;
658 tg->slice_start[rw] = jiffies;
659 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
661 atomic_set(&tg->io_split_cnt[rw], 0);
663 throtl_log(&tg->service_queue,
664 "[%c] new slice start=%lu end=%lu jiffies=%lu",
665 rw == READ ? 'R' : 'W', tg->slice_start[rw],
666 tg->slice_end[rw], jiffies);
669 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
670 unsigned long jiffy_end)
672 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
675 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
676 unsigned long jiffy_end)
678 throtl_set_slice_end(tg, rw, jiffy_end);
679 throtl_log(&tg->service_queue,
680 "[%c] extend slice start=%lu end=%lu jiffies=%lu",
681 rw == READ ? 'R' : 'W', tg->slice_start[rw],
682 tg->slice_end[rw], jiffies);
685 /* Determine if previously allocated or extended slice is complete or not */
686 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
688 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
694 /* Trim the used slices and adjust slice start accordingly */
695 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
697 unsigned long nr_slices, time_elapsed, io_trim;
700 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
703 * If bps is unlimited (-1), then the time slice doesn't get
704 * renewed. Don't try to trim the slice if the slice is still in use. A new
705 * slice will start when appropriate.
707 if (throtl_slice_used(tg, rw))
711 * A bio has been dispatched, so also adjust slice_end. It might happen
712 * that the cgroup limit was initially very low, resulting in a high
713 * slice_end, but the limit was later bumped up and the bio dispatched
714 * sooner; in that case we need to reduce slice_end. A bogus, overly high
715 * slice_end is bad because it does not allow a new slice to start.
718 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
720 time_elapsed = jiffies - tg->slice_start[rw];
722 nr_slices = time_elapsed / tg->td->throtl_slice;
726 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
730 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
733 if (!bytes_trim && !io_trim)
736 if (tg->bytes_disp[rw] >= bytes_trim)
737 tg->bytes_disp[rw] -= bytes_trim;
739 tg->bytes_disp[rw] = 0;
741 if (tg->io_disp[rw] >= io_trim)
742 tg->io_disp[rw] -= io_trim;
746 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
748 throtl_log(&tg->service_queue,
749 "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
750 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
751 tg->slice_start[rw], tg->slice_end[rw], jiffies);
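/*
 * Worked example of the trim above (illustrative numbers, assuming HZ=1000
 * and the default HD throtl_slice of 100 jiffies): with a 1MiB/s bps limit
 * and 250 jiffies elapsed since slice_start, nr_slices = 2 and
 * bytes_trim = 1048576 * 100 * 2 / 1000 = 209715 bytes, so bytes_disp is
 * reduced by that much (or zeroed) and slice_start advances by 200 jiffies.
 */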
754 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
755 u32 iops_limit, unsigned long *wait)
757 bool rw = bio_data_dir(bio);
758 unsigned int io_allowed;
759 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
762 if (iops_limit == UINT_MAX) {
768 jiffy_elapsed = jiffies - tg->slice_start[rw];
770 /* Round up to the next throttle slice, wait time must be nonzero */
771 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
774 * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
775 * 1, so at most the elapsed jiffies should be equivalent to 1 second, as we
776 * will allow dispatch after 1 second and after that the slice should be renewed.
780 tmp = (u64)iops_limit * jiffy_elapsed_rnd;
784 io_allowed = UINT_MAX;
788 if (tg->io_disp[rw] + 1 <= io_allowed) {
794 /* Calc approx time to dispatch */
795 jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
802 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
803 u64 bps_limit, unsigned long *wait)
805 bool rw = bio_data_dir(bio);
806 u64 bytes_allowed, extra_bytes, tmp;
807 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
808 unsigned int bio_size = throtl_bio_data_size(bio);
810 if (bps_limit == U64_MAX) {
816 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
818 /* Slice has just started. Consider one slice interval */
820 jiffy_elapsed_rnd = tg->td->throtl_slice;
822 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
824 tmp = bps_limit * jiffy_elapsed_rnd;
828 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
834 /* Calc approx time to dispatch */
835 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
836 jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
842 * This wait time does not take into account the rounding
843 * up we did. Add that time as well.
845 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
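/*
 * Worked example (illustrative numbers, assuming HZ=1000): with a 1MiB/s
 * bps limit and the dispatched bytes plus this bio exceeding bytes_allowed
 * by 512KiB, extra_bytes * HZ / bps_limit = 524288 * 1000 / 1048576 = 500
 * jiffies, i.e. roughly half a second, plus the rounding term above.
 */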
852 * Returns whether one can dispatch a bio or not. Also returns the approximate
853 * number of jiffies to wait before this bio is within the IO rate and can be dispatched
855 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
858 bool rw = bio_data_dir(bio);
859 unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
860 u64 bps_limit = tg_bps_limit(tg, rw);
861 u32 iops_limit = tg_iops_limit(tg, rw);
864 * Currently the whole state machine of the group depends on the first bio
865 * queued in the group's bio list. So one should not call
866 * this function with a different bio if there are other bios queued.
869 BUG_ON(tg->service_queue.nr_queued[rw] &&
870 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
872 /* If tg->bps = -1, then BW is unlimited */
873 if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
880 * If the previous slice expired, start a new one; otherwise renew/extend the
881 * existing slice to make sure it is at least one throtl_slice interval
882 * long from now. A new slice is started only for an empty throttle group.
883 * If there are queued bios, there should already be an active
884 * slice and it should be extended instead.
886 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
887 throtl_start_new_slice(tg, rw);
889 if (time_before(tg->slice_end[rw],
890 jiffies + tg->td->throtl_slice))
891 throtl_extend_slice(tg, rw,
892 jiffies + tg->td->throtl_slice);
895 if (iops_limit != UINT_MAX)
896 tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
898 if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
899 tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
905 max_wait = max(bps_wait, iops_wait);
910 if (time_before(tg->slice_end[rw], jiffies + max_wait))
911 throtl_extend_slice(tg, rw, jiffies + max_wait);
916 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
918 bool rw = bio_data_dir(bio);
919 unsigned int bio_size = throtl_bio_data_size(bio);
921 /* Charge the bio to the group */
922 tg->bytes_disp[rw] += bio_size;
924 tg->last_bytes_disp[rw] += bio_size;
925 tg->last_io_disp[rw]++;
928 * BIO_THROTTLED is used to prevent the same bio from being throttled
929 * more than once, as a throttled bio will go through blk-throtl a
930 * second time when it eventually gets issued. Set it when a bio
931 * is being charged to a tg.
933 if (!bio_flagged(bio, BIO_THROTTLED))
934 bio_set_flag(bio, BIO_THROTTLED);
938 * throtl_add_bio_tg - add a bio to the specified throtl_grp
941 * @tg: the target throtl_grp
943 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
944 * tg->qnode_on_self[] is used.
946 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
947 struct throtl_grp *tg)
949 struct throtl_service_queue *sq = &tg->service_queue;
950 bool rw = bio_data_dir(bio);
953 qn = &tg->qnode_on_self[rw];
956 * If @tg doesn't currently have any bios queued in the same
957 * direction, queueing @bio can change when @tg should be
958 * dispatched. Mark that @tg was empty. This is automatically
959 * cleared on the next tg_update_disptime().
961 if (!sq->nr_queued[rw])
962 tg->flags |= THROTL_TG_WAS_EMPTY;
964 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
967 throtl_enqueue_tg(tg);
970 static void tg_update_disptime(struct throtl_grp *tg)
972 struct throtl_service_queue *sq = &tg->service_queue;
973 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
976 bio = throtl_peek_queued(&sq->queued[READ]);
978 tg_may_dispatch(tg, bio, &read_wait);
980 bio = throtl_peek_queued(&sq->queued[WRITE]);
982 tg_may_dispatch(tg, bio, &write_wait);
984 min_wait = min(read_wait, write_wait);
985 disptime = jiffies + min_wait;
987 /* Update dispatch time */
988 throtl_dequeue_tg(tg);
989 tg->disptime = disptime;
990 throtl_enqueue_tg(tg);
992 /* see throtl_add_bio_tg() */
993 tg->flags &= ~THROTL_TG_WAS_EMPTY;
996 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
997 struct throtl_grp *parent_tg, bool rw)
999 if (throtl_slice_used(parent_tg, rw)) {
1000 throtl_start_new_slice_with_credit(parent_tg, rw,
1001 child_tg->slice_start[rw]);
1006 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1008 struct throtl_service_queue *sq = &tg->service_queue;
1009 struct throtl_service_queue *parent_sq = sq->parent_sq;
1010 struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1011 struct throtl_grp *tg_to_put = NULL;
1015 * @bio is being transferred from @tg to @parent_sq. Popping a bio
1016 * from @tg may put its reference and @parent_sq might end up
1017 * getting released prematurely. Remember the tg to put and put it
1018 * after @bio is transferred to @parent_sq.
1020 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1021 sq->nr_queued[rw]--;
1023 throtl_charge_bio(tg, bio);
1026 * If our parent is another tg, we just need to transfer @bio to
1027 * the parent using throtl_add_bio_tg(). If our parent is
1028 * @td->service_queue, @bio is ready to be issued. Put it on its
1029 * bio_lists[] and decrease total number queued. The caller is
1030 * responsible for issuing these bios.
1033 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1034 start_parent_slice_with_credit(tg, parent_tg, rw);
1036 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1037 &parent_sq->queued[rw]);
1038 BUG_ON(tg->td->nr_queued[rw] <= 0);
1039 tg->td->nr_queued[rw]--;
1042 throtl_trim_slice(tg, rw);
1045 blkg_put(tg_to_blkg(tg_to_put));
1048 static int throtl_dispatch_tg(struct throtl_grp *tg)
1050 struct throtl_service_queue *sq = &tg->service_queue;
1051 unsigned int nr_reads = 0, nr_writes = 0;
1052 unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
1053 unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
1056 /* Try to dispatch 75% READS and 25% WRITES */
1058 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1059 tg_may_dispatch(tg, bio, NULL)) {
1061 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1064 if (nr_reads >= max_nr_reads)
1068 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1069 tg_may_dispatch(tg, bio, NULL)) {
1071 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1074 if (nr_writes >= max_nr_writes)
1078 return nr_reads + nr_writes;
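/*
 * With THROTL_GRP_QUANTUM = 8 this works out to at most 6 reads and 2 writes
 * per group per round; THROTL_QUANTUM (32) caps the total dispatched across
 * all groups in one round of throtl_select_dispatch() below.
 */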
1081 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1083 unsigned int nr_disp = 0;
1086 struct throtl_grp *tg;
1087 struct throtl_service_queue *sq;
1089 if (!parent_sq->nr_pending)
1092 tg = throtl_rb_first(parent_sq);
1096 if (time_before(jiffies, tg->disptime))
1099 throtl_dequeue_tg(tg);
1101 nr_disp += throtl_dispatch_tg(tg);
1103 sq = &tg->service_queue;
1104 if (sq->nr_queued[0] || sq->nr_queued[1])
1105 tg_update_disptime(tg);
1107 if (nr_disp >= THROTL_QUANTUM)
1114 static bool throtl_can_upgrade(struct throtl_data *td,
1115 struct throtl_grp *this_tg);
1117 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1118 * @t: the pending_timer member of the throtl_service_queue being serviced
1120 * This timer is armed when a child throtl_grp with active bios becomes
1121 * pending and queued on the service_queue's pending_tree and expires when
1122 * the first child throtl_grp should be dispatched. This function
1123 * dispatches bios from the child throtl_grps to the parent service_queue.
1126 * If the parent's parent is another throtl_grp, dispatching is propagated
1127 * by either arming its pending_timer or repeating dispatch directly. If
1128 * the top-level service_tree is reached, throtl_data->dispatch_work is
1129 * kicked so that the ready bios are issued.
1131 static void throtl_pending_timer_fn(struct timer_list *t)
1133 struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1134 struct throtl_grp *tg = sq_to_tg(sq);
1135 struct throtl_data *td = sq_to_td(sq);
1136 struct request_queue *q = td->queue;
1137 struct throtl_service_queue *parent_sq;
1141 spin_lock_irq(&q->queue_lock);
1142 if (throtl_can_upgrade(td, NULL))
1143 throtl_upgrade_state(td);
1146 parent_sq = sq->parent_sq;
1150 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1151 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1152 sq->nr_queued[READ], sq->nr_queued[WRITE]);
1154 ret = throtl_select_dispatch(sq);
1156 throtl_log(sq, "bios disp=%u", ret);
1160 if (throtl_schedule_next_dispatch(sq, false))
1163 /* this dispatch window is still open, relax and repeat */
1164 spin_unlock_irq(&q->queue_lock);
1166 spin_lock_irq(&q->queue_lock);
1173 /* @parent_sq is another throtl_grp, propagate dispatch */
1174 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1175 tg_update_disptime(tg);
1176 if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1177 /* window is already open, repeat dispatching */
1184 /* reached the top-level, queue issuing */
1185 queue_work(kthrotld_workqueue, &td->dispatch_work);
1188 spin_unlock_irq(&q->queue_lock);
1192 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1193 * @work: work item being executed
1195 * This function is queued for execution when bios reach the bio_lists[]
1196 * of throtl_data->service_queue. Those bios are ready and are issued by this function.
1199 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1201 struct throtl_data *td = container_of(work, struct throtl_data,
1203 struct throtl_service_queue *td_sq = &td->service_queue;
1204 struct request_queue *q = td->queue;
1205 struct bio_list bio_list_on_stack;
1207 struct blk_plug plug;
1210 bio_list_init(&bio_list_on_stack);
1212 spin_lock_irq(&q->queue_lock);
1213 for (rw = READ; rw <= WRITE; rw++)
1214 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1215 bio_list_add(&bio_list_on_stack, bio);
1216 spin_unlock_irq(&q->queue_lock);
1218 if (!bio_list_empty(&bio_list_on_stack)) {
1219 blk_start_plug(&plug);
1220 while ((bio = bio_list_pop(&bio_list_on_stack)))
1221 submit_bio_noacct(bio);
1222 blk_finish_plug(&plug);
1226 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1229 struct throtl_grp *tg = pd_to_tg(pd);
1230 u64 v = *(u64 *)((void *)tg + off);
1234 return __blkg_prfill_u64(sf, pd, v);
1237 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1240 struct throtl_grp *tg = pd_to_tg(pd);
1241 unsigned int v = *(unsigned int *)((void *)tg + off);
1245 return __blkg_prfill_u64(sf, pd, v);
1248 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1250 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1251 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1255 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1257 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1258 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1262 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1264 struct throtl_service_queue *sq = &tg->service_queue;
1265 struct cgroup_subsys_state *pos_css;
1266 struct blkcg_gq *blkg;
1268 throtl_log(&tg->service_queue,
1269 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1270 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1271 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1274 * Update has_rules[] flags for the updated tg's subtree. A tg is
1275 * considered to have rules if either the tg itself or any of its
1276 * ancestors has rules. This identifies groups without any
1277 * restrictions in the whole hierarchy and allows them to bypass blk-throttle.
1280 blkg_for_each_descendant_pre(blkg, pos_css,
1281 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1282 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1283 struct throtl_grp *parent_tg;
1285 tg_update_has_rules(this_tg);
1286 /* ignore root/second level */
1287 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1288 !blkg->parent->parent)
1290 parent_tg = blkg_to_tg(blkg->parent);
1292 * make sure all children have a lower idle time threshold and a
1293 * higher latency target
1295 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1296 parent_tg->idletime_threshold);
1297 this_tg->latency_target = max(this_tg->latency_target,
1298 parent_tg->latency_target);
1302 * We're already holding queue_lock and know @tg is valid. Let's
1303 * apply the new config directly.
1305 * Restart the slices for both READ and WRITE. It might happen
1306 * that a group's limits are dropped suddenly and we don't want to
1307 * account recently dispatched IO at the new, lower rate.
1309 throtl_start_new_slice(tg, READ);
1310 throtl_start_new_slice(tg, WRITE);
1312 if (tg->flags & THROTL_TG_PENDING) {
1313 tg_update_disptime(tg);
1314 throtl_schedule_next_dispatch(sq->parent_sq, true);
1318 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1319 char *buf, size_t nbytes, loff_t off, bool is_u64)
1321 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1322 struct blkg_conf_ctx ctx;
1323 struct throtl_grp *tg;
1327 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1332 if (sscanf(ctx.body, "%llu", &v) != 1)
1337 tg = blkg_to_tg(ctx.blkg);
1340 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1342 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1344 tg_conf_updated(tg, false);
1347 blkg_conf_finish(&ctx);
1348 return ret ?: nbytes;
1351 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1352 char *buf, size_t nbytes, loff_t off)
1354 return tg_set_conf(of, buf, nbytes, off, true);
1357 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1358 char *buf, size_t nbytes, loff_t off)
1360 return tg_set_conf(of, buf, nbytes, off, false);
1363 static int tg_print_rwstat(struct seq_file *sf, void *v)
1365 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1366 blkg_prfill_rwstat, &blkcg_policy_throtl,
1367 seq_cft(sf)->private, true);
1371 static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1372 struct blkg_policy_data *pd, int off)
1374 struct blkg_rwstat_sample sum;
1376 blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1378 return __blkg_prfill_rwstat(sf, pd, &sum);
1381 static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1383 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1384 tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1385 seq_cft(sf)->private, true);
1389 static struct cftype throtl_legacy_files[] = {
1391 .name = "throttle.read_bps_device",
1392 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1393 .seq_show = tg_print_conf_u64,
1394 .write = tg_set_conf_u64,
1397 .name = "throttle.write_bps_device",
1398 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1399 .seq_show = tg_print_conf_u64,
1400 .write = tg_set_conf_u64,
1403 .name = "throttle.read_iops_device",
1404 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1405 .seq_show = tg_print_conf_uint,
1406 .write = tg_set_conf_uint,
1409 .name = "throttle.write_iops_device",
1410 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1411 .seq_show = tg_print_conf_uint,
1412 .write = tg_set_conf_uint,
1415 .name = "throttle.io_service_bytes",
1416 .private = offsetof(struct throtl_grp, stat_bytes),
1417 .seq_show = tg_print_rwstat,
1420 .name = "throttle.io_service_bytes_recursive",
1421 .private = offsetof(struct throtl_grp, stat_bytes),
1422 .seq_show = tg_print_rwstat_recursive,
1425 .name = "throttle.io_serviced",
1426 .private = offsetof(struct throtl_grp, stat_ios),
1427 .seq_show = tg_print_rwstat,
1430 .name = "throttle.io_serviced_recursive",
1431 .private = offsetof(struct throtl_grp, stat_ios),
1432 .seq_show = tg_print_rwstat_recursive,
1437 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1440 struct throtl_grp *tg = pd_to_tg(pd);
1441 const char *dname = blkg_dev_name(pd->blkg);
1442 char bufs[4][21] = { "max", "max", "max", "max" };
1444 unsigned int iops_dft;
1445 char idle_time[26] = "";
1446 char latency_time[26] = "";
1451 if (off == LIMIT_LOW) {
1456 iops_dft = UINT_MAX;
1459 if (tg->bps_conf[READ][off] == bps_dft &&
1460 tg->bps_conf[WRITE][off] == bps_dft &&
1461 tg->iops_conf[READ][off] == iops_dft &&
1462 tg->iops_conf[WRITE][off] == iops_dft &&
1463 (off != LIMIT_LOW ||
1464 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1465 tg->latency_target_conf == DFL_LATENCY_TARGET)))
1468 if (tg->bps_conf[READ][off] != U64_MAX)
1469 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1470 tg->bps_conf[READ][off]);
1471 if (tg->bps_conf[WRITE][off] != U64_MAX)
1472 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1473 tg->bps_conf[WRITE][off]);
1474 if (tg->iops_conf[READ][off] != UINT_MAX)
1475 snprintf(bufs[2], sizeof(bufs[2]), "%u",
1476 tg->iops_conf[READ][off]);
1477 if (tg->iops_conf[WRITE][off] != UINT_MAX)
1478 snprintf(bufs[3], sizeof(bufs[3]), "%u",
1479 tg->iops_conf[WRITE][off]);
1480 if (off == LIMIT_LOW) {
1481 if (tg->idletime_threshold_conf == ULONG_MAX)
1482 strcpy(idle_time, " idle=max");
1484 snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1485 tg->idletime_threshold_conf);
1487 if (tg->latency_target_conf == ULONG_MAX)
1488 strcpy(latency_time, " latency=max");
1490 snprintf(latency_time, sizeof(latency_time),
1491 " latency=%lu", tg->latency_target_conf);
1494 seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1495 dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1500 static int tg_print_limit(struct seq_file *sf, void *v)
1502 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1503 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1507 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1508 char *buf, size_t nbytes, loff_t off)
1510 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1511 struct blkg_conf_ctx ctx;
1512 struct throtl_grp *tg;
1514 unsigned long idle_time;
1515 unsigned long latency_time;
1517 int index = of_cft(of)->private;
1519 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1523 tg = blkg_to_tg(ctx.blkg);
1525 v[0] = tg->bps_conf[READ][index];
1526 v[1] = tg->bps_conf[WRITE][index];
1527 v[2] = tg->iops_conf[READ][index];
1528 v[3] = tg->iops_conf[WRITE][index];
1530 idle_time = tg->idletime_threshold_conf;
1531 latency_time = tg->latency_target_conf;
1533 char tok[27]; /* wiops=18446744073709551616 */
1538 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1547 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1555 if (!strcmp(tok, "rbps") && val > 1)
1557 else if (!strcmp(tok, "wbps") && val > 1)
1559 else if (!strcmp(tok, "riops") && val > 1)
1560 v[2] = min_t(u64, val, UINT_MAX);
1561 else if (!strcmp(tok, "wiops") && val > 1)
1562 v[3] = min_t(u64, val, UINT_MAX);
1563 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1565 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1571 tg->bps_conf[READ][index] = v[0];
1572 tg->bps_conf[WRITE][index] = v[1];
1573 tg->iops_conf[READ][index] = v[2];
1574 tg->iops_conf[WRITE][index] = v[3];
1576 if (index == LIMIT_MAX) {
1577 tg->bps[READ][index] = v[0];
1578 tg->bps[WRITE][index] = v[1];
1579 tg->iops[READ][index] = v[2];
1580 tg->iops[WRITE][index] = v[3];
1582 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1583 tg->bps_conf[READ][LIMIT_MAX]);
1584 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1585 tg->bps_conf[WRITE][LIMIT_MAX]);
1586 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1587 tg->iops_conf[READ][LIMIT_MAX]);
1588 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1589 tg->iops_conf[WRITE][LIMIT_MAX]);
1590 tg->idletime_threshold_conf = idle_time;
1591 tg->latency_target_conf = latency_time;
1593 /* force user to configure all settings for low limit */
1594 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1595 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1596 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1597 tg->latency_target_conf == DFL_LATENCY_TARGET) {
1598 tg->bps[READ][LIMIT_LOW] = 0;
1599 tg->bps[WRITE][LIMIT_LOW] = 0;
1600 tg->iops[READ][LIMIT_LOW] = 0;
1601 tg->iops[WRITE][LIMIT_LOW] = 0;
1602 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1603 tg->latency_target = DFL_LATENCY_TARGET;
1604 } else if (index == LIMIT_LOW) {
1605 tg->idletime_threshold = tg->idletime_threshold_conf;
1606 tg->latency_target = tg->latency_target_conf;
1609 blk_throtl_update_limit_valid(tg->td);
1610 if (tg->td->limit_valid[LIMIT_LOW]) {
1611 if (index == LIMIT_LOW)
1612 tg->td->limit_index = LIMIT_LOW;
1614 tg->td->limit_index = LIMIT_MAX;
1615 tg_conf_updated(tg, index == LIMIT_LOW &&
1616 tg->td->limit_valid[LIMIT_LOW]);
1619 blkg_conf_finish(&ctx);
1620 return ret ?: nbytes;
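/*
 * Example of the syntax parsed above, written to the cgroup v2 io.max or
 * io.low files (the "8:16" MAJ:MIN and all values are hypothetical):
 *
 *   echo "8:16 rbps=2097152 wbps=max riops=1000 wiops=max" > io.max
 *   echo "8:16 rbps=1048576 wbps=max idle=200 latency=2000" > io.low
 *
 * "idle" and "latency" are only accepted for io.low; both are roughly in
 * microseconds (the code internally tracks ns >> 10).
 */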
1623 static struct cftype throtl_files[] = {
1624 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1627 .flags = CFTYPE_NOT_ON_ROOT,
1628 .seq_show = tg_print_limit,
1629 .write = tg_set_limit,
1630 .private = LIMIT_LOW,
1635 .flags = CFTYPE_NOT_ON_ROOT,
1636 .seq_show = tg_print_limit,
1637 .write = tg_set_limit,
1638 .private = LIMIT_MAX,
1643 static void throtl_shutdown_wq(struct request_queue *q)
1645 struct throtl_data *td = q->td;
1647 cancel_work_sync(&td->dispatch_work);
1650 struct blkcg_policy blkcg_policy_throtl = {
1651 .dfl_cftypes = throtl_files,
1652 .legacy_cftypes = throtl_legacy_files,
1654 .pd_alloc_fn = throtl_pd_alloc,
1655 .pd_init_fn = throtl_pd_init,
1656 .pd_online_fn = throtl_pd_online,
1657 .pd_offline_fn = throtl_pd_offline,
1658 .pd_free_fn = throtl_pd_free,
1661 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1663 unsigned long rtime = jiffies, wtime = jiffies;
1665 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1666 rtime = tg->last_low_overflow_time[READ];
1667 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1668 wtime = tg->last_low_overflow_time[WRITE];
1669 return min(rtime, wtime);
1672 /* tg should not be an intermediate node */
1673 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1675 struct throtl_service_queue *parent_sq;
1676 struct throtl_grp *parent = tg;
1677 unsigned long ret = __tg_last_low_overflow_time(tg);
1680 parent_sq = parent->service_queue.parent_sq;
1681 parent = sq_to_tg(parent_sq);
1686 * The parent doesn't have a low limit, so it always reaches its low
1687 * limit. Its overflow time is useless for its children.
1689 if (!parent->bps[READ][LIMIT_LOW] &&
1690 !parent->iops[READ][LIMIT_LOW] &&
1691 !parent->bps[WRITE][LIMIT_LOW] &&
1692 !parent->iops[WRITE][LIMIT_LOW])
1694 if (time_after(__tg_last_low_overflow_time(parent), ret))
1695 ret = __tg_last_low_overflow_time(parent);
1700 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1703 * A cgroup is idle if:
1704 * - a single idle period is too long: longer than a fixed value (in case the
1705 *   user configures too big a threshold) or 4 times the idletime threshold
1706 * - the average think time is greater than the threshold
1707 * - IO latency is largely below the threshold
1712 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1713 ret = tg->latency_target == DFL_LATENCY_TARGET ||
1714 tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1715 (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1716 tg->avg_idletime > tg->idletime_threshold ||
1717 (tg->latency_target && tg->bio_cnt &&
1718 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1719 throtl_log(&tg->service_queue,
1720 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1721 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1722 tg->bio_cnt, ret, tg->td->scale);
1726 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1728 struct throtl_service_queue *sq = &tg->service_queue;
1729 bool read_limit, write_limit;
1732 * if the cgroup reaches its low limit (a low limit of 0 is always considered
1733 * reached), it's ok to upgrade to the next limit
1735 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1736 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1737 if (!read_limit && !write_limit)
1739 if (read_limit && sq->nr_queued[READ] &&
1740 (!write_limit || sq->nr_queued[WRITE]))
1742 if (write_limit && sq->nr_queued[WRITE] &&
1743 (!read_limit || sq->nr_queued[READ]))
1746 if (time_after_eq(jiffies,
1747 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1748 throtl_tg_is_idle(tg))
1753 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1756 if (throtl_tg_can_upgrade(tg))
1758 tg = sq_to_tg(tg->service_queue.parent_sq);
1759 if (!tg || !tg_to_blkg(tg)->parent)
1765 static bool throtl_can_upgrade(struct throtl_data *td,
1766 struct throtl_grp *this_tg)
1768 struct cgroup_subsys_state *pos_css;
1769 struct blkcg_gq *blkg;
1771 if (td->limit_index != LIMIT_LOW)
1774 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1778 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1779 struct throtl_grp *tg = blkg_to_tg(blkg);
1783 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1785 if (!throtl_hierarchy_can_upgrade(tg)) {
1794 static void throtl_upgrade_check(struct throtl_grp *tg)
1796 unsigned long now = jiffies;
1798 if (tg->td->limit_index != LIMIT_LOW)
1801 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1804 tg->last_check_time = now;
1806 if (!time_after_eq(now,
1807 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1810 if (throtl_can_upgrade(tg->td, NULL))
1811 throtl_upgrade_state(tg->td);
1814 static void throtl_upgrade_state(struct throtl_data *td)
1816 struct cgroup_subsys_state *pos_css;
1817 struct blkcg_gq *blkg;
1819 throtl_log(&td->service_queue, "upgrade to max");
1820 td->limit_index = LIMIT_MAX;
1821 td->low_upgrade_time = jiffies;
1824 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1825 struct throtl_grp *tg = blkg_to_tg(blkg);
1826 struct throtl_service_queue *sq = &tg->service_queue;
1828 tg->disptime = jiffies - 1;
1829 throtl_select_dispatch(sq);
1830 throtl_schedule_next_dispatch(sq, true);
1833 throtl_select_dispatch(&td->service_queue);
1834 throtl_schedule_next_dispatch(&td->service_queue, true);
1835 queue_work(kthrotld_workqueue, &td->dispatch_work);
1838 static void throtl_downgrade_state(struct throtl_data *td)
1842 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1844 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1848 td->limit_index = LIMIT_LOW;
1849 td->low_downgrade_time = jiffies;
1852 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1854 struct throtl_data *td = tg->td;
1855 unsigned long now = jiffies;
1858 * If the cgroup is below its low limit, consider downgrading and throttling other cgroups
1861 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1862 time_after_eq(now, tg_last_low_overflow_time(tg) +
1863 td->throtl_slice) &&
1864 (!throtl_tg_is_idle(tg) ||
1865 !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1870 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1873 if (!throtl_tg_can_downgrade(tg))
1875 tg = sq_to_tg(tg->service_queue.parent_sq);
1876 if (!tg || !tg_to_blkg(tg)->parent)
1882 static void throtl_downgrade_check(struct throtl_grp *tg)
1886 unsigned long elapsed_time;
1887 unsigned long now = jiffies;
1889 if (tg->td->limit_index != LIMIT_MAX ||
1890 !tg->td->limit_valid[LIMIT_LOW])
1892 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1894 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1897 elapsed_time = now - tg->last_check_time;
1898 tg->last_check_time = now;
1900 if (time_before(now, tg_last_low_overflow_time(tg) +
1901 tg->td->throtl_slice))
1904 if (tg->bps[READ][LIMIT_LOW]) {
1905 bps = tg->last_bytes_disp[READ] * HZ;
1906 do_div(bps, elapsed_time);
1907 if (bps >= tg->bps[READ][LIMIT_LOW])
1908 tg->last_low_overflow_time[READ] = now;
1911 if (tg->bps[WRITE][LIMIT_LOW]) {
1912 bps = tg->last_bytes_disp[WRITE] * HZ;
1913 do_div(bps, elapsed_time);
1914 if (bps >= tg->bps[WRITE][LIMIT_LOW])
1915 tg->last_low_overflow_time[WRITE] = now;
1918 if (tg->iops[READ][LIMIT_LOW]) {
1919 tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
1920 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1921 if (iops >= tg->iops[READ][LIMIT_LOW])
1922 tg->last_low_overflow_time[READ] = now;
1925 if (tg->iops[WRITE][LIMIT_LOW]) {
1926 tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
1927 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
1928 if (iops >= tg->iops[WRITE][LIMIT_LOW])
1929 tg->last_low_overflow_time[WRITE] = now;
1933 * If the cgroup is below its low limit, consider downgrading and throttling other cgroups
1936 if (throtl_hierarchy_can_downgrade(tg))
1937 throtl_downgrade_state(tg->td);
1939 tg->last_bytes_disp[READ] = 0;
1940 tg->last_bytes_disp[WRITE] = 0;
1941 tg->last_io_disp[READ] = 0;
1942 tg->last_io_disp[WRITE] = 0;
1945 static void blk_throtl_update_idletime(struct throtl_grp *tg)
1948 unsigned long last_finish_time = tg->last_finish_time;
1950 if (last_finish_time == 0)
1953 now = ktime_get_ns() >> 10;
1954 if (now <= last_finish_time ||
1955 last_finish_time == tg->checked_last_finish_time)
1958 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
1959 tg->checked_last_finish_time = last_finish_time;
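/*
 * avg_idletime is an exponentially weighted moving average with the newest
 * sample weighted 1/8: e.g. (illustrative numbers) a previous average of 100
 * and a new idle sample of 900 give (100 * 7 + 900) >> 3 = 200, in the same
 * ns >> 10 (~microsecond) units as last_finish_time.
 */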
1962 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1963 static void throtl_update_latency_buckets(struct throtl_data *td)
1965 struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
1967 unsigned long last_latency[2] = { 0 };
1968 unsigned long latency[2];
1970 if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
1972 if (time_before(jiffies, td->last_calculate_time + HZ))
1974 td->last_calculate_time = jiffies;
1976 memset(avg_latency, 0, sizeof(avg_latency));
1977 for (rw = READ; rw <= WRITE; rw++) {
1978 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
1979 struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
1981 for_each_possible_cpu(cpu) {
1982 struct latency_bucket *bucket;
1984 /* this isn't race free, but ok in practice */
1985 bucket = per_cpu_ptr(td->latency_buckets[rw],
1987 tmp->total_latency += bucket[i].total_latency;
1988 tmp->samples += bucket[i].samples;
1989 bucket[i].total_latency = 0;
1990 bucket[i].samples = 0;
1993 if (tmp->samples >= 32) {
1994 int samples = tmp->samples;
1996 latency[rw] = tmp->total_latency;
1998 tmp->total_latency = 0;
2000 latency[rw] /= samples;
2001 if (latency[rw] == 0)
2003 avg_latency[rw][i].latency = latency[rw];
2008 for (rw = READ; rw <= WRITE; rw++) {
2009 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2010 if (!avg_latency[rw][i].latency) {
2011 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2012 td->avg_buckets[rw][i].latency =
2017 if (!td->avg_buckets[rw][i].valid)
2018 latency[rw] = avg_latency[rw][i].latency;
2020 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2021 avg_latency[rw][i].latency) >> 3;
2023 td->avg_buckets[rw][i].latency = max(latency[rw],
2025 td->avg_buckets[rw][i].valid = true;
2026 last_latency[rw] = td->avg_buckets[rw][i].latency;
2030 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2031 throtl_log(&td->service_queue,
2032 "Latency bucket %d: read latency=%ld, read valid=%d, "
2033 "write latency=%ld, write valid=%d", i,
2034 td->avg_buckets[READ][i].latency,
2035 td->avg_buckets[READ][i].valid,
2036 td->avg_buckets[WRITE][i].latency,
2037 td->avg_buckets[WRITE][i].valid);
2040 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2045 void blk_throtl_charge_bio_split(struct bio *bio)
2047 struct blkcg_gq *blkg = bio->bi_blkg;
2048 struct throtl_grp *parent = blkg_to_tg(blkg);
2049 struct throtl_service_queue *parent_sq;
2050 bool rw = bio_data_dir(bio);
2053 if (!parent->has_rules[rw])
2056 atomic_inc(&parent->io_split_cnt[rw]);
2057 atomic_inc(&parent->last_io_split_cnt[rw]);
2059 parent_sq = parent->service_queue.parent_sq;
2060 parent = sq_to_tg(parent_sq);
2064 bool __blk_throtl_bio(struct bio *bio)
2066 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2067 struct blkcg_gq *blkg = bio->bi_blkg;
2068 struct throtl_qnode *qn = NULL;
2069 struct throtl_grp *tg = blkg_to_tg(blkg);
2070 struct throtl_service_queue *sq;
2071 bool rw = bio_data_dir(bio);
2072 bool throttled = false;
2073 struct throtl_data *td = tg->td;
2077 if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
2078 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
2079 bio->bi_iter.bi_size);
2080 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
2083 spin_lock_irq(&q->queue_lock);
2085 throtl_update_latency_buckets(td);
2087 blk_throtl_update_idletime(tg);
2089 sq = &tg->service_queue;
2093 if (tg->last_low_overflow_time[rw] == 0)
2094 tg->last_low_overflow_time[rw] = jiffies;
2095 throtl_downgrade_check(tg);
2096 throtl_upgrade_check(tg);
2097 /* throtl is FIFO - if bios are already queued, this one should be queued too */
2098 if (sq->nr_queued[rw])
2101 /* if above limits, break to queue */
2102 if (!tg_may_dispatch(tg, bio, NULL)) {
2103 tg->last_low_overflow_time[rw] = jiffies;
2104 if (throtl_can_upgrade(td, tg)) {
2105 throtl_upgrade_state(td);
2111 /* within limits, let's charge and dispatch directly */
2112 throtl_charge_bio(tg, bio);
2115 * We need to trim the slice even when bios are not being queued;
2116 * otherwise it might happen that a bio is not queued for
2117 * a long time while the slice keeps on extending and trim is not
2118 * called for a long time. If limits are then reduced suddenly,
2119 * we would take into account all the IO dispatched so far at the new
2120 * low rate and newly queued IO would get a really long dispatch time.
2123 * So keep trimming the slice even if no bio is queued.
2125 throtl_trim_slice(tg, rw);
2128 * @bio passed through this layer without being throttled.
2129 * Climb up the ladder. If we're already at the top, it
2130 * can be executed directly.
2132 qn = &tg->qnode_on_parent[rw];
2139 /* out-of-limit, queue to @tg */
2140 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2141 rw == READ ? 'R' : 'W',
2142 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2143 tg_bps_limit(tg, rw),
2144 tg->io_disp[rw], tg_iops_limit(tg, rw),
2145 sq->nr_queued[READ], sq->nr_queued[WRITE]);
2147 tg->last_low_overflow_time[rw] = jiffies;
2149 td->nr_queued[rw]++;
2150 throtl_add_bio_tg(bio, qn, tg);
2154 * Update @tg's dispatch time and force schedule dispatch if @tg
2155 * was empty before @bio. The forced scheduling isn't likely to
2156 * cause undue delay as @bio is likely to be dispatched directly if
2157 * its @tg's disptime is not in the future.
2159 if (tg->flags & THROTL_TG_WAS_EMPTY) {
2160 tg_update_disptime(tg);
2161 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2165 spin_unlock_irq(&q->queue_lock);
2166 bio_set_flag(bio, BIO_THROTTLED);
2168 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2169 if (throttled || !td->track_bio_latency)
2170 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
2176 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2177 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2178 int op, unsigned long time)
2180 struct latency_bucket *latency;
2183 if (!td || td->limit_index != LIMIT_LOW ||
2184 !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2185 !blk_queue_nonrot(td->queue))
2188 index = request_bucket_index(size);
2190 latency = get_cpu_ptr(td->latency_buckets[op]);
2191 latency[index].total_latency += time;
2192 latency[index].samples++;
2193 put_cpu_ptr(td->latency_buckets[op]);
2196 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2198 struct request_queue *q = rq->q;
2199 struct throtl_data *td = q->td;
2201 throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2205 void blk_throtl_bio_endio(struct bio *bio)
2207 struct blkcg_gq *blkg;
2208 struct throtl_grp *tg;
2210 unsigned long finish_time;
2211 unsigned long start_time;
2213 int rw = bio_data_dir(bio);
2215 blkg = bio->bi_blkg;
2218 tg = blkg_to_tg(blkg);
2219 if (!tg->td->limit_valid[LIMIT_LOW])
2222 finish_time_ns = ktime_get_ns();
2223 tg->last_finish_time = finish_time_ns >> 10;
2225 start_time = bio_issue_time(&bio->bi_issue) >> 10;
2226 finish_time = __bio_issue_time(finish_time_ns) >> 10;
2227 if (!start_time || finish_time <= start_time)
2230 lat = finish_time - start_time;
2231 /* this is only for bio-based drivers */
2232 if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2233 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2236 if (tg->latency_target && lat >= tg->td->filtered_latency) {
2238 unsigned int threshold;
2240 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
2241 threshold = tg->td->avg_buckets[rw][bucket].latency +
2243 if (lat > threshold)
2246 * Not race free, could get wrong count, which means cgroups
2252 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2253 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2255 tg->bad_bio_cnt /= 2;
2260 int blk_throtl_init(struct request_queue *q)
2262 struct throtl_data *td;
2265 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2268 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2269 LATENCY_BUCKET_SIZE, __alignof__(u64));
2270 if (!td->latency_buckets[READ]) {
2274 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2275 LATENCY_BUCKET_SIZE, __alignof__(u64));
2276 if (!td->latency_buckets[WRITE]) {
2277 free_percpu(td->latency_buckets[READ]);
2282 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2283 throtl_service_queue_init(&td->service_queue);
2288 td->limit_valid[LIMIT_MAX] = true;
2289 td->limit_index = LIMIT_MAX;
2290 td->low_upgrade_time = jiffies;
2291 td->low_downgrade_time = jiffies;
2293 /* activate policy */
2294 ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2296 free_percpu(td->latency_buckets[READ]);
2297 free_percpu(td->latency_buckets[WRITE]);
2303 void blk_throtl_exit(struct request_queue *q)
2306 del_timer_sync(&q->td->service_queue.pending_timer);
2307 throtl_shutdown_wq(q);
2308 blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2309 free_percpu(q->td->latency_buckets[READ]);
2310 free_percpu(q->td->latency_buckets[WRITE]);
2314 void blk_throtl_register_queue(struct request_queue *q)
2316 struct throtl_data *td;
2322 if (blk_queue_nonrot(q)) {
2323 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2324 td->filtered_latency = LATENCY_FILTERED_SSD;
2326 td->throtl_slice = DFL_THROTL_SLICE_HD;
2327 td->filtered_latency = LATENCY_FILTERED_HD;
2328 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2329 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2330 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2333 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2334 /* if no low limit, use previous default */
2335 td->throtl_slice = DFL_THROTL_SLICE_HD;
2338 td->track_bio_latency = !queue_is_mq(q);
2339 if (!td->track_bio_latency)
2340 blk_stat_enable_accounting(q);
2343 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2344 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2348 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2351 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2352 const char *page, size_t count)
2359 if (kstrtoul(page, 10, &v))
2361 t = msecs_to_jiffies(v);
2362 if (t == 0 || t > MAX_THROTL_SLICE)
2364 q->td->throtl_slice = t;
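	/*
	 * Example (hypothetical device name): with CONFIG_BLK_DEV_THROTTLING_LOW,
	 *   echo 50 > /sys/block/sda/queue/throttle_sample_time
	 * would land here and set throtl_slice to 50ms; values of 0 or above
	 * MAX_THROTL_SLICE (1 second) are rejected above.
	 */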
2369 static int __init throtl_init(void)
2371 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2372 if (!kthrotld_workqueue)
2373 panic("Failed to create kthrotld\n");
2375 return blkcg_policy_register(&blkcg_policy_throtl);
2378 module_init(throtl_init);