// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"
#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
        int ret;

        ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
        if (ret)
                return ret;

        atomic64_set(&stat->aux_cnt, 0);
        return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
        percpu_counter_destroy(&stat->cpu_cnt);
}
/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * do not re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
        percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
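
/*
 * Note: percpu_counter_add_batch() folds a CPU's local delta into the
 * shared counter only once it reaches BLKG_STAT_CPU_BATCH, so frequent
 * updates stay cache-local on the fast path.
 */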
/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
        return percpu_counter_sum_positive(&stat->cpu_cnt);
}
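
/*
 * Note: percpu_counter_sum_positive() folds in all per-CPU deltas and
 * clamps the result at zero, so a transiently negative sum reads as 0.
 */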
/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
        percpu_counter_set(&stat->cpu_cnt, 0);
        atomic64_set(&stat->aux_cnt, 0);
}
/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source bfq_stat
 *
 * Add @from's count, including the aux one, to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
                                    struct bfq_stat *from)
{
        atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
                     &to->aux_cnt);
}
/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
                            int off)
{
        return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}
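
/*
 * The (void *)pd + off arithmetic above relies on @off being an
 * offsetof() into struct bfq_group (taken from cftype->private in the
 * tables below), and on pd being the first member of struct bfq_group.
 */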
/* bfqg stats flags */
enum bfqg_stats_flags {
        BFQG_stats_waiting = 0,
        BFQG_stats_idling,
        BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)                                             \
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)            \
{                                                                       \
        stats->flags |= (1 << BFQG_stats_##name);                       \
}                                                                       \
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)           \
{                                                                       \
        stats->flags &= ~(1 << BFQG_stats_##name);                      \
}                                                                       \
static int bfqg_stats_##name(struct bfqg_stats *stats)                  \
{                                                                       \
        return (stats->flags & (1 << BFQG_stats_##name)) != 0;          \
}

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
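
/*
 * For reference, BFQG_FLAG_FNS(waiting) expands to three helpers that
 * set, clear and test the BFQG_stats_waiting bit in stats->flags:
 * bfqg_stats_mark_waiting(), bfqg_stats_clear_waiting() and
 * bfqg_stats_waiting(); likewise for idling and empty.
 */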
/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_waiting(stats))
                return;

        now = ktime_get_ns();
        if (now > stats->start_group_wait_time)
                bfq_stat_add(&stats->group_wait_time,
                             now - stats->start_group_wait_time);
        bfqg_stats_clear_waiting(stats);
}
/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_waiting(stats))
                return;
        if (bfqg == curr_bfqg)
                return;
        stats->start_group_wait_time = ktime_get_ns();
        bfqg_stats_mark_waiting(stats);
}
/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_empty(stats))
                return;

        now = ktime_get_ns();
        if (now > stats->start_empty_time)
                bfq_stat_add(&stats->empty_time,
                             now - stats->start_empty_time);
        bfqg_stats_clear_empty(stats);
}
void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
        bfq_stat_add(&bfqg->stats.dequeue, 1);
}
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (blkg_rwstat_total(&stats->queued))
                return;

        /*
         * The group is already marked empty. This can happen if bfqq
         * got a new request in its parent group and moved to this group
         * while being added to the service tree. Just ignore the event
         * and move on.
         */
        if (bfqg_stats_empty(stats))
                return;

        stats->start_empty_time = ktime_get_ns();
        bfqg_stats_mark_empty(stats);
}
void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_idling(stats)) {
                u64 now = ktime_get_ns();

                if (now > stats->start_idle_time)
                        bfq_stat_add(&stats->idle_time,
                                     now - stats->start_idle_time);
                bfqg_stats_clear_idling(stats);
        }
}
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        stats->start_idle_time = ktime_get_ns();
        bfqg_stats_mark_idling(stats);
}
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        bfq_stat_add(&stats->avg_queue_size_sum,
                     blkg_rwstat_total(&stats->queued));
        bfq_stat_add(&stats->avg_queue_size_samples, 1);
        bfqg_stats_update_group_wait_time(stats);
}
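
/*
 * The average itself is never stored: bfqg_prfill_avg_queue_size()
 * below divides avg_queue_size_sum by avg_queue_size_samples when the
 * bfq.avg_queue_size file is read.
 */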
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, 1);
        bfqg_stats_end_empty_time(&bfqg->stats);
        if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
                bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
                                  u64 io_start_time_ns, unsigned int op)
{
        struct bfqg_stats *stats = &bfqg->stats;
        u64 now = ktime_get_ns();

        if (now > io_start_time_ns)
                blkg_rwstat_add(&stats->service_time, op,
                                now - io_start_time_ns);
        if (io_start_time_ns > start_time_ns)
                blkg_rwstat_add(&stats->wait_time, op,
                                io_start_time_ns - start_time_ns);
}
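
/*
 * That is, wait_time accumulates the time from request arrival
 * (start_time_ns) to dispatch (io_start_time_ns), and service_time the
 * time from dispatch to completion.
 */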
#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
                                  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */
#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
        return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
        return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}
/*
 * The following functions help in navigating the bfq_group hierarchy
 * by making it possible to find the parent of a bfq_group or the
 * bfq_group associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
        struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

        return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        struct bfq_entity *group_entity = bfqq->entity.parent;

        return group_entity ? container_of(group_entity, struct bfq_group,
                                           entity) :
                              bfqq->bfqd->root_group;
}
/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
        bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
        bfqg->ref--;
        if (bfqg->ref == 0)
                kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
        /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
        bfqg_get(bfqg);

        blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
        blkg_put(bfqg_to_blkg(bfqg));

        bfqg_put(bfqg);
}
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
        struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

        blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
        blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}
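
/*
 * The bytes and ios counters filled in here back the
 * bfq.io_service_bytes and bfq.io_serviced files declared in the
 * cftype tables at the end of this file.
 */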
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->merged);
        blkg_rwstat_reset(&stats->service_time);
        blkg_rwstat_reset(&stats->wait_time);
        bfq_stat_reset(&stats->time);
        bfq_stat_reset(&stats->avg_queue_size_sum);
        bfq_stat_reset(&stats->avg_queue_size_samples);
        bfq_stat_reset(&stats->dequeue);
        bfq_stat_reset(&stats->group_wait_time);
        bfq_stat_reset(&stats->idle_time);
        bfq_stat_reset(&stats->empty_time);
#endif
}
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
        if (!to || !from)
                return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
        /* queued stats are not carried over to the aux counts */
        blkg_rwstat_add_aux(&to->merged, &from->merged);
        blkg_rwstat_add_aux(&to->service_time, &from->service_time);
        blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
        bfq_stat_add_aux(&to->time, &from->time);
        bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
        bfq_stat_add_aux(&to->avg_queue_size_samples,
                         &from->avg_queue_size_samples);
        bfq_stat_add_aux(&to->dequeue, &from->dequeue);
        bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
        bfq_stat_add_aux(&to->idle_time, &from->idle_time);
        bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}
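
/*
 * The aux counts accumulated here are what bfqg_prfill_stat_recursive()
 * adds on top of each group's own counters, so the contribution of dead
 * children stays visible in the *_recursive files.
 */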
/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
        struct bfq_group *parent;

        if (!bfqg) /* root_group */
                return;

        parent = bfqg_parent(bfqg);

        lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
        bfqg_stats_reset(&bfqg->stats);
}
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
                /*
                 * Make sure that bfqg and its associated blkg do not
                 * disappear before entity.
                 */
                bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
}
static void bfqg_stats_exit(struct bfqg_stats *stats)
{
        blkg_rwstat_exit(&stats->bytes);
        blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
        blkg_rwstat_exit(&stats->merged);
        blkg_rwstat_exit(&stats->service_time);
        blkg_rwstat_exit(&stats->wait_time);
        blkg_rwstat_exit(&stats->queued);
        bfq_stat_exit(&stats->time);
        bfq_stat_exit(&stats->avg_queue_size_sum);
        bfq_stat_exit(&stats->avg_queue_size_samples);
        bfq_stat_exit(&stats->dequeue);
        bfq_stat_exit(&stats->group_wait_time);
        bfq_stat_exit(&stats->idle_time);
        bfq_stat_exit(&stats->empty_time);
#endif
}
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
        if (blkg_rwstat_init(&stats->bytes, gfp) ||
            blkg_rwstat_init(&stats->ios, gfp))
                return -ENOMEM;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
        if (blkg_rwstat_init(&stats->merged, gfp) ||
            blkg_rwstat_init(&stats->service_time, gfp) ||
            blkg_rwstat_init(&stats->wait_time, gfp) ||
            blkg_rwstat_init(&stats->queued, gfp) ||
            bfq_stat_init(&stats->time, gfp) ||
            bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
            bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
            bfq_stat_init(&stats->dequeue, gfp) ||
            bfq_stat_init(&stats->group_wait_time, gfp) ||
            bfq_stat_init(&stats->idle_time, gfp) ||
            bfq_stat_init(&stats->empty_time, gfp)) {
                bfqg_stats_exit(stats);
                return -ENOMEM;
        }
#endif

        return 0;
}
static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
        return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
        return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}
static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
        struct bfq_group_data *bgd;

        bgd = kzalloc(sizeof(*bgd), gfp);
        if (!bgd)
                return NULL;
        return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
        struct bfq_group_data *d = cpd_to_bfqgd(cpd);

        d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
                CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
        kfree(cpd_to_bfqgd(cpd));
}
static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
                                             struct blkcg *blkcg)
{
        struct bfq_group *bfqg;

        bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
        if (!bfqg)
                return NULL;

        if (bfqg_stats_init(&bfqg->stats, gfp)) {
                kfree(bfqg);
                return NULL;
        }

        /* see comments in bfq_bic_update_cgroup for why refcounting */
        bfqg_get(bfqg);
        return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct bfq_group *bfqg = blkg_to_bfqg(blkg);
        struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
        struct bfq_entity *entity = &bfqg->entity;
        struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

        entity->orig_weight = entity->weight = entity->new_weight = d->weight;
        entity->my_sched_data = &bfqg->sched_data;
        bfqg->my_entity = entity; /*
                                   * the root_group's will be set to NULL
                                   * in bfq_init_queue()
                                   */
        bfqg->bfqd = bfqd;
        bfqg->active_entities = 0;
        bfqg->rq_pos_tree = RB_ROOT;
}
static void bfq_pd_free(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_exit(&bfqg->stats);
        bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_reset(&bfqg->stats);
}
static void bfq_group_set_parent(struct bfq_group *bfqg,
                                 struct bfq_group *parent)
{
        struct bfq_entity *entity;

        entity = &bfqg->entity;
        entity->parent = parent->my_entity;
        entity->sched_data = &parent->sched_data;
}
static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
                                         struct blkcg *blkcg)
{
        struct blkcg_gq *blkg;

        blkg = blkg_lookup(blkcg, bfqd->queue);
        if (likely(blkg))
                return blkg_to_bfqg(blkg);
        return NULL;
}
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
                                     struct blkcg *blkcg)
{
        struct bfq_group *bfqg, *parent;
        struct bfq_entity *entity;

        bfqg = bfq_lookup_bfqg(bfqd, blkcg);
        if (unlikely(!bfqg))
                return NULL;

        /*
         * Update the chain of bfq_groups as we might be handling a leaf
         * group which, along with some of its relatives, has not been
         * hooked yet to the private hierarchy of BFQ.
         *
         * Note: the walk below must not clobber bfqg, which is returned
         * to the caller.
         */
        entity = &bfqg->entity;
        for_each_entity(entity) {
                struct bfq_group *curr_bfqg = container_of(entity,
                                                struct bfq_group, entity);
                if (curr_bfqg != bfqd->root_group) {
                        parent = bfqg_parent(curr_bfqg);
                        if (!parent)
                                parent = bfqd->root_group;
                        bfq_group_set_parent(curr_bfqg, parent);
                }
        }

        return bfqg;
}
/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
{
        struct bfq_entity *entity = &bfqq->entity;

        /* If bfqq is empty, then bfq_bfqq_expire also invokes
         * bfq_del_bfqq_busy, thereby removing bfqq and its entity
         * from data structures related to the current group. Otherwise we
         * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
         * we do below.
         */
        if (bfqq == bfqd->in_service_queue)
                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
                                false, BFQQE_PREEMPTED);

        if (bfq_bfqq_busy(bfqq))
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
        bfqg_and_blkg_put(bfqq_group(bfqq));

        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
        /* pin down bfqg and its associated blkg */
        bfqg_and_blkg_get(bfqg);

        if (bfq_bfqq_busy(bfqq)) {
                if (unlikely(!bfqd->nonrot_with_queueing))
                        bfq_pos_tree_add_move(bfqd, bfqq);
                bfq_activate_bfqq(bfqd, bfqq);
        }

        if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
                bfq_schedule_dispatch(bfqd);
}
/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held; this guarantees
 * that the reference to the cgroup remains valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and to take a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
                                                 struct bfq_io_cq *bic,
                                                 struct blkcg *blkcg)
{
        struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
        struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
        struct bfq_group *bfqg;
        struct bfq_entity *entity;

        bfqg = bfq_find_set_group(bfqd, blkcg);

        if (unlikely(!bfqg))
                bfqg = bfqd->root_group;

        if (async_bfqq) {
                entity = &async_bfqq->entity;

                if (entity->sched_data != &bfqg->sched_data) {
                        bic_set_bfqq(bic, NULL, 0);
                        bfq_log_bfqq(bfqd, async_bfqq,
                                     "bic_change_group: %p %d",
                                     async_bfqq, async_bfqq->ref);
                        bfq_put_queue(async_bfqq);
                }
        }

        if (sync_bfqq) {
                entity = &sync_bfqq->entity;
                if (entity->sched_data != &bfqg->sched_data)
                        bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
        }

        return bfqg;
}
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        struct bfq_group *bfqg = NULL;
        uint64_t serial_nr;

        rcu_read_lock();
        serial_nr = __bio_blkcg(bio)->css.serial_nr;

        /*
         * Check whether blkcg has changed. The condition may trigger
         * spuriously on a newly created cic but there's no harm.
         */
        if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
                goto out;

        bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
        /*
         * Update blkg_path for bfq_log_* functions. We cache this
         * path, and update it here, for the following
         * reasons. Operations on blkg objects in blk-cgroup are
         * protected with the request_queue lock, and not with the
         * lock that protects the instances of this scheduler
         * (bfqd->lock). This exposes BFQ to the following sort of
         * race.
         *
         * The blkg_lookup performed in bfq_get_queue, protected
         * through RCU, may happen to return the address of a copy of
         * the original blkg. If this is the case, then the
         * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
         * the blkg, is useless: it does not prevent blk-cgroup code
         * from destroying both the original blkg and all objects
         * directly or indirectly referred by the copy of the
         * blkg.
         *
         * On the bright side, destroy operations on a blkg invoke, as
         * a first step, hooks of the scheduler associated with the
         * blkg. And these hooks are executed with bfqd->lock held for
         * BFQ. As a consequence, for any blkg associated with the
         * request queue this instance of the scheduler is attached
         * to, we are guaranteed that such a blkg is not destroyed, and
         * that all the pointers it contains are consistent, while we
         * are holding bfqd->lock. A blkg_lookup performed with
         * bfqd->lock held then returns a fully consistent blkg, which
         * remains consistent as long as this lock is held.
         *
         * Thanks to the last fact, and to the fact that: (1) bfqg has
         * been obtained through a blkg_lookup in the above
         * assignment, and (2) bfqd->lock is being held, here we can
         * safely use the policy data for the involved blkg (i.e., the
         * field bfqg->pd) to get to the blkg associated with bfqg,
         * and then we can safely use any field of blkg. After we
         * release bfqd->lock, even just getting blkg through this
         * bfqg may cause dangling references to be traversed, as
         * bfqg->pd may not exist any more.
         *
         * In view of the above facts, here we cache, in the bfqg, any
         * blkg data we may need for this bic, and for its associated
         * bfq_queue. As of now, we need to cache only the path of the
         * blkg, which is used in the bfq_log_* functions.
         *
         * Finally, note that bfqg itself needs to be protected from
         * destruction on the blkg_free of the original blkg (which
         * invokes bfq_pd_free). We use an additional private
         * refcounter for bfqg, to let it disappear only after no
         * bfq_queue refers to it any longer.
         */
        blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
        bic->blkcg_serial_nr = serial_nr;
out:
        rcu_read_unlock();
}
/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
        struct bfq_entity *entity = st->first_idle;

        for (; entity ; entity = st->first_idle)
                __bfq_deactivate_entity(entity, false);
}
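
/*
 * Each __bfq_deactivate_entity() call above removes the entity from the
 * idle tree, so re-reading st->first_idle on every iteration eventually
 * drains the whole tree.
 */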
/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
                                     struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}
/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
                                         struct bfq_group *bfqg,
                                         struct bfq_service_tree *st)
{
        struct rb_root *active = &st->active;
        struct bfq_entity *entity = NULL;

        if (!RB_EMPTY_ROOT(&st->active))
                entity = bfq_entity_of(rb_first(active));

        for (; entity ; entity = bfq_entity_of(rb_first(active)))
                bfq_reparent_leaf_entity(bfqd, entity);

        if (bfqg->sched_data.in_service_entity)
                bfq_reparent_leaf_entity(bfqd,
                                         bfqg->sched_data.in_service_entity);
}
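
/*
 * As in bfq_flush_idle_tree(), every bfq_reparent_leaf_entity() call
 * detaches the entity from this group's active tree, so rb_first()
 * yields a new entity on each iteration until the tree is empty.
 */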
/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *                  and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic.
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
        struct bfq_service_tree *st;
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        struct bfq_data *bfqd = bfqg->bfqd;
        struct bfq_entity *entity = bfqg->my_entity;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&bfqd->lock, flags);

        if (!entity) /* root group */
                goto put_async_queues;

        /*
         * Empty all service_trees belonging to this group before
         * deactivating the group itself.
         */
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
                st = bfqg->sched_data.service_tree + i;

                /*
                 * The idle tree may still contain bfq_queues belonging
                 * to exited tasks, because they never migrated to a
                 * different cgroup from the one being destroyed now.
                 */
                bfq_flush_idle_tree(st);

                /*
                 * It may happen that some queues are still active
                 * (busy) upon group destruction (if the corresponding
                 * processes have been forced to terminate). We move
                 * all the leaf entities corresponding to these queues
                 * to the root_group.
                 * Also, it may happen that the group has an entity
                 * in service, which is disconnected from the active
                 * tree: it must be moved, too.
                 * There is no need to put the sync queues, as the
                 * scheduler has taken no reference.
                 */
                bfq_reparent_active_entities(bfqd, bfqg, st);
        }

        __bfq_deactivate_entity(entity, false);

put_async_queues:
        bfq_put_async_queues(bfqd, bfqg);

        spin_unlock_irqrestore(&bfqd->lock, flags);
        /*
         * @blkg is going offline and will be ignored by
         * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
         * that they don't get lost. If IOs complete after this point, the
         * stats for them will be lost. Oh well...
         */
        bfqg_stats_xfer_dead(bfqg);
}
void bfq_end_wr_async(struct bfq_data *bfqd)
{
        struct blkcg_gq *blkg;

        list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                bfq_end_wr_async_queues(bfqd, bfqg);
        }
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}
static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        unsigned int val = 0;

        if (bfqgd)
                val = bfqgd->weight;

        seq_printf(sf, "%u\n", val);

        return 0;
}
static u64 bfqg_prfill_weight_device(struct seq_file *sf,
                                     struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        if (!bfqg->entity.dev_weight)
                return 0;
        return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}
static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

        seq_printf(sf, "default %u\n", bfqgd->weight);
        blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
                          &blkcg_policy_bfq, 0, false);
        return 0;
}
static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
        weight = dev_weight ?: weight;

        bfqg->entity.dev_weight = dev_weight;
        /*
         * Setting the prio_changed flag of the entity
         * to 1 with new_weight == weight would re-set
         * the value of the weight to its ioprio mapping.
         * Set the flag only if necessary.
         */
        if ((unsigned short)weight != bfqg->entity.new_weight) {
                bfqg->entity.new_weight = (unsigned short)weight;
                /*
                 * Make sure that the above new value has been
                 * stored in bfqg->entity.new_weight before
                 * setting the prio_changed flag. In fact,
                 * this flag may be read asynchronously (in
                 * critical sections protected by a different
                 * lock than that held here), and finding this
                 * flag set may cause the execution of the code
                 * for updating parameters whose value may
                 * depend also on bfqg->entity.new_weight (in
                 * __bfq_entity_update_weight_prio).
                 * This barrier makes sure that the new value
                 * of bfqg->entity.new_weight is correctly
                 * seen in that code.
                 */
                smp_wmb();
                bfqg->entity.prio_changed = 1;
        }
}
static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                                    struct cftype *cftype,
                                    u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        struct blkcg_gq *blkg;
        int ret = -ERANGE;

        if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
                return ret;

        ret = 0;
        spin_lock_irq(&blkcg->lock);
        bfqgd->weight = (unsigned short)val;
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                if (bfqg)
                        bfq_group_set_weight(bfqg, val, 0);
        }
        spin_unlock_irq(&blkcg->lock);

        return ret;
}
static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
                                        char *buf, size_t nbytes,
                                        loff_t off)
{
        int ret;
        struct blkg_conf_ctx ctx;
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct bfq_group *bfqg;
        u64 v;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
        if (ret)
                return ret;

        if (sscanf(ctx.body, "%llu", &v) == 1) {
                /* require "default" on dfl */
                ret = -ERANGE;
                if (!v)
                        goto out;
        } else if (!strcmp(strim(ctx.body), "default")) {
                v = 0;
        } else {
                ret = -EINVAL;
                goto out;
        }

        bfqg = blkg_to_bfqg(ctx.blkg);

        ret = -ERANGE;
        if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
                bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
                ret = 0;
        }
out:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}
static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
                                 char *buf, size_t nbytes,
                                 loff_t off)
{
        char *endp;
        int ret;
        u64 v;

        buf = strim(buf);

        /* "WEIGHT" or "default WEIGHT" sets the default weight */
        v = simple_strtoull(buf, &endp, 0);
        if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
                ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
                return ret ?: nbytes;
        }

        return bfq_io_set_device_weight(of, buf, nbytes, off);
}
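
/*
 * Illustrative usage of the two accepted write formats, on the cgroup v2
 * hierarchy (the device numbers below are only an example):
 *
 *      echo 200 > io.bfq.weight                (default weight)
 *      echo "8:16 300" > io.bfq.weight         (weight of device 8:16)
 */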
static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
                                        struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat_sample sum;

        blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
        return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, true);
        return 0;
}
#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, false);
        return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        u64 sum = 0;

        lockdep_assert_held(&blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
                struct bfq_stat *stat;

                if (!pos_blkg->online)
                        continue;

                stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
                sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
        }
        rcu_read_unlock();

        return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, false);
        return 0;
}
static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
        u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}
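
/*
 * The shift by 9 above (and in bfqg_prfill_sectors_recursive() below)
 * converts the byte count into 512-byte sectors.
 */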
static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
        return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
                                         struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat_sample tmp;

        blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
                                  offsetof(struct bfq_group, stats.bytes), &tmp);

        return __blkg_prfill_u64(sf, pd,
                (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
                          false);
        return 0;
}
static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
        u64 v = 0;

        if (samples) {
                v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
                v = div64_u64(v, samples);
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
                          0, false);
        return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        int ret;

        ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
        if (ret)
                return NULL;

        return blkg_to_bfqg(bfqd->queue->root_blkg);
}
struct blkcg_policy blkcg_policy_bfq = {
        .dfl_cftypes            = bfq_blkg_files,
        .legacy_cftypes         = bfq_blkcg_legacy_files,

        .cpd_alloc_fn           = bfq_cpd_alloc,
        .cpd_init_fn            = bfq_cpd_init,
        .cpd_bind_fn            = bfq_cpd_init,
        .cpd_free_fn            = bfq_cpd_free,

        .pd_alloc_fn            = bfq_pd_alloc,
        .pd_init_fn             = bfq_pd_init,
        .pd_offline_fn          = bfq_pd_offline,
        .pd_free_fn             = bfq_pd_free,
        .pd_reset_stats_fn      = bfq_pd_reset_stats,
};
struct cftype bfq_blkcg_legacy_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight_legacy,
                .write_u64 = bfq_io_set_weight_legacy,
        },
        {
                .name = "bfq.weight_device",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write = bfq_io_set_weight,
        },

        /* statistics, covering only the tasks in the bfqg */
        {
                .name = "bfq.io_service_bytes",
                .private = offsetof(struct bfq_group, stats.bytes),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_serviced",
                .private = offsetof(struct bfq_group, stats.ios),
                .seq_show = bfqg_print_rwstat,
        },
#ifdef CONFIG_BFQ_CGROUP_DEBUG
        {
                .name = "bfq.time",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.sectors",
                .seq_show = bfqg_print_stat_sectors,
        },
        {
                .name = "bfq.io_service_time",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_wait_time",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_merged",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_queued",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat,
        },
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

        /* the same statistics which cover the bfqg and its descendants */
        {
                .name = "bfq.io_service_bytes_recursive",
                .private = offsetof(struct bfq_group, stats.bytes),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_serviced_recursive",
                .private = offsetof(struct bfq_group, stats.ios),
                .seq_show = bfqg_print_rwstat_recursive,
        },
#ifdef CONFIG_BFQ_CGROUP_DEBUG
        {
                .name = "bfq.time_recursive",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat_recursive,
        },
        {
                .name = "bfq.sectors_recursive",
                .seq_show = bfqg_print_stat_sectors_recursive,
        },
        {
                .name = "bfq.io_service_time_recursive",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_wait_time_recursive",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_merged_recursive",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_queued_recursive",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.avg_queue_size",
                .seq_show = bfqg_print_avg_queue_size,
        },
        {
                .name = "bfq.group_wait_time",
                .private = offsetof(struct bfq_group, stats.group_wait_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.idle_time",
                .private = offsetof(struct bfq_group, stats.idle_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.empty_time",
                .private = offsetof(struct bfq_group, stats.empty_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.dequeue",
                .private = offsetof(struct bfq_group, stats.dequeue),
                .seq_show = bfqg_print_stat,
        },
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
        { }     /* terminate */
};
struct cftype bfq_blkg_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write = bfq_io_set_weight,
        },
        { }     /* terminate */
};
#else /* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
        }
        entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
        return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        struct bfq_group *bfqg;
        int i;

        bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
        if (!bfqg)
                return NULL;

        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
                bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

        return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */