/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
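
/*
 * Example (illustrative only): a throttle bps file below is created with
 * .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *			       BLKIO_THROTL_read_bps_device);
 * the read/write handlers later recover the owning policy with
 * BLKIOFILE_POLICY(cft->private) and the attribute with
 * BLKIOFILE_ATTR(cft->private).
 */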
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
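
/*
 * Note: bio_blkio_cgroup() prefers the css the bio was explicitly
 * associated with (bio->bi_css); only when the bio carries no such
 * association does it fall back to the blkio cgroup of %current.
 */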
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with queue_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the queue_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time)) {
			u64_stats_update_begin(&stats->syncp);
			stats->idle_time += now - stats->start_idle_time;
			u64_stats_update_end(&stats->syncp);
		}
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE])
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else	/* CONFIG_DEBUG_BLK_CGROUP */
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, direction, sync);
	blkio_end_empty_time(stats);
	u64_stats_update_end(&stats->syncp);

	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_check_and_dec_stat(stats->stat_arr[BLKIO_STAT_QUEUED], direction,
				 sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	stats->time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	stats->unaccounted_time += unaccounted_time;
#endif
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
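
/*
 * Illustrative caller sketch (not taken verbatim from any policy): a policy
 * that has just dispatched a request to the driver would account it roughly
 * as
 *
 *	blkiocg_update_dispatch_stats(blkg, pol, blk_rq_bytes(rq),
 *				      rq_data_dir(rq) == WRITE,
 *				      rq_is_sync(rq));
 *
 * i.e. a byte count plus direction/sync flags; the helper derives the sector
 * count and the SERVICED/SERVICE_BYTES buckets from those arguments.
 */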
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
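
/*
 * The two deltas recorded above split a request's life at dispatch time:
 * SERVICE_TIME covers io_start_time -> completion and WAIT_TIME covers
 * start_time -> io_start_time.  Either delta is skipped when the clock
 * appears to have gone backwards.
 */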
/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_MERGED], 1, direction, sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
						msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
						alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}
		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
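
/*
 * Typical caller pattern (see blkio_policy_parse_and_set() below): take
 * rcu_read_lock() and the queue_lock, then
 *
 *	blkg = blkg_lookup_create(blkcg, q, false);
 *
 * and check the result with IS_ERR().  -EBUSY means the queue was bypassing
 * and the operation should be retried.
 */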
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
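
/*
 * Life cycle sketch: blkg_alloc() starts a group with a single reference
 * which blkg_destroy() drops once the group has been unlinked from both the
 * queue and the cgroup; __blkg_release() then runs when the last holder lets
 * go and defers the actual freeing to RCU via blkg_rcu_free().
 */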
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		sc->sectors = 0;
		memset(sc->stat_arr_cpu, 0, sizeof(sc->stat_arr_cpu));
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;
	int i;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			for (i = 0; i < ARRAY_SIZE(stats->stat_arr); i++)
				if (i != BLKIO_STAT_QUEUED)
					memset(stats->stat_arr[i], 0,
					       sizeof(stats->stat_arr[i]));
			stats->time = 0;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			memset((void *)stats + BLKG_STATS_DEBUG_CLEAR_START, 0,
			       BLKG_STATS_DEBUG_CLEAR_SIZE);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	if (pd->stats_cpu == NULL)
		return val;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while(u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}
static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, val);
		return val;
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkio_group_stats *stats = &blkg->pd[plid]->stats;
	uint64_t v = 0, disk_total = 0;
	char key_str[MAX_KEY_LEN];
	unsigned int sync_start;
	int st;

	if (type >= BLKIO_STAT_ARR_NR) {
		do {
			sync_start = u64_stats_fetch_begin(&stats->syncp);
			switch (type) {
			case BLKIO_STAT_TIME:
				v = stats->time;
				break;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			case BLKIO_STAT_UNACCOUNTED_TIME:
				v = stats->unaccounted_time;
				break;
			case BLKIO_STAT_AVG_QUEUE_SIZE: {
				uint64_t samples = stats->avg_queue_size_samples;

				if (samples) {
					v = stats->avg_queue_size_sum;
					do_div(v, samples);
				}
				break;
			}
			case BLKIO_STAT_IDLE_TIME:
				v = stats->idle_time;
				break;
			case BLKIO_STAT_EMPTY_TIME:
				v = stats->empty_time;
				break;
			case BLKIO_STAT_DEQUEUE:
				v = stats->dequeue;
				break;
			case BLKIO_STAT_GROUP_WAIT_TIME:
				v = stats->group_wait_time;
				break;
#endif
			default:
				WARN_ON_ONCE(1);
			}
		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));

		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, v);
		return v;
	}

	for (st = BLKIO_STAT_READ; st < BLKIO_STAT_TOTAL; st++) {
		do {
			sync_start = u64_stats_fetch_begin(&stats->syncp);
			v = stats->stat_arr[type][st];
		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));

		blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, v);
		if (st == BLKIO_STAT_READ || st == BLKIO_STAT_WRITE)
			disk_total += v;
	}

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;
		s[i++] = p;
		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}
	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;
	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry.  Do so after a short
	 * msleep().  It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}
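
/*
 * The accepted input is "<major>:<minor> <value>" for all of the per device
 * files, e.g. writing "8:16 1048576" to blkio.throttle.read_bps_device
 * limits that disk to 1MB/s for the cgroup, and a value of 0 removes the
 * per-device override.
 */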
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);

	return NULL;
}
static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu)
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		else
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
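
/*
 * Each blkg contributes one or more "<dev> ..." rows via the helpers above;
 * when @show_total is set a final "Total" row sums the READ and WRITE values
 * across all devices of the cgroup.
 */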
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1, 0);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
	{ }	/* terminate */
};
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
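
/*
 * Illustrative sketch of a policy module (modelled loosely on how
 * blk-throttle and CFQ hook in; the example op names are made up):
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_init_group_fn = foo_init_blkio_group,
 *			.blkio_update_group_weight_fn = foo_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *		.pdata_size = sizeof(struct foo_group),
 *	};
 *
 * registered once at init with blkio_policy_register(&blkio_policy_foo) and
 * torn down with blkio_policy_unregister() on exit.
 */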
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);