/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
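
/*
 * Example: a file created with
 *	BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 * keeps the owning policy id in the upper 16 bits and the attribute id in
 * the lower 16 bits, so BLKIOFILE_POLICY() and BLKIOFILE_ATTR() can recover
 * both from cft->private later.
 */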
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach_task = blkiocg_can_attach_task,
	.attach_task = blkiocg_attach_task,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
			struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
			struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}
/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
		enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
				int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
			unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
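
/*
 * Caller sketch (illustrative, not lifted from any particular policy):
 * the group is looked up and the stats updated under the rcu read lock so
 * the blkg cannot go away underneath us.
 *
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, key);
 *	if (blkg)
 *		blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
 *	rcu_read_unlock();
 */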
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_per_cpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;
	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
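
/*
 * Stat keys shown to user space have the form "<major>:<minor> <sub type>",
 * e.g. "8:16 Read" or "8:16 Total" (device numbers are illustrative);
 * blkio_get_key_name() below builds them.
 */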
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
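
/*
 * Sum one per cpu counter over all possible CPUs. The u64_stats fetch
 * begin/retry pair makes the 64bit read consistent on 32bit architectures.
 */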
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while(u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;
	u64 bps, iops;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
			temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch(fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoull(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			if (iops > THROTL_IOPS_MAX)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
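
/*
 * Rules are written as "<major>:<minor> <value>", for example (device
 * numbers are illustrative):
 *
 *	echo "8:16 1000" > blkio.weight_device
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * Writing a value of 0 for an existing rule requests its removal, see
 * blkio_delete_rule_command() below.
 */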
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}
/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch(oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch(newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}
/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}
/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
		struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}
static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name){
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name){
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
1306 struct blkio_group *blkg;
1307 struct hlist_node *n;
1308 struct blkio_policy_node *pn;
1310 if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1313 spin_lock(&blkio_list_lock);
1314 spin_lock_irq(&blkcg->lock);
1315 blkcg->weight = (unsigned int)val;
1317 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1318 pn = blkio_policy_search_node(blkcg, blkg->dev,
1319 BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
1323 blkio_update_group_weight(blkg, blkcg->weight);
1325 spin_unlock_irq(&blkcg->lock);
1326 spin_unlock(&blkio_list_lock);
static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");