/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
28 #define FC_APPID_LEN 129
30 #ifdef CONFIG_BLK_CGROUP
32 enum blkg_iostat_type {
41 struct blkg_policy_data;
44 struct cgroup_subsys_state css;
46 refcount_t online_pin;
48 struct radix_tree_root blkg_tree;
49 struct blkcg_gq __rcu *blkg_hint;
50 struct hlist_head blkg_list;
52 struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
54 struct list_head all_blkcgs_node;
55 #ifdef CONFIG_BLK_CGROUP_FC_APPID
56 char fc_app_id[FC_APPID_LEN];
58 #ifdef CONFIG_CGROUP_WRITEBACK
59 struct list_head cgwb_list;
64 u64 bytes[BLKG_IOSTAT_NR];
65 u64 ios[BLKG_IOSTAT_NR];
68 struct blkg_iostat_set {
69 struct u64_stats_sync sync;
70 struct blkg_iostat cur;
71 struct blkg_iostat last;
74 /* association between a blk cgroup and a request queue */
76 /* Pointer to the associated request_queue */
77 struct request_queue *q;
78 struct list_head q_node;
79 struct hlist_node blkcg_node;
82 /* all non-root blkcg_gq's are guaranteed to have access to parent */
83 struct blkcg_gq *parent;
86 struct percpu_ref refcnt;
88 /* is this blkg online? protected by both blkcg and q locks */
91 struct blkg_iostat_set __percpu *iostat_cpu;
92 struct blkg_iostat_set iostat;
94 struct blkg_policy_data *pd[BLKCG_MAX_POLS];
96 spinlock_t async_bio_lock;
97 struct bio_list async_bios;
98 struct work_struct async_bio_work;
101 atomic64_t delay_nsec;
102 atomic64_t delay_start;
106 struct rcu_head rcu_head;
109 extern struct cgroup_subsys_state * const blkcg_root_css;
111 void blkcg_destroy_blkgs(struct blkcg *blkcg);
112 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
113 void blkcg_maybe_throttle_current(void);
115 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
117 return css ? container_of(css, struct blkcg, css) : NULL;
121 * bio_blkcg - grab the blkcg associated with a bio
124 * This returns the blkcg associated with a bio, %NULL if not associated.
125 * Callers are expected to either handle %NULL or know association has been
126 * done prior to calling this.
128 static inline struct blkcg *bio_blkcg(struct bio *bio)
130 if (bio && bio->bi_blkg)
131 return bio->bi_blkg->blkcg;
135 static inline bool blk_cgroup_congested(void)
137 struct cgroup_subsys_state *css;
141 css = kthread_blkcg();
143 css = task_css(current, io_cgrp_id);
145 if (atomic_read(&css->cgroup->congestion_count)) {
156 * blkcg_parent - get the parent of a blkcg
157 * @blkcg: blkcg of interest
159 * Return the parent blkcg of @blkcg. Can be called anytime.
161 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
163 return css_to_blkcg(blkcg->css.parent);
167 * blkcg_pin_online - pin online state
168 * @blkcg: blkcg of interest
170 * While pinned, a blkcg is kept online. This is primarily used to
171 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
172 * while an associated cgwb is still active.
174 static inline void blkcg_pin_online(struct blkcg *blkcg)
176 refcount_inc(&blkcg->online_pin);
180 * blkcg_unpin_online - unpin online state
181 * @blkcg: blkcg of interest
183 * This is primarily used to impedance-match blkg and cgwb lifetimes so
184 * that blkg doesn't go offline while an associated cgwb is still active.
185 * When this count goes to zero, all active cgwbs have finished so the
186 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
188 static inline void blkcg_unpin_online(struct blkcg *blkcg)
191 if (!refcount_dec_and_test(&blkcg->online_pin))
193 blkcg_destroy_blkgs(blkcg);
194 blkcg = blkcg_parent(blkcg);
198 #else /* CONFIG_BLK_CGROUP */
206 #define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
208 static inline void blkcg_maybe_throttle_current(void) { }
209 static inline bool blk_cgroup_congested(void) { return false; }
212 static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
213 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
214 #endif /* CONFIG_BLOCK */
216 #endif /* CONFIG_BLK_CGROUP */
218 #ifdef CONFIG_BLK_CGROUP_FC_APPID
220 * Sets the fc_app_id field associted to blkcg
221 * @app_id: application identifier
222 * @cgrp_id: cgroup id
223 * @app_id_len: size of application identifier
225 static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len)
228 struct cgroup_subsys_state *css;
232 if (app_id_len > FC_APPID_LEN)
235 cgrp = cgroup_get_from_id(cgrp_id);
238 css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);
243 blkcg = css_to_blkcg(css);
245 * There is a slight race condition on setting the appid.
246 * Worst case an I/O may not find the right id.
247 * This is no different from the I/O we let pass while obtaining
248 * the vmid from the fabric.
249 * Adding the overhead of a lock is not necessary.
251 strlcpy(blkcg->fc_app_id, app_id, app_id_len);
259 * blkcg_get_fc_appid - get the fc app identifier associated with a bio
262 * On success return the fc_app_id, on failure return NULL
264 static inline char *blkcg_get_fc_appid(struct bio *bio)
266 if (bio && bio->bi_blkg &&
267 (bio->bi_blkg->blkcg->fc_app_id[0] != '\0'))
268 return bio->bi_blkg->blkcg->fc_app_id;
272 static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; }
273 static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; }
274 #endif /*CONFIG_BLK_CGROUP_FC_APPID*/
275 #endif /* _BLK_CGROUP_H */