/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

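/*
 * Illustrative sketch (not part of the original header): the iterator above
 * simply walks both storage types, BPF_CGROUP_STORAGE_SHARED and
 * BPF_CGROUP_STORAGE_PERCPU, so code holding one pointer per type loops the
 * same way bpf_cgroup_storage_set() below does:
 *
 *	enum bpf_cgroup_storage_type stype;
 *
 *	for_each_cgroup_storage_type(stype)
 *		use_storage(storage[stype]);	(use_storage() is hypothetical)
 */
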
struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};

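/*
 * Illustrative user-space sketch (not part of the original header): the
 * per-attach-type flags stored above come from the attach_flags field of a
 * BPF_PROG_ATTACH command, e.g. attaching an egress program in multi-program
 * mode (cgroup_fd and prog_fd are assumed to be already-open fds):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */
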
void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}

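/*
 * Illustrative BPF-program-side sketch (libbpf-style, names hypothetical):
 * the per-cpu pointers published by bpf_cgroup_storage_set() are what the
 * bpf_get_local_storage() helper hands back to the program at run time:
 *
 *	struct bpf_map_def SEC("maps") counter = {
 *		.type		= BPF_MAP_TYPE_CGROUP_STORAGE,
 *		.key_size	= sizeof(struct bpf_cgroup_storage_key),
 *		.value_size	= sizeof(__u64),
 *	};
 *
 *	SEC("cgroup/skb")
 *	int count_pkts(struct __sk_buff *skb)
 *	{
 *		__u64 *val = bpf_get_local_storage(&counter, 0);
 *
 *		__sync_fetch_and_add(val, 1);
 *		return 1;
 *	}
 */
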
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
	__ret;								      \
})

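/*
 * Illustrative caller sketch (hypothetical call site): the wrappers evaluate
 * to 0 when the skb is allowed (or nothing is attached) and to a negative
 * errno such as -EPERM when an attached program rejects it:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		goto drop;
 */
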
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled &&	       \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)	       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)	       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

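/*
 * Illustrative BPF-program-side sketch (libbpf-style, names hypothetical):
 * programs run by the connect/sendmsg wrappers above see a struct
 * bpf_sock_addr context and may rewrite the destination address before the
 * kernel uses it, e.g. redirecting DNS traffic to localhost:
 *
 *	SEC("cgroup/connect4")
 *	int redirect_dns(struct bpf_sock_addr *ctx)
 *	{
 *		if (ctx->user_port == bpf_htons(53))
 *			ctx->user_ip4 = bpf_htonl(0x7f000001);
 *		return 1;
 *	}
 */
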
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,    \
						       buf, count, pos, nbuf,  \
						       BPF_CGROUP_SYSCTL);     \
	__ret;								       \
})

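/*
 * Illustrative BPF-program-side sketch (names hypothetical): programs run by
 * BPF_CGROUP_RUN_PROG_SYSCTL() get a struct bpf_sysctl context; returning 0
 * rejects the access and 1 allows it, e.g. making sysctls read-only for the
 * cgroup:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_read_only(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;
 *	}
 */
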
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
					void *key, void *value, u64 flags) {
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */