bpf: Propagate modified uaddrlen from cgroup sockaddr programs
[platform/kernel/linux-starfive.git] include/linux/bpf-cgroup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
                                       const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
                                         const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
                                          const struct bpf_insn *insn);

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
        case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
        switch (attach_type) {
        CGROUP_ATYPE(CGROUP_INET_INGRESS);
        CGROUP_ATYPE(CGROUP_INET_EGRESS);
        CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
        CGROUP_ATYPE(CGROUP_SOCK_OPS);
        CGROUP_ATYPE(CGROUP_DEVICE);
        CGROUP_ATYPE(CGROUP_INET4_BIND);
        CGROUP_ATYPE(CGROUP_INET6_BIND);
        CGROUP_ATYPE(CGROUP_INET4_CONNECT);
        CGROUP_ATYPE(CGROUP_INET6_CONNECT);
        CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
        CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
        CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
        CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
        CGROUP_ATYPE(CGROUP_SYSCTL);
        CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
        CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
        CGROUP_ATYPE(CGROUP_GETSOCKOPT);
        CGROUP_ATYPE(CGROUP_SETSOCKOPT);
        CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
        default:
                return CGROUP_BPF_ATTACH_TYPE_INVALID;
        }
}

#undef CGROUP_ATYPE

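/* Illustrative note: each CGROUP_ATYPE(type) case above expands to a 1:1
 * mapping from the uapi enum to the internal attach-type enum, e.g.
 *
 *	case BPF_CGROUP_INET_INGRESS: return CGROUP_INET_INGRESS;
 *
 * so any uapi attach type not listed falls through to
 * CGROUP_BPF_ATTACH_TYPE_INVALID.
 */
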
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
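/* One static key per attach type: each hook compiles down to a static-branch
 * no-op until at least one program of that type is attached anywhere on the
 * system (the key is incremented on attach and decremented on detach).
 */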

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list_map;
        struct list_head list_cg;
        struct rb_node node;
        struct rcu_head rcu;
};

struct bpf_cgroup_link {
        struct bpf_link link;
        struct cgroup *cgroup;
        enum bpf_attach_type type;
};

struct bpf_prog_list {
        struct hlist_node node;
        struct bpf_prog *prog;
        struct bpf_cgroup_link *link;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      int *uaddrlen,
                                      enum cgroup_bpf_attach_type atype,
                                      void *t_ctx,
                                      u32 *flags);
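
/* @uaddrlen is both an input and an output: it carries the sockaddr length
 * the caller obtained from (or will report to) userspace, and a sockaddr
 * program that rewrites the address may also change the length, which is
 * then propagated back to the caller (this matters e.g. for address
 * families whose meaningful length varies).  A sketch of a caller that
 * already owns the socket, modelled on tcp_v4_pre_connect() (exact call
 * sites vary by kernel version):
 *
 *	if (addr_len < sizeof(struct sockaddr_in))
 *		return -EINVAL;
 *	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, &addr_len);
 */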

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
                                   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                       int optname, char __user *optval,
                                       int __user *optlen, int max_optlen,
                                       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
                                            int optname, void *optval,
                                            int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                        enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);

/* Opportunistic check to see whether we have any BPF program attached. */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
                                           enum cgroup_bpf_attach_type type)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        struct bpf_prog_array *array;

        array = rcu_access_pointer(cgrp->bpf.effective[type]);
        return array != &bpf_empty_prog_array.hdr;
}
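
/* This works because a cgroup with no programs of @type has its effective[]
 * slot pointing at the shared bpf_empty_prog_array, so a plain pointer
 * compare suffices; rcu_access_pointer() is enough here since the pointer
 * is never dereferenced.
 */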

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                             \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) &&                        \
            cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS))                 \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    CGROUP_INET_INGRESS);     \
                                                                              \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) {                    \
                typeof(sk) __sk = sk_to_full_sk(sk);                           \
                if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) &&        \
                    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS))         \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,         \
                                                            CGROUP_INET_EGRESS); \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, atype)                                      \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(atype)) {                                       \
                __ret = __cgroup_bpf_run_filter_sk(sk, atype);                 \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                      \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)                              \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype)                     \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(atype))                                         \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
                                                          atype, NULL, NULL);  \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx)         \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(atype)) {                                       \
                lock_sock(sk);                                                 \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
                                                          atype, t_ctx, NULL); \
                release_sock(sk);                                              \
        }                                                                      \
        __ret;                                                                 \
})
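
/* A sketch of a typical caller that does not yet hold the socket lock,
 * modelled on udp_pre_connect() (exact call sites vary by kernel version):
 *
 *	if (addr_len < sizeof(struct sockaddr_in))
 *		return -EINVAL;
 *	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, &addr_len);
 *
 * The _LOCK variant takes and releases the socket lock around the program
 * run; callers already holding the lock use BPF_CGROUP_RUN_SA_PROG().
 */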

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via upper bits of return code. The only flag that is supported
 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({                                                                             \
        u32 __flags = 0;                                                       \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(atype)) {                                       \
                lock_sock(sk);                                                 \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
                                                          atype, NULL, &__flags); \
                release_sock(sk);                                              \
                if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)            \
                        *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;           \
        }                                                                      \
        __ret;                                                                 \
})
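
/* Sketch of the bind-path usage, modelled on inet_bind() (simplified;
 * details vary by kernel version):
 *
 *	u32 flags = BIND_WITH_LOCK;
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
 *						 CGROUP_INET4_BIND, &flags);
 *	if (err)
 *		return err;
 *	return __inet_bind(sk, uaddr, addr_len, flags);
 *
 * __inet_bind() then skips the CAP_NET_BIND_SERVICE check for privileged
 * ports when the program set BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE.
 */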

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)                                     \
        ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||                          \
          cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&                         \
         (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen)                 \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen)                 \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen)            \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen)            \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen)             \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen)             \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)
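
/* The RECVMSG hooks run after the kernel has filled in the peer address,
 * letting a program rewrite the address (and, via uaddrlen, the length)
 * that userspace will see.  Sketch, modelled on udp_recvmsg() (details
 * vary by kernel version):
 *
 *	*addr_len = sizeof(*sin);
 *	BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, (struct sockaddr *)sin,
 *					      addr_len);
 */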

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener of the request_sock.
 * In this case, the caller holds the listener-sk (unlocked), sets
 * sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" variant with
 * the listener-sk so that the cgroup-bpf progs of the listener-sk
 * will be run.
 *
 * Regardless of syncookie mode or not,
 * calling bpf_setsockopt on a listener-sk will not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)                   \
({                                                                      \
        int __ret = 0;                                                  \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))                        \
                __ret = __cgroup_bpf_run_filter_sock_ops(sk,            \
                                                         sock_ops,      \
                                                         CGROUP_SOCK_OPS); \
        __ret;                                                          \
})

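/* Note: typeof(sk) in the macro below deliberately resolves against the
 * 'sk' variable in scope at the call site; 'sk' is not a parameter of
 * this macro.
 */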
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                 \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {           \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);               \
                if (__sk && sk_fullsock(__sk))                                 \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,         \
                                                                 sock_ops,     \
                                                                 CGROUP_SOCK_OPS); \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)        \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(CGROUP_DEVICE))                                \
                __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
                                                          access,             \
                                                          CGROUP_DEVICE);     \
                                                                              \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)        \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(CGROUP_SYSCTL))                                 \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
                                                       buf, count, pos,        \
                                                       CGROUP_SYSCTL);         \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
                                       kernel_optval)                          \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) &&                           \
            cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT))                  \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level,        \
                                                           optname, optval,    \
                                                           optlen,             \
                                                           kernel_optval);     \
        __ret;                                                                 \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                             \
                get_user(__ret, optlen);                                       \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
                                       max_optlen, retval)                     \
({                                                                             \
        int __ret = retval;                                                    \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) &&                           \
            cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT))                  \
                if (!(sock)->sk_prot->bpf_bypass_getsockopt ||                 \
                    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
                                          tcp_bpf_bypass_getsockopt,           \
                                          level, optname))                     \
                        __ret = __cgroup_bpf_run_filter_getsockopt(            \
                                sock, level, optname, optval, optlen,          \
                                max_optlen, retval);                           \
        __ret;                                                                 \
})
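
/* The ->bpf_bypass_getsockopt() hook lets a protocol skip the getsockopt
 * program for selected options where the extra socket-lock round trip would
 * hurt; TCP wires this up to tcp_bpf_bypass_getsockopt(), which bypasses
 * e.g. TCP_ZEROCOPY_RECEIVE.
 */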

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
                                            optlen, retval)                    \
({                                                                             \
        int __ret = retval;                                                    \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                             \
                __ret = __cgroup_bpf_run_filter_getsockopt_kern(               \
                        sock, level, optname, optval, optlen, retval);         \
        __ret;                                                                 \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);

const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else

static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return NULL;
}

static inline const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return NULL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value) {
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                        void *key, void *value, u64 flags) {
        return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
                                            optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */