selftests/bpf: Fix erroneous bitmask operation
[platform/kernel/linux-rpi.git] / tools / testing / selftests / bpf / progs / verifier_sock.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Converted from tools/testing/selftests/bpf/verifier/sock.c */
3
4 #include <linux/bpf.h>
5 #include <bpf/bpf_helpers.h>
6 #include "bpf_misc.h"
7
8 /* sizeof_field(T, M): size in bytes of member M of struct T, without
9  * needing an instance.  offsetofend(T, M): byte offset one past the end
10  * of member M — used below to probe loads just beyond a field's range. */
8 #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
9 #define offsetofend(TYPE, MEMBER) \
10         (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
11
/* Socket-typed maps used by the tests below; only their types matter
 * to the verifier, the entries are never populated. */
12 struct {
13         __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
14         __uint(max_entries, 1);
15         __type(key, __u32);
16         __type(value, __u64);
17 } map_reuseport_array SEC(".maps");
18
19 struct {
20         __uint(type, BPF_MAP_TYPE_SOCKHASH);
21         __uint(max_entries, 1);
22         __type(key, int);
23         __type(value, int);
24 } map_sockhash SEC(".maps");
25
26 struct {
27         __uint(type, BPF_MAP_TYPE_SOCKMAP);
28         __uint(max_entries, 1);
29         __type(key, int);
30         __type(value, int);
31 } map_sockmap SEC(".maps");
32
33 struct {
34         __uint(type, BPF_MAP_TYPE_XSKMAP);
35         __uint(max_entries, 1);
36         __type(key, int);
37         __type(value, int);
38 } map_xskmap SEC(".maps");
39
40 /* Value type for the sk_storage map: a counter protected by a spin lock. */
41 struct val {
42         int cnt;
43         struct bpf_spin_lock l;
44 };
45
46 /* Per-socket storage map; SK_STORAGE requires BPF_F_NO_PREALLOC. */
47 struct {
48         __uint(type, BPF_MAP_TYPE_SK_STORAGE);
49         __uint(max_entries, 0);
50         __type(key, int);
51         __type(value, struct val);
52         __uint(map_flags, BPF_F_NO_PREALLOC);
53 } sk_storage_map SEC(".maps");
52
/* skb->sk is PTR_TO_SOCK_COMMON_OR_NULL; dereferencing it without a NULL
 * check must be rejected with "invalid mem access 'sock_common_or_null'". */
53 SEC("cgroup/skb")
54 __description("skb->sk: no NULL check")
55 __failure __msg("invalid mem access 'sock_common_or_null'")
56 __failure_unpriv
57 __naked void skb_sk_no_null_check(void)
58 {
59         asm volatile ("                                 \
60         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
61         r0 = *(u32*)(r1 + 0);                           \
62         r0 = 0;                                         \
63         exit;                                           \
64 "       :
65         : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
66         : __clobber_all);
67 }
68
/* After a NULL check, skb->sk (sock_common) allows reading 'family',
 * which is not a fullsock-only field — must load and pass. */
69 SEC("cgroup/skb")
70 __description("skb->sk: sk->family [non fullsock field]")
71 __success __success_unpriv __retval(0)
72 __naked void sk_family_non_fullsock_field_1(void)
73 {
74         asm volatile ("                                 \
75         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
76         if r1 != 0 goto l0_%=;                          \
77         r0 = 0;                                         \
78         exit;                                           \
79 l0_%=:  r0 = *(u32*)(r1 + %[bpf_sock_family]);          \
80         r0 = 0;                                         \
81         exit;                                           \
82 "       :
83         : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
84           __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
85         : __clobber_all);
86 }
87
/* 'type' is a fullsock-only field; reading it through a plain sock_common
 * pointer (skb->sk, even NULL-checked) must be rejected. */
88 SEC("cgroup/skb")
89 __description("skb->sk: sk->type [fullsock field]")
90 __failure __msg("invalid sock_common access")
91 __failure_unpriv
92 __naked void sk_sk_type_fullsock_field_1(void)
93 {
94         asm volatile ("                                 \
95         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
96         if r1 != 0 goto l0_%=;                          \
97         r0 = 0;                                         \
98         exit;                                           \
99 l0_%=:  r0 = *(u32*)(r1 + %[bpf_sock_type]);            \
100         r0 = 0;                                         \
101         exit;                                           \
102 "       :
103         : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
104           __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
105         : __clobber_all);
106 }
107
/* bpf_sk_fullsock() requires a non-NULL sock_common argument; passing
 * skb->sk without the NULL check must fail the helper's type check. */
108 SEC("cgroup/skb")
109 __description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
110 __failure __msg("type=sock_common_or_null expected=sock_common")
111 __failure_unpriv
112 __naked void sk_no_skb_sk_check_1(void)
113 {
114         asm volatile ("                                 \
115         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
116         call %[bpf_sk_fullsock];                        \
117         r0 = 0;                                         \
118         exit;                                           \
119 "       :
120         : __imm(bpf_sk_fullsock),
121           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
122         : __clobber_all);
123 }
124
/* bpf_sk_fullsock() returns sock_or_null; dereferencing the return value
 * without checking it against NULL must be rejected. */
125 SEC("cgroup/skb")
126 __description("sk_fullsock(skb->sk): no NULL check on ret")
127 __failure __msg("invalid mem access 'sock_or_null'")
128 __failure_unpriv
129 __naked void no_null_check_on_ret_1(void)
130 {
131         asm volatile ("                                 \
132         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
133         if r1 != 0 goto l0_%=;                          \
134         r0 = 0;                                         \
135         exit;                                           \
136 l0_%=:  call %[bpf_sk_fullsock];                        \
137         r0 = *(u32*)(r0 + %[bpf_sock_type]);            \
138         r0 = 0;                                         \
139         exit;                                           \
140 "       :
141         : __imm(bpf_sk_fullsock),
142           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
143           __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
144         : __clobber_all);
145 }
146
/* With both skb->sk and the bpf_sk_fullsock() return NULL-checked, the
 * fullsock-only 'type' field is readable — must pass. */
147 SEC("cgroup/skb")
148 __description("sk_fullsock(skb->sk): sk->type [fullsock field]")
149 __success __success_unpriv __retval(0)
150 __naked void sk_sk_type_fullsock_field_2(void)
151 {
152         asm volatile ("                                 \
153         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
154         if r1 != 0 goto l0_%=;                          \
155         r0 = 0;                                         \
156         exit;                                           \
157 l0_%=:  call %[bpf_sk_fullsock];                        \
158         if r0 != 0 goto l1_%=;                          \
159         r0 = 0;                                         \
160         exit;                                           \
161 l1_%=:  r0 = *(u32*)(r0 + %[bpf_sock_type]);            \
162         r0 = 0;                                         \
163         exit;                                           \
164 "       :
165         : __imm(bpf_sk_fullsock),
166           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
167           __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
168         : __clobber_all);
169 }
170
/* A fullsock pointer may also read non-fullsock fields such as
 * 'family' — must pass. */
171 SEC("cgroup/skb")
172 __description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
173 __success __success_unpriv __retval(0)
174 __naked void sk_family_non_fullsock_field_2(void)
175 {
176         asm volatile ("                                 \
177         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
178         if r1 != 0 goto l0_%=;                          \
179         r0 = 0;                                         \
180         exit;                                           \
181 l0_%=:  call %[bpf_sk_fullsock];                        \
182         if r0 != 0 goto l1_%=;                          \
183         exit;                                           \
184 l1_%=:  r0 = *(u32*)(r0 + %[bpf_sock_family]);          \
185         r0 = 0;                                         \
186         exit;                                           \
187 "       :
188         : __imm(bpf_sk_fullsock),
189           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
190           __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
191         : __clobber_all);
192 }
193
/* A 1-byte (narrow) load of the 4-byte 'state' field on a fullsock is
 * permitted — must pass. */
194 SEC("cgroup/skb")
195 __description("sk_fullsock(skb->sk): sk->state [narrow load]")
196 __success __success_unpriv __retval(0)
197 __naked void sk_sk_state_narrow_load(void)
198 {
199         asm volatile ("                                 \
200         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
201         if r1 != 0 goto l0_%=;                          \
202         r0 = 0;                                         \
203         exit;                                           \
204 l0_%=:  call %[bpf_sk_fullsock];                        \
205         if r0 != 0 goto l1_%=;                          \
206         r0 = 0;                                         \
207         exit;                                           \
208 l1_%=:  r0 = *(u8*)(r0 + %[bpf_sock_state]);            \
209         r0 = 0;                                         \
210         exit;                                           \
211 "       :
212         : __imm(bpf_sk_fullsock),
213           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
214           __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
215         : __clobber_all);
216 }
217
/* dst_port is 2 bytes, but a 4-byte word load at its offset is still
 * accepted for backward compatibility with older programs — must pass. */
218 SEC("cgroup/skb")
219 __description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
220 __success __success_unpriv __retval(0)
221 __naked void port_word_load_backward_compatibility(void)
222 {
223         asm volatile ("                                 \
224         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
225         if r1 != 0 goto l0_%=;                          \
226         r0 = 0;                                         \
227         exit;                                           \
228 l0_%=:  call %[bpf_sk_fullsock];                        \
229         if r0 != 0 goto l1_%=;                          \
230         r0 = 0;                                         \
231         exit;                                           \
232 l1_%=:  r0 = *(u32*)(r0 + %[bpf_sock_dst_port]);        \
233         r0 = 0;                                         \
234         exit;                                           \
235 "       :
236         : __imm(bpf_sk_fullsock),
237           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
238           __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
239         : __clobber_all);
240 }
241
/* A 2-byte (half) load exactly covering the 2-byte dst_port field is
 * valid — must pass. */
242 SEC("cgroup/skb")
243 __description("sk_fullsock(skb->sk): sk->dst_port [half load]")
244 __success __success_unpriv __retval(0)
245 __naked void sk_dst_port_half_load(void)
246 {
247         asm volatile ("                                 \
248         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
249         if r1 != 0 goto l0_%=;                          \
250         r0 = 0;                                         \
251         exit;                                           \
252 l0_%=:  call %[bpf_sk_fullsock];                        \
253         if r0 != 0 goto l1_%=;                          \
254         r0 = 0;                                         \
255         exit;                                           \
256 l1_%=:  r0 = *(u16*)(r0 + %[bpf_sock_dst_port]);        \
257         r0 = 0;                                         \
258         exit;                                           \
259 "       :
260         : __imm(bpf_sk_fullsock),
261           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
262           __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
263         : __clobber_all);
264 }
265
/* A half load at dst_port + 2 lands in the padding after the 2-byte
 * field and must be rejected as invalid sock access. */
266 SEC("cgroup/skb")
267 __description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
268 __failure __msg("invalid sock access")
269 __failure_unpriv
270 __naked void dst_port_half_load_invalid_1(void)
271 {
272         asm volatile ("                                 \
273         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
274         if r1 != 0 goto l0_%=;                          \
275         r0 = 0;                                         \
276         exit;                                           \
277 l0_%=:  call %[bpf_sk_fullsock];                        \
278         if r0 != 0 goto l1_%=;                          \
279         r0 = 0;                                         \
280         exit;                                           \
281 l1_%=:  r0 = *(u16*)(r0 + %[__imm_0]);                  \
282         r0 = 0;                                         \
283         exit;                                           \
284 "       :
285         : __imm(bpf_sk_fullsock),
286           __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
287           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
288         : __clobber_all);
289 }
290
/* Byte loads at dst_port + 0 and + 1 both fall inside the 2-byte field
 * and are valid — must pass. */
291 SEC("cgroup/skb")
292 __description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
293 __success __success_unpriv __retval(0)
294 __naked void sk_dst_port_byte_load(void)
295 {
296         asm volatile ("                                 \
297         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
298         if r1 != 0 goto l0_%=;                          \
299         r0 = 0;                                         \
300         exit;                                           \
301 l0_%=:  call %[bpf_sk_fullsock];                        \
302         if r0 != 0 goto l1_%=;                          \
303         r0 = 0;                                         \
304         exit;                                           \
305 l1_%=:  r2 = *(u8*)(r0 + %[bpf_sock_dst_port]);         \
306         r2 = *(u8*)(r0 + %[__imm_0]);                   \
307         r0 = 0;                                         \
308         exit;                                           \
309 "       :
310         : __imm(bpf_sk_fullsock),
311           __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
312           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
313           __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
314         : __clobber_all);
315 }
316
/* A byte load at dst_port + 2 is past the 2-byte field and must be
 * rejected as invalid sock access. */
317 SEC("cgroup/skb")
318 __description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
319 __failure __msg("invalid sock access")
320 __failure_unpriv
321 __naked void dst_port_byte_load_invalid(void)
322 {
323         asm volatile ("                                 \
324         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
325         if r1 != 0 goto l0_%=;                          \
326         r0 = 0;                                         \
327         exit;                                           \
328 l0_%=:  call %[bpf_sk_fullsock];                        \
329         if r0 != 0 goto l1_%=;                          \
330         r0 = 0;                                         \
331         exit;                                           \
332 l1_%=:  r0 = *(u8*)(r0 + %[__imm_0]);                   \
333         r0 = 0;                                         \
334         exit;                                           \
335 "       :
336         : __imm(bpf_sk_fullsock),
337           __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
338           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
339         : __clobber_all);
340 }
341
/* A half load starting at offsetofend(dst_port), i.e. entirely past the
 * field, must be rejected as invalid sock access. */
342 SEC("cgroup/skb")
343 __description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
344 __failure __msg("invalid sock access")
345 __failure_unpriv
346 __naked void dst_port_half_load_invalid_2(void)
347 {
348         asm volatile ("                                 \
349         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
350         if r1 != 0 goto l0_%=;                          \
351         r0 = 0;                                         \
352         exit;                                           \
353 l0_%=:  call %[bpf_sk_fullsock];                        \
354         if r0 != 0 goto l1_%=;                          \
355         r0 = 0;                                         \
356         exit;                                           \
357 l1_%=:  r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]);   \
358         r0 = 0;                                         \
359         exit;                                           \
360 "       :
361         : __imm(bpf_sk_fullsock),
362           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
363           __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
364         : __clobber_all);
365 }
366
/* A byte load at dst_ip6[0] + 1 (second byte of the address array) is a
 * valid narrow load — must pass. */
367 SEC("cgroup/skb")
368 __description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
369 __success __success_unpriv __retval(0)
370 __naked void dst_ip6_load_2nd_byte(void)
371 {
372         asm volatile ("                                 \
373         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
374         if r1 != 0 goto l0_%=;                          \
375         r0 = 0;                                         \
376         exit;                                           \
377 l0_%=:  call %[bpf_sk_fullsock];                        \
378         if r0 != 0 goto l1_%=;                          \
379         r0 = 0;                                         \
380         exit;                                           \
381 l1_%=:  r0 = *(u8*)(r0 + %[__imm_0]);                   \
382         r0 = 0;                                         \
383         exit;                                           \
384 "       :
385         : __imm(bpf_sk_fullsock),
386           __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
387           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
388         : __clobber_all);
389 }
390
/* A 1-byte narrow load of the 4-byte 'type' field on a fullsock is
 * permitted — must pass. */
391 SEC("cgroup/skb")
392 __description("sk_fullsock(skb->sk): sk->type [narrow load]")
393 __success __success_unpriv __retval(0)
394 __naked void sk_sk_type_narrow_load(void)
395 {
396         asm volatile ("                                 \
397         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
398         if r1 != 0 goto l0_%=;                          \
399         r0 = 0;                                         \
400         exit;                                           \
401 l0_%=:  call %[bpf_sk_fullsock];                        \
402         if r0 != 0 goto l1_%=;                          \
403         r0 = 0;                                         \
404         exit;                                           \
405 l1_%=:  r0 = *(u8*)(r0 + %[bpf_sock_type]);             \
406         r0 = 0;                                         \
407         exit;                                           \
408 "       :
409         : __imm(bpf_sk_fullsock),
410           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
411           __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
412         : __clobber_all);
413 }
414
/* A 1-byte narrow load of the 4-byte 'protocol' field on a fullsock is
 * permitted — must pass. */
415 SEC("cgroup/skb")
416 __description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
417 __success __success_unpriv __retval(0)
418 __naked void sk_sk_protocol_narrow_load(void)
419 {
420         asm volatile ("                                 \
421         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
422         if r1 != 0 goto l0_%=;                          \
423         r0 = 0;                                         \
424         exit;                                           \
425 l0_%=:  call %[bpf_sk_fullsock];                        \
426         if r0 != 0 goto l1_%=;                          \
427         r0 = 0;                                         \
428         exit;                                           \
429 l1_%=:  r0 = *(u8*)(r0 + %[bpf_sock_protocol]);         \
430         r0 = 0;                                         \
431         exit;                                           \
432 "       :
433         : __imm(bpf_sk_fullsock),
434           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
435           __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
436         : __clobber_all);
437 }
438
/* A word load starting right after rx_queue_mapping — the last field of
 * struct bpf_sock — is out of bounds and must be rejected. */
439 SEC("cgroup/skb")
440 __description("sk_fullsock(skb->sk): beyond last field")
441 __failure __msg("invalid sock access")
442 __failure_unpriv
443 __naked void skb_sk_beyond_last_field_1(void)
444 {
445         asm volatile ("                                 \
446         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
447         if r1 != 0 goto l0_%=;                          \
448         r0 = 0;                                         \
449         exit;                                           \
450 l0_%=:  call %[bpf_sk_fullsock];                        \
451         if r0 != 0 goto l1_%=;                          \
452         r0 = 0;                                         \
453         exit;                                           \
454 l1_%=:  r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\
455         r0 = 0;                                         \
456         exit;                                           \
457 "       :
458         : __imm(bpf_sk_fullsock),
459           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
460           __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
461         : __clobber_all);
462 }
463
/* bpf_tcp_sock() also requires a non-NULL sock_common argument; passing
 * an unchecked skb->sk must fail the helper's type check. */
464 SEC("cgroup/skb")
465 __description("bpf_tcp_sock(skb->sk): no !skb->sk check")
466 __failure __msg("type=sock_common_or_null expected=sock_common")
467 __failure_unpriv
468 __naked void sk_no_skb_sk_check_2(void)
469 {
470         asm volatile ("                                 \
471         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
472         call %[bpf_tcp_sock];                           \
473         r0 = 0;                                         \
474         exit;                                           \
475 "       :
476         : __imm(bpf_tcp_sock),
477           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
478         : __clobber_all);
479 }
480
/* bpf_tcp_sock() returns tcp_sock_or_null; dereferencing the return
 * value without a NULL check must be rejected. */
481 SEC("cgroup/skb")
482 __description("bpf_tcp_sock(skb->sk): no NULL check on ret")
483 __failure __msg("invalid mem access 'tcp_sock_or_null'")
484 __failure_unpriv
485 __naked void no_null_check_on_ret_2(void)
486 {
487         asm volatile ("                                 \
488         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
489         if r1 != 0 goto l0_%=;                          \
490         r0 = 0;                                         \
491         exit;                                           \
492 l0_%=:  call %[bpf_tcp_sock];                           \
493         r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);    \
494         r0 = 0;                                         \
495         exit;                                           \
496 "       :
497         : __imm(bpf_tcp_sock),
498           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
499           __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
500         : __clobber_all);
501 }
502
/* With both skb->sk and the bpf_tcp_sock() return NULL-checked, reading
 * tp->snd_cwnd is valid — must pass. */
503 SEC("cgroup/skb")
504 __description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
505 __success __success_unpriv __retval(0)
506 __naked void skb_sk_tp_snd_cwnd_1(void)
507 {
508         asm volatile ("                                 \
509         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
510         if r1 != 0 goto l0_%=;                          \
511         r0 = 0;                                         \
512         exit;                                           \
513 l0_%=:  call %[bpf_tcp_sock];                           \
514         if r0 != 0 goto l1_%=;                          \
515         exit;                                           \
516 l1_%=:  r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);    \
517         r0 = 0;                                         \
518         exit;                                           \
519 "       :
520         : __imm(bpf_tcp_sock),
521           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
522           __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
523         : __clobber_all);
524 }
525
/* A u64 load of tp->bytes_acked on a NULL-checked tcp_sock is valid —
 * must pass. */
526 SEC("cgroup/skb")
527 __description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
528 __success __success_unpriv __retval(0)
529 __naked void skb_sk_tp_bytes_acked(void)
530 {
531         asm volatile ("                                 \
532         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
533         if r1 != 0 goto l0_%=;                          \
534         r0 = 0;                                         \
535         exit;                                           \
536 l0_%=:  call %[bpf_tcp_sock];                           \
537         if r0 != 0 goto l1_%=;                          \
538         exit;                                           \
539 l1_%=:  r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \
540         r0 = 0;                                         \
541         exit;                                           \
542 "       :
543         : __imm(bpf_tcp_sock),
544           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
545           __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
546         : __clobber_all);
547 }
548
/* A u64 load starting right after bytes_acked — the last field of
 * struct bpf_tcp_sock — is out of bounds and must be rejected. */
549 SEC("cgroup/skb")
550 __description("bpf_tcp_sock(skb->sk): beyond last field")
551 __failure __msg("invalid tcp_sock access")
552 __failure_unpriv
553 __naked void skb_sk_beyond_last_field_2(void)
554 {
555         asm volatile ("                                 \
556         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
557         if r1 != 0 goto l0_%=;                          \
558         r0 = 0;                                         \
559         exit;                                           \
560 l0_%=:  call %[bpf_tcp_sock];                           \
561         if r0 != 0 goto l1_%=;                          \
562         exit;                                           \
563 l1_%=:  r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\
564         r0 = 0;                                         \
565         exit;                                           \
566 "       :
567         : __imm(bpf_tcp_sock),
568           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
569           __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
570         : __clobber_all);
571 }
572
/* Chaining helpers: bpf_tcp_sock(bpf_sk_fullsock(skb->sk)) with NULL
 * checks at every step, then reading tp->snd_cwnd — must pass. */
573 SEC("cgroup/skb")
574 __description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
575 __success __success_unpriv __retval(0)
576 __naked void skb_sk_tp_snd_cwnd_2(void)
577 {
578         asm volatile ("                                 \
579         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
580         if r1 != 0 goto l0_%=;                          \
581         r0 = 0;                                         \
582         exit;                                           \
583 l0_%=:  call %[bpf_sk_fullsock];                        \
584         if r0 != 0 goto l1_%=;                          \
585         exit;                                           \
586 l1_%=:  r1 = r0;                                        \
587         call %[bpf_tcp_sock];                           \
588         if r0 != 0 goto l2_%=;                          \
589         exit;                                           \
590 l2_%=:  r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);    \
591         r0 = 0;                                         \
592         exit;                                           \
593 "       :
594         : __imm(bpf_sk_fullsock),
595           __imm(bpf_tcp_sock),
596           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
597           __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
598         : __clobber_all);
599 }
600
/* skb->sk is not a reference-counted (acquired) pointer, so releasing it
 * with bpf_sk_release() must be rejected. */
601 SEC("tc")
602 __description("bpf_sk_release(skb->sk)")
603 __failure __msg("R1 must be referenced when passed to release function")
604 __naked void bpf_sk_release_skb_sk(void)
605 {
606         asm volatile ("                                 \
607         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
608         if r1 == 0 goto l0_%=;                          \
609         call %[bpf_sk_release];                         \
610 l0_%=:  r0 = 0;                                         \
611         exit;                                           \
612 "       :
613         : __imm(bpf_sk_release),
614           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
615         : __clobber_all);
616 }
617
/* The pointer returned by bpf_sk_fullsock() carries no reference either,
 * so passing it to bpf_sk_release() must be rejected. */
618 SEC("tc")
619 __description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
620 __failure __msg("R1 must be referenced when passed to release function")
621 __naked void bpf_sk_fullsock_skb_sk(void)
622 {
623         asm volatile ("                                 \
624         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
625         if r1 != 0 goto l0_%=;                          \
626         r0 = 0;                                         \
627         exit;                                           \
628 l0_%=:  call %[bpf_sk_fullsock];                        \
629         if r0 != 0 goto l1_%=;                          \
630         exit;                                           \
631 l1_%=:  r1 = r0;                                        \
632         call %[bpf_sk_release];                         \
633         r0 = 1;                                         \
634         exit;                                           \
635 "       :
636         : __imm(bpf_sk_fullsock),
637           __imm(bpf_sk_release),
638           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
639         : __clobber_all);
640 }
641
/* Likewise, the pointer returned by bpf_tcp_sock() is unreferenced and
 * must not be passed to bpf_sk_release(). */
642 SEC("tc")
643 __description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
644 __failure __msg("R1 must be referenced when passed to release function")
645 __naked void bpf_tcp_sock_skb_sk(void)
646 {
647         asm volatile ("                                 \
648         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
649         if r1 != 0 goto l0_%=;                          \
650         r0 = 0;                                         \
651         exit;                                           \
652 l0_%=:  call %[bpf_tcp_sock];                           \
653         if r0 != 0 goto l1_%=;                          \
654         exit;                                           \
655 l1_%=:  r1 = r0;                                        \
656         call %[bpf_sk_release];                         \
657         r0 = 1;                                         \
658         exit;                                           \
659 "       :
660         : __imm(bpf_sk_release),
661           __imm(bpf_tcp_sock),
662           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
663         : __clobber_all);
664 }
665
/* bpf_sk_storage_get(map, sk, NULL, 0): a NULL value pointer with flags 0
 * is a valid call on a NULL-checked fullsock — must pass. */
666 SEC("tc")
667 __description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
668 __success __retval(0)
669 __naked void sk_null_0_value_null(void)
670 {
671         asm volatile ("                                 \
672         r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
673         if r1 != 0 goto l0_%=;                          \
674         r0 = 0;                                         \
675         exit;                                           \
676 l0_%=:  call %[bpf_sk_fullsock];                        \
677         if r0 != 0 goto l1_%=;                          \
678         r0 = 0;                                         \
679         exit;                                           \
680 l1_%=:  r4 = 0;                                         \
681         r3 = 0;                                         \
682         r2 = r0;                                        \
683         r1 = %[sk_storage_map] ll;                      \
684         call %[bpf_sk_storage_get];                     \
685         r0 = 0;                                         \
686         exit;                                           \
687 "       :
688         : __imm(bpf_sk_fullsock),
689           __imm(bpf_sk_storage_get),
690           __imm_addr(sk_storage_map),
691           __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
692         : __clobber_all);
693 }
694
/* Passing a scalar (r3 = 1) as the value argument of
 * bpf_sk_storage_get() must be rejected: with non-zero flags the
 * helper expects a pointer to stack ("R3 type=scalar expected=fp").
 */
SEC("tc")
__description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
__failure __msg("R3 type=scalar expected=fp")
__naked void sk_1_1_value_1(void)
{
        asm volatile ("                                 \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
        if r1 != 0 goto l0_%=;                          \
        r0 = 0;                                         \
        exit;                                           \
l0_%=:  call %[bpf_sk_fullsock];                        \
        if r0 != 0 goto l1_%=;                          \
        r0 = 0;                                         \
        exit;                                           \
l1_%=:  r4 = 1;                                         \
        r3 = 1;                                         \
        r2 = r0;                                        \
        r1 = %[sk_storage_map] ll;                      \
        call %[bpf_sk_storage_get];                     \
        r0 = 0;                                         \
        exit;                                           \
"       :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_storage_get),
          __imm_addr(sk_storage_map),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}
723
/* Valid use of bpf_sk_storage_get() with flags = 1
 * (BPF_SK_STORAGE_GET_F_CREATE): the value argument is a pointer to an
 * initialized 8-byte stack slot (r10 - 8), which the verifier accepts.
 */
SEC("tc")
__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
__success __retval(0)
__naked void stack_value_1_stack_value(void)
{
        asm volatile ("                                 \
        r2 = 0;                                         \
        *(u64*)(r10 - 8) = r2;                          \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
        if r1 != 0 goto l0_%=;                          \
        r0 = 0;                                         \
        exit;                                           \
l0_%=:  call %[bpf_sk_fullsock];                        \
        if r0 != 0 goto l1_%=;                          \
        r0 = 0;                                         \
        exit;                                           \
l1_%=:  r4 = 1;                                         \
        r3 = r10;                                       \
        r3 += -8;                                       \
        r2 = r0;                                        \
        r1 = %[sk_storage_map] ll;                      \
        call %[bpf_sk_storage_get];                     \
        r0 = 0;                                         \
        exit;                                           \
"       :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_storage_get),
          __imm_addr(sk_storage_map),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}
755
/* The generic bpf_map_lookup_elem() helper must not accept a
 * BPF_MAP_TYPE_SK_STORAGE map (map_type 24); sk-storage maps may only
 * be accessed through the dedicated bpf_sk_storage_* helpers.
 */
SEC("tc")
__description("bpf_map_lookup_elem(smap, &key)")
__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
__naked void map_lookup_elem_smap_key(void)
{
        asm volatile ("                                 \
        r1 = 0;                                         \
        *(u32*)(r10 - 4) = r1;                          \
        r2 = r10;                                       \
        r2 += -4;                                       \
        r1 = %[sk_storage_map] ll;                      \
        call %[bpf_map_lookup_elem];                    \
        r0 = 0;                                         \
        exit;                                           \
"       :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(sk_storage_map)
        : __clobber_all);
}
775
/* An XDP program may look up an XSKMAP entry and, after a null check,
 * read the queue_id field of the returned struct bpf_xdp_sock.
 */
SEC("xdp")
__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
__success __retval(0)
__naked void xskmap_key_xs_queue_id(void)
{
        asm volatile ("                                 \
        r1 = 0;                                         \
        *(u32*)(r10 - 8) = r1;                          \
        r2 = r10;                                       \
        r2 += -8;                                       \
        r1 = %[map_xskmap] ll;                          \
        call %[bpf_map_lookup_elem];                    \
        if r0 != 0 goto l0_%=;                          \
        exit;                                           \
l0_%=:  r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]);    \
        r0 = 0;                                         \
        exit;                                           \
"       :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_xskmap),
          __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
        : __clobber_all);
}
799
/* Looking up a sockmap entry acquires a socket reference; exiting
 * without calling bpf_sk_release() must be rejected.  The expected
 * message pins the acquiring instruction index (alloc_insn=6), so the
 * instruction sequence here must not change.
 */
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockmap_key(void)
{
        asm volatile ("                                 \
        r1 = 0;                                         \
        *(u32*)(r10 - 4) = r1;                          \
        r2 = r10;                                       \
        r2 += -4;                                       \
        r1 = %[map_sockmap] ll;                         \
        call %[bpf_map_lookup_elem];                    \
        r0 = 0;                                         \
        exit;                                           \
"       :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_sockmap)
        : __clobber_all);
}
819
/* Same as the sockmap case above but for BPF_MAP_TYPE_SOCKHASH: a
 * looked-up socket reference that is never released must fail
 * verification.  alloc_insn=6 in the message pins the instruction
 * layout, so the sequence must stay as-is.
 */
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockhash_key(void)
{
        asm volatile ("                                 \
        r1 = 0;                                         \
        *(u32*)(r10 - 4) = r1;                          \
        r2 = r10;                                       \
        r2 += -4;                                       \
        r1 = %[map_sockhash] ll;                        \
        call %[bpf_map_lookup_elem];                    \
        r0 = 0;                                         \
        exit;                                           \
"       :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_sockhash)
        : __clobber_all);
}
839
/* A socket obtained from a sockmap lookup may have its fullsock field
 * sk->type read directly, provided the reference is then released with
 * bpf_sk_release(); the verifier accepts this sequence.
 */
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_1(void)
{
        asm volatile ("                                 \
        r1 = 0;                                         \
        *(u32*)(r10 - 4) = r1;                          \
        r2 = r10;                                       \
        r2 += -4;                                       \
        r1 = %[map_sockmap] ll;                         \
        call %[bpf_map_lookup_elem];                    \
        if r0 != 0 goto l0_%=;                          \
        exit;                                           \
l0_%=:  r1 = r0;                                        \
        r0 = *(u32*)(r0 + %[bpf_sock_type]);            \
        call %[bpf_sk_release];                         \
        exit;                                           \
"       :
        : __imm(bpf_map_lookup_elem),
          __imm(bpf_sk_release),
          __imm_addr(map_sockmap),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}
865
/* Sockhash counterpart of the previous test: read the fullsock field
 * sk->type from a looked-up socket, then release the reference with
 * bpf_sk_release(); the verifier accepts this sequence.
 */
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_2(void)
{
        asm volatile ("                                 \
        r1 = 0;                                         \
        *(u32*)(r10 - 4) = r1;                          \
        r2 = r10;                                       \
        r2 += -4;                                       \
        r1 = %[map_sockhash] ll;                        \
        call %[bpf_map_lookup_elem];                    \
        if r0 != 0 goto l0_%=;                          \
        exit;                                           \
l0_%=:  r1 = r0;                                        \
        r0 = *(u32*)(r0 + %[bpf_sock_type]);            \
        call %[bpf_sk_release];                         \
        exit;                                           \
"       :
        : __imm(bpf_map_lookup_elem),
          __imm(bpf_sk_release),
          __imm_addr(map_sockhash),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}
891
/* bpf_sk_select_reuseport() accepts a BPF_MAP_TYPE_REUSEPORT_SOCKARRAY
 * map with a pointer-to-stack key (r3) and flags = 0 (r4).
 */
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
__success
__naked void ctx_reuseport_array_key_flags(void)
{
        asm volatile ("                                 \
        r4 = 0;                                         \
        r2 = 0;                                         \
        *(u32*)(r10 - 4) = r2;                          \
        r3 = r10;                                       \
        r3 += -4;                                       \
        r2 = %[map_reuseport_array] ll;                 \
        call %[bpf_sk_select_reuseport];                \
        exit;                                           \
"       :
        : __imm(bpf_sk_select_reuseport),
          __imm_addr(map_reuseport_array)
        : __clobber_all);
}
911
/* bpf_sk_select_reuseport() also accepts a BPF_MAP_TYPE_SOCKMAP map. */
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
__success
__naked void reuseport_ctx_sockmap_key_flags(void)
{
        asm volatile ("                                 \
        r4 = 0;                                         \
        r2 = 0;                                         \
        *(u32*)(r10 - 4) = r2;                          \
        r3 = r10;                                       \
        r3 += -4;                                       \
        r2 = %[map_sockmap] ll;                         \
        call %[bpf_sk_select_reuseport];                \
        exit;                                           \
"       :
        : __imm(bpf_sk_select_reuseport),
          __imm_addr(map_sockmap)
        : __clobber_all);
}
931
932 SEC("sk_reuseport")
933 __description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
934 __success
935 __naked void reuseport_ctx_sockhash_key_flags(void)
936 {
937         asm volatile ("                                 \
938         r4 = 0;                                         \
939         r2 = 0;                                         \
940         *(u32*)(r10 - 4) = r2;                          \
941         r3 = r10;                                       \
942         r3 += -4;                                       \
943         r2 = %[map_sockmap] ll;                         \
944         call %[bpf_sk_select_reuseport];                \
945         exit;                                           \
946 "       :
947         : __imm(bpf_sk_select_reuseport),
948           __imm_addr(map_sockmap)
949         : __clobber_all);
950 }
951
/* Return values of the bpf_skc_to_*() casting helpers must each carry
 * their own null mark: here only r8 (bpf_skc_to_tcp_request_sock
 * result) is null-checked, so dereferencing r7 (bpf_skc_to_tcp_sock
 * result) must still be rejected as a possible NULL access.
 */
SEC("tc")
__description("mark null check on return value of bpf_skc_to helpers")
__failure __msg("invalid mem access")
__naked void of_bpf_skc_to_helpers(void)
{
        asm volatile ("                                 \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]);             \
        if r1 != 0 goto l0_%=;                          \
        r0 = 0;                                         \
        exit;                                           \
l0_%=:  r6 = r1;                                        \
        call %[bpf_skc_to_tcp_sock];                    \
        r7 = r0;                                        \
        r1 = r6;                                        \
        call %[bpf_skc_to_tcp_request_sock];            \
        r8 = r0;                                        \
        if r8 != 0 goto l1_%=;                          \
        r0 = 0;                                         \
        exit;                                           \
l1_%=:  r0 = *(u8*)(r7 + 0);                            \
        exit;                                           \
"       :
        : __imm(bpf_skc_to_tcp_request_sock),
          __imm(bpf_skc_to_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}
979
980 char _license[] SEC("license") = "GPL";