1 // SPDX-License-Identifier: GPL-2.0
2 /* Converted from tools/testing/selftests/bpf/verifier/sock.c */
5 #include <bpf/bpf_helpers.h>
/* sizeof_field(TYPE, MEMBER): size in bytes of MEMBER within TYPE.
 * The null-pointer member access is never evaluated — sizeof only
 * inspects the type — so this is well-defined at compile time.
 */
8 #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
/* offsetofend(TYPE, MEMBER): offset of the first byte PAST MEMBER.
 * The tests below use it to issue loads just beyond a field's valid
 * range and assert the verifier rejects them.
 */
9 #define offsetofend(TYPE, MEMBER) \
10 (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
13 __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
14 __uint(max_entries, 1);
17 } map_reuseport_array SEC(".maps");
20 __uint(type, BPF_MAP_TYPE_SOCKHASH);
21 __uint(max_entries, 1);
24 } map_sockhash SEC(".maps");
27 __uint(type, BPF_MAP_TYPE_SOCKMAP);
28 __uint(max_entries, 1);
31 } map_sockmap SEC(".maps");
34 __uint(type, BPF_MAP_TYPE_XSKMAP);
35 __uint(max_entries, 1);
38 } map_xskmap SEC(".maps");
42 struct bpf_spin_lock l;
46 __uint(type, BPF_MAP_TYPE_SK_STORAGE);
47 __uint(max_entries, 0);
49 __type(value, struct val);
50 __uint(map_flags, BPF_F_NO_PREALLOC);
51 } sk_storage_map SEC(".maps");
54 __description("skb->sk: no NULL check")
55 __failure __msg("invalid mem access 'sock_common_or_null'")
57 __naked void skb_sk_no_null_check(void)
60 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
61 r0 = *(u32*)(r1 + 0); \
65 : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
70 __description("skb->sk: sk->family [non fullsock field]")
71 __success __success_unpriv __retval(0)
72 __naked void sk_family_non_fullsock_field_1(void)
75 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
76 if r1 != 0 goto l0_%=; \
79 l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \
83 : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
84 __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
89 __description("skb->sk: sk->type [fullsock field]")
90 __failure __msg("invalid sock_common access")
92 __naked void sk_sk_type_fullsock_field_1(void)
95 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
96 if r1 != 0 goto l0_%=; \
99 l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_type]); \
103 : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
104 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
109 __description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
110 __failure __msg("type=sock_common_or_null expected=sock_common")
112 __naked void sk_no_skb_sk_check_1(void)
115 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
116 call %[bpf_sk_fullsock]; \
120 : __imm(bpf_sk_fullsock),
121 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
126 __description("sk_fullsock(skb->sk): no NULL check on ret")
127 __failure __msg("invalid mem access 'sock_or_null'")
129 __naked void no_null_check_on_ret_1(void)
132 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
133 if r1 != 0 goto l0_%=; \
136 l0_%=: call %[bpf_sk_fullsock]; \
137 r0 = *(u32*)(r0 + %[bpf_sock_type]); \
141 : __imm(bpf_sk_fullsock),
142 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
143 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
148 __description("sk_fullsock(skb->sk): sk->type [fullsock field]")
149 __success __success_unpriv __retval(0)
150 __naked void sk_sk_type_fullsock_field_2(void)
153 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
154 if r1 != 0 goto l0_%=; \
157 l0_%=: call %[bpf_sk_fullsock]; \
158 if r0 != 0 goto l1_%=; \
161 l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \
165 : __imm(bpf_sk_fullsock),
166 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
167 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
172 __description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
173 __success __success_unpriv __retval(0)
174 __naked void sk_family_non_fullsock_field_2(void)
177 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
178 if r1 != 0 goto l0_%=; \
181 l0_%=: call %[bpf_sk_fullsock]; \
182 if r0 != 0 goto l1_%=; \
184 l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \
188 : __imm(bpf_sk_fullsock),
189 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
190 __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
195 __description("sk_fullsock(skb->sk): sk->state [narrow load]")
196 __success __success_unpriv __retval(0)
197 __naked void sk_sk_state_narrow_load(void)
200 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
201 if r1 != 0 goto l0_%=; \
204 l0_%=: call %[bpf_sk_fullsock]; \
205 if r0 != 0 goto l1_%=; \
208 l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \
212 : __imm(bpf_sk_fullsock),
213 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
214 __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
219 __description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
220 __success __success_unpriv __retval(0)
221 __naked void port_word_load_backward_compatibility(void)
224 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
225 if r1 != 0 goto l0_%=; \
228 l0_%=: call %[bpf_sk_fullsock]; \
229 if r0 != 0 goto l1_%=; \
232 l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \
236 : __imm(bpf_sk_fullsock),
237 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
238 __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
243 __description("sk_fullsock(skb->sk): sk->dst_port [half load]")
244 __success __success_unpriv __retval(0)
245 __naked void sk_dst_port_half_load(void)
248 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
249 if r1 != 0 goto l0_%=; \
252 l0_%=: call %[bpf_sk_fullsock]; \
253 if r0 != 0 goto l1_%=; \
256 l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \
260 : __imm(bpf_sk_fullsock),
261 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
262 __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
267 __description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
268 __failure __msg("invalid sock access")
270 __naked void dst_port_half_load_invalid_1(void)
273 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
274 if r1 != 0 goto l0_%=; \
277 l0_%=: call %[bpf_sk_fullsock]; \
278 if r0 != 0 goto l1_%=; \
281 l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \
285 : __imm(bpf_sk_fullsock),
286 __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
287 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
292 __description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
293 __success __success_unpriv __retval(0)
294 __naked void sk_dst_port_byte_load(void)
297 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
298 if r1 != 0 goto l0_%=; \
301 l0_%=: call %[bpf_sk_fullsock]; \
302 if r0 != 0 goto l1_%=; \
305 l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \
306 r2 = *(u8*)(r0 + %[__imm_0]); \
310 : __imm(bpf_sk_fullsock),
311 __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
312 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
313 __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
318 __description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
319 __failure __msg("invalid sock access")
321 __naked void dst_port_byte_load_invalid(void)
324 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
325 if r1 != 0 goto l0_%=; \
328 l0_%=: call %[bpf_sk_fullsock]; \
329 if r0 != 0 goto l1_%=; \
332 l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
336 : __imm(bpf_sk_fullsock),
337 __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
338 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
343 __description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
344 __failure __msg("invalid sock access")
346 __naked void dst_port_half_load_invalid_2(void)
349 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
350 if r1 != 0 goto l0_%=; \
353 l0_%=: call %[bpf_sk_fullsock]; \
354 if r0 != 0 goto l1_%=; \
357 l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \
361 : __imm(bpf_sk_fullsock),
362 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
363 __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
368 __description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
369 __success __success_unpriv __retval(0)
370 __naked void dst_ip6_load_2nd_byte(void)
373 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
374 if r1 != 0 goto l0_%=; \
377 l0_%=: call %[bpf_sk_fullsock]; \
378 if r0 != 0 goto l1_%=; \
381 l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
385 : __imm(bpf_sk_fullsock),
386 __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
387 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
392 __description("sk_fullsock(skb->sk): sk->type [narrow load]")
393 __success __success_unpriv __retval(0)
394 __naked void sk_sk_type_narrow_load(void)
397 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
398 if r1 != 0 goto l0_%=; \
401 l0_%=: call %[bpf_sk_fullsock]; \
402 if r0 != 0 goto l1_%=; \
405 l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \
409 : __imm(bpf_sk_fullsock),
410 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
411 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
416 __description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
417 __success __success_unpriv __retval(0)
418 __naked void sk_sk_protocol_narrow_load(void)
421 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
422 if r1 != 0 goto l0_%=; \
425 l0_%=: call %[bpf_sk_fullsock]; \
426 if r0 != 0 goto l1_%=; \
429 l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \
433 : __imm(bpf_sk_fullsock),
434 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
435 __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
440 __description("sk_fullsock(skb->sk): beyond last field")
441 __failure __msg("invalid sock access")
443 __naked void skb_sk_beyond_last_field_1(void)
446 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
447 if r1 != 0 goto l0_%=; \
450 l0_%=: call %[bpf_sk_fullsock]; \
451 if r0 != 0 goto l1_%=; \
454 l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\
458 : __imm(bpf_sk_fullsock),
459 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
460 __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
465 __description("bpf_tcp_sock(skb->sk): no !skb->sk check")
466 __failure __msg("type=sock_common_or_null expected=sock_common")
468 __naked void sk_no_skb_sk_check_2(void)
471 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
472 call %[bpf_tcp_sock]; \
476 : __imm(bpf_tcp_sock),
477 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
482 __description("bpf_tcp_sock(skb->sk): no NULL check on ret")
483 __failure __msg("invalid mem access 'tcp_sock_or_null'")
485 __naked void no_null_check_on_ret_2(void)
488 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
489 if r1 != 0 goto l0_%=; \
492 l0_%=: call %[bpf_tcp_sock]; \
493 r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
497 : __imm(bpf_tcp_sock),
498 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
499 __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
504 __description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
505 __success __success_unpriv __retval(0)
506 __naked void skb_sk_tp_snd_cwnd_1(void)
509 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
510 if r1 != 0 goto l0_%=; \
513 l0_%=: call %[bpf_tcp_sock]; \
514 if r0 != 0 goto l1_%=; \
516 l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
520 : __imm(bpf_tcp_sock),
521 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
522 __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
527 __description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
528 __success __success_unpriv __retval(0)
529 __naked void skb_sk_tp_bytes_acked(void)
532 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
533 if r1 != 0 goto l0_%=; \
536 l0_%=: call %[bpf_tcp_sock]; \
537 if r0 != 0 goto l1_%=; \
539 l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \
543 : __imm(bpf_tcp_sock),
544 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
545 __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
550 __description("bpf_tcp_sock(skb->sk): beyond last field")
551 __failure __msg("invalid tcp_sock access")
553 __naked void skb_sk_beyond_last_field_2(void)
556 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
557 if r1 != 0 goto l0_%=; \
560 l0_%=: call %[bpf_tcp_sock]; \
561 if r0 != 0 goto l1_%=; \
563 l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\
567 : __imm(bpf_tcp_sock),
568 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
569 __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
574 __description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
575 __success __success_unpriv __retval(0)
576 __naked void skb_sk_tp_snd_cwnd_2(void)
579 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
580 if r1 != 0 goto l0_%=; \
583 l0_%=: call %[bpf_sk_fullsock]; \
584 if r0 != 0 goto l1_%=; \
587 call %[bpf_tcp_sock]; \
588 if r0 != 0 goto l2_%=; \
590 l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
594 : __imm(bpf_sk_fullsock),
596 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
597 __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
602 __description("bpf_sk_release(skb->sk)")
603 __failure __msg("R1 must be referenced when passed to release function")
604 __naked void bpf_sk_release_skb_sk(void)
607 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
608 if r1 == 0 goto l0_%=; \
609 call %[bpf_sk_release]; \
613 : __imm(bpf_sk_release),
614 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
619 __description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
620 __failure __msg("R1 must be referenced when passed to release function")
621 __naked void bpf_sk_fullsock_skb_sk(void)
624 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
625 if r1 != 0 goto l0_%=; \
628 l0_%=: call %[bpf_sk_fullsock]; \
629 if r0 != 0 goto l1_%=; \
632 call %[bpf_sk_release]; \
636 : __imm(bpf_sk_fullsock),
637 __imm(bpf_sk_release),
638 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
643 __description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
644 __failure __msg("R1 must be referenced when passed to release function")
645 __naked void bpf_tcp_sock_skb_sk(void)
648 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
649 if r1 != 0 goto l0_%=; \
652 l0_%=: call %[bpf_tcp_sock]; \
653 if r0 != 0 goto l1_%=; \
656 call %[bpf_sk_release]; \
660 : __imm(bpf_sk_release),
662 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
667 __description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
668 __success __retval(0)
669 __naked void sk_null_0_value_null(void)
672 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
673 if r1 != 0 goto l0_%=; \
676 l0_%=: call %[bpf_sk_fullsock]; \
677 if r0 != 0 goto l1_%=; \
683 r1 = %[sk_storage_map] ll; \
684 call %[bpf_sk_storage_get]; \
688 : __imm(bpf_sk_fullsock),
689 __imm(bpf_sk_storage_get),
690 __imm_addr(sk_storage_map),
691 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
696 __description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
697 __failure __msg("R3 type=scalar expected=fp")
698 __naked void sk_1_1_value_1(void)
701 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
702 if r1 != 0 goto l0_%=; \
705 l0_%=: call %[bpf_sk_fullsock]; \
706 if r0 != 0 goto l1_%=; \
712 r1 = %[sk_storage_map] ll; \
713 call %[bpf_sk_storage_get]; \
717 : __imm(bpf_sk_fullsock),
718 __imm(bpf_sk_storage_get),
719 __imm_addr(sk_storage_map),
720 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
725 __description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
726 __success __retval(0)
727 __naked void stack_value_1_stack_value(void)
731 *(u64*)(r10 - 8) = r2; \
732 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
733 if r1 != 0 goto l0_%=; \
736 l0_%=: call %[bpf_sk_fullsock]; \
737 if r0 != 0 goto l1_%=; \
744 r1 = %[sk_storage_map] ll; \
745 call %[bpf_sk_storage_get]; \
749 : __imm(bpf_sk_fullsock),
750 __imm(bpf_sk_storage_get),
751 __imm_addr(sk_storage_map),
752 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
757 __description("bpf_map_lookup_elem(smap, &key)")
758 __failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
759 __naked void map_lookup_elem_smap_key(void)
763 *(u32*)(r10 - 4) = r1; \
766 r1 = %[sk_storage_map] ll; \
767 call %[bpf_map_lookup_elem]; \
771 : __imm(bpf_map_lookup_elem),
772 __imm_addr(sk_storage_map)
777 __description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
778 __success __retval(0)
779 __naked void xskmap_key_xs_queue_id(void)
783 *(u32*)(r10 - 8) = r1; \
786 r1 = %[map_xskmap] ll; \
787 call %[bpf_map_lookup_elem]; \
788 if r0 != 0 goto l0_%=; \
790 l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \
794 : __imm(bpf_map_lookup_elem),
795 __imm_addr(map_xskmap),
796 __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
801 __description("bpf_map_lookup_elem(sockmap, &key)")
802 __failure __msg("Unreleased reference id=2 alloc_insn=6")
803 __naked void map_lookup_elem_sockmap_key(void)
807 *(u32*)(r10 - 4) = r1; \
810 r1 = %[map_sockmap] ll; \
811 call %[bpf_map_lookup_elem]; \
815 : __imm(bpf_map_lookup_elem),
816 __imm_addr(map_sockmap)
821 __description("bpf_map_lookup_elem(sockhash, &key)")
822 __failure __msg("Unreleased reference id=2 alloc_insn=6")
823 __naked void map_lookup_elem_sockhash_key(void)
827 *(u32*)(r10 - 4) = r1; \
830 r1 = %[map_sockhash] ll; \
831 call %[bpf_map_lookup_elem]; \
835 : __imm(bpf_map_lookup_elem),
836 __imm_addr(map_sockhash)
841 __description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
843 __naked void field_bpf_sk_release_sk_1(void)
847 *(u32*)(r10 - 4) = r1; \
850 r1 = %[map_sockmap] ll; \
851 call %[bpf_map_lookup_elem]; \
852 if r0 != 0 goto l0_%=; \
855 r0 = *(u32*)(r0 + %[bpf_sock_type]); \
856 call %[bpf_sk_release]; \
859 : __imm(bpf_map_lookup_elem),
860 __imm(bpf_sk_release),
861 __imm_addr(map_sockmap),
862 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
867 __description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
869 __naked void field_bpf_sk_release_sk_2(void)
873 *(u32*)(r10 - 4) = r1; \
876 r1 = %[map_sockhash] ll; \
877 call %[bpf_map_lookup_elem]; \
878 if r0 != 0 goto l0_%=; \
881 r0 = *(u32*)(r0 + %[bpf_sock_type]); \
882 call %[bpf_sk_release]; \
885 : __imm(bpf_map_lookup_elem),
886 __imm(bpf_sk_release),
887 __imm_addr(map_sockhash),
888 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
893 __description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
895 __naked void ctx_reuseport_array_key_flags(void)
900 *(u32*)(r10 - 4) = r2; \
903 r2 = %[map_reuseport_array] ll; \
904 call %[bpf_sk_select_reuseport]; \
907 : __imm(bpf_sk_select_reuseport),
908 __imm_addr(map_reuseport_array)
913 __description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
915 __naked void reuseport_ctx_sockmap_key_flags(void)
920 *(u32*)(r10 - 4) = r2; \
923 r2 = %[map_sockmap] ll; \
924 call %[bpf_sk_select_reuseport]; \
927 : __imm(bpf_sk_select_reuseport),
928 __imm_addr(map_sockmap)
933 __description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
935 __naked void reuseport_ctx_sockhash_key_flags(void)
940 *(u32*)(r10 - 4) = r2; \
943 r2 = %[map_sockmap] ll; \
944 call %[bpf_sk_select_reuseport]; \
947 : __imm(bpf_sk_select_reuseport),
948 __imm_addr(map_sockmap)
953 __description("mark null check on return value of bpf_skc_to helpers")
954 __failure __msg("invalid mem access")
955 __naked void of_bpf_skc_to_helpers(void)
958 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
959 if r1 != 0 goto l0_%=; \
963 call %[bpf_skc_to_tcp_sock]; \
966 call %[bpf_skc_to_tcp_request_sock]; \
968 if r8 != 0 goto l1_%=; \
971 l1_%=: r0 = *(u8*)(r7 + 0); \
974 : __imm(bpf_skc_to_tcp_request_sock),
975 __imm(bpf_skc_to_tcp_sock),
976 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
/* BPF programs must declare a GPL-compatible license to call GPL-only helpers. */
980 char _license[] SEC("license") = "GPL";