// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
	if (in_compat_syscall()) {
		struct compat_sock_fprog f32;

		if (len != sizeof(f32))
			return -EINVAL;
		if (copy_from_sockptr(&f32, src, sizeof(f32)))
			return -EFAULT;
		memset(dst, 0, sizeof(*dst));
		dst->len = f32.len;
		dst->filter = compat_ptr(f32.filter);
	} else {
		if (len != sizeof(*dst))
			return -EINVAL;
		if (copy_from_sockptr(dst, src, sizeof(*dst)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
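
/* Illustrative sketch (not part of the original file): how a protocol's
 * receive path typically consumes this wrapper via sk_filter(), which is
 * sk_filter_trim_cap() with a cap of 1 byte. The names used below come
 * from this file or linux/filter.h; the surrounding receive function is
 * hypothetical.
 */
#if 0
static int example_sock_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* Run the attached filter; non-zero means the program asked us
	 * to toss the packet (-EPERM) or trimming failed.
	 */
	int err = sk_filter(sk, skb);

	if (err) {
		kfree_skb(skb);
		return err;
	}
	/* skb may have been trimmed to the length the program returned. */
	return __sock_queue_rcv_skb(sk, skb);
}
#endif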
BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}
BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}
BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}
BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}
BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	__be16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}
BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	__be32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		break;
	case SKF_AD_VLAN_TAG_PRESENT:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		break;
	}

	return insn - insn_buf;
}
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}
static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		bool ldx_off_ok = offset <= S16_MAX;

		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		if (offset)
			*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
				      size, 2 + endian + (!ldx_off_ok * 2));
		if (ldx_off_ok) {
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_D, offset);
		} else {
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_TMP, 0);
		}
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn   = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourself. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remaped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4  + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD  | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4  + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	if (new_prog)
		bpf_prog_free(new_prog);
	return -EINVAL;
}
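
/* Illustrative sketch (not part of the original file): the two-phase calling
 * convention from the kernel-doc above, as bpf_migrate_filter() below also
 * uses it. Pass NULL as new_prog to size the output, allocate, then call
 * again to emit instructions. Variable names here are hypothetical.
 */
#if 0
	bool seen_ld_abs = false;
	int new_len, err;

	/* 1st pass: only compute the resulting eBPF program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out;

	fp = bpf_prog_realloc(fp, bpf_prog_size(new_len), 0);
	if (!fp)
		goto out;

	/* 2nd pass: emit insns; internally it re-runs until the jump
	 * offsets converge (at most three passes, see the pass counter
	 * above).
	 */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
#endif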
/*
 * As we dont want to clear mem[] array for each packet going through
 * __bpf_prog_run(), we check that filter loaded by user never try to read
 * a cell if not previously written, and we check all branches to be sure
 * a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
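
/* Illustrative sketch (not part of the original file): a classic BPF
 * program that check_load_and_stores() rejects, because M[0] is read
 * before any ST/STX instruction has written it.
 */
#if 0
static const struct sock_filter example_bad_mem[] = {
	BPF_STMT(BPF_LD | BPF_MEM, 0),	/* A = M[0], but M[0] is unwritten */
	BPF_STMT(BPF_RET | BPF_K, 0),
};
/* Prepending BPF_STMT(BPF_ST, 0) would make the program pass the check. */
#endif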
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}
static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}
/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}
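
/* Illustrative sketch (not part of the original file): the smallest filter
 * that passes bpf_check_classic() — a single RET accepting the whole
 * packet. Every valid classic program must end in one of the two RET
 * forms checked above.
 */
#if 0
static const struct sock_filter example_accept_all[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* keep up to 4 GB */
};
#endif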
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}
static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}
static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}
static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}
/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}
/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}
static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}
/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);
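
/* Illustrative sketch (not part of the original file): in-kernel usage in
 * the style of driver match filters. The filter array, function name and
 * error handling are hypothetical.
 */
#if 0
static struct bpf_prog *example_build_filter(void)
{
	static struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept all */
	};
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};
	struct bpf_prog *prog;

	if (bpf_prog_create(&prog, &fprog))
		return NULL;
	/* Run with BPF_PROG_RUN(prog, skb); free with bpf_prog_destroy(). */
	return prog;
}
#endif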
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}
static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
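
/* Illustrative sketch (not part of the original file): the userspace path
 * that ends up in sk_attach_filter(), via setsockopt(SO_ATTACH_FILTER).
 * This compiles as a normal userspace program, not as part of the kernel.
 */
#if 0
#include <linux/filter.h>
#include <sys/socket.h>

static int example_attach(int fd)
{
	struct sock_filter code[] = {
		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	/* accept all */
	};
	struct sock_fprog bpf = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
}
#endif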
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		err = -ENOMEM;
	else
		err = reuseport_attach_prog(sk, prog);

	if (err)
		__bpf_prog_release(prog);

	return err;
}
static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (PTR_ERR(prog) == -EINVAL)
		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non BPF_PROG_TYPE_SOCKET_FILTER
		 * bpf prog (e.g. sockmap).  It depends on the
		 * limitation imposed by bpf_prog_load().
		 * Hence, sysctl_optmem_max is not checked.
		 */
		if ((sk->sk_type != SOCK_STREAM &&
		     sk->sk_type != SOCK_DGRAM) ||
		    (sk->sk_protocol != IPPROTO_UDP &&
		     sk->sk_protocol != IPPROTO_TCP) ||
		    (sk->sk_family != AF_INET &&
		     sk->sk_family != AF_INET6)) {
			err = -ENOTSUPP;
			goto err_prog_put;
		}
	} else {
		/* BPF_PROG_TYPE_SOCKET_FILTER */
		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
			err = -ENOMEM;
			goto err_prog_put;
		}
	}

	err = reuseport_attach_prog(sk, prog);
err_prog_put:
	if (err)
		bpf_prog_put(prog);

	return err;
}
void sk_reuseport_prog_free(struct bpf_prog *prog)
{
	if (!prog)
		return;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		bpf_prog_put(prog);
	else
		bpf_prog_destroy(prog);
}
struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}
static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}
static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}
static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
BPF_CALL_4(bpf_flow_dissector_load_bytes,
	   const struct bpf_flow_dissector *, ctx, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	if (unlikely(!ctx->skb))
		goto err_clear;

	ptr = skb_header_pointer(ctx->skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}
static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
	.func		= bpf_flow_dissector_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
	   u32, offset, void *, to, u32, len, u32, start_header)
{
	u8 *end = skb_tail_pointer(skb);
	u8 *start, *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	switch (start_header) {
	case BPF_HDR_START_MAC:
		if (unlikely(!skb_mac_header_was_set(skb)))
			goto err_clear;
		start = skb_mac_header(skb);
		break;
	case BPF_HDR_START_NET:
		start = skb_network_header(skb);
		break;
	default:
		goto err_clear;
	}

	ptr = start + offset;

	if (likely(ptr + len <= end)) {
		memcpy(to, ptr, len);
		return 0;
	}

err_clear:
	memset(to, 0, len);
	return -EFAULT;
}
static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
	.func		= bpf_skb_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}
static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
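
/* Illustrative sketch (not part of the original file): how a tc BPF program
 * would use this helper, pulling in the headers it needs once when a direct
 * access bounds check fails, then reloading data/data_end, which the pull
 * invalidates. This is restricted-C eBPF, compiled separately.
 */
#if 0
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	if (data + ETH_HLEN + sizeof(struct iphdr) > data_end) {
		if (bpf_skb_pull_data(skb, ETH_HLEN + sizeof(struct iphdr)))
			return TC_ACT_SHOT;
		/* Pointers were invalidated by the pull; reload them. */
		data = (void *)(long)skb->data;
		data_end = (void *)(long)skb->data_end;
	}
#endif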
BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
	return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_sk_fullsock_proto = {
	.func		= bpf_sk_fullsock,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};
static inline int sk_skb_try_make_writable(struct sk_buff *skb,
					   unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_end_sk_skb(skb);
	return err;
}
BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
}
static const struct bpf_func_proto sk_skb_pull_data_proto = {
	.func		= sk_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}
static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}
static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};
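
/* Illustrative sketch (not part of the original file): the "diffing data"
 * mode from the comment above, as a tc BPF program would use it together
 * with the csum-replace helpers after rewriting an IPv4 destination
 * address. Offsets follow the standard Ethernet/IPv4 layout; the program
 * context around this fragment is hypothetical.
 */
#if 0
	__be32 old_ip, new_ip = __constant_htonl(0x0a000001);	/* 10.0.0.1 */
	s64 diff;

	bpf_skb_load_bytes(skb, ETH_HLEN + offsetof(struct iphdr, daddr),
			   &old_ip, 4);
	diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);
	bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, daddr),
			    &new_ip, 4, 0);
	/* daddr is part of the TCP pseudo-header, hence BPF_F_PSEUDO_HDR. */
	bpf_l4_csum_replace(skb, ETH_HLEN + sizeof(struct iphdr) +
			    offsetof(struct tcphdr, check), 0, diff,
			    BPF_F_PSEUDO_HDR);
	bpf_l3_csum_replace(skb, ETH_HLEN + offsetof(struct iphdr, check),
			    0, diff, 0);
#endif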
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

	return -ENOTSUPP;
}
static const struct bpf_func_proto bpf_csum_update_proto = {
	.func		= bpf_csum_update,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level)
{
	/* The interface is to be used in combination with bpf_skb_adjust_room()
	 * for encap/decap of packet headers when BPF_F_ADJ_ROOM_NO_CSUM_RESET
	 * is passed as flags, for example.
	 */
	switch (level) {
	case BPF_CSUM_LEVEL_INC:
		__skb_incr_checksum_unnecessary(skb);
		break;
	case BPF_CSUM_LEVEL_DEC:
		__skb_decr_checksum_unnecessary(skb);
		break;
	case BPF_CSUM_LEVEL_RESET:
		__skb_reset_checksum_unnecessary(skb);
		break;
	case BPF_CSUM_LEVEL_QUERY:
		return skb->ip_summed == CHECKSUM_UNNECESSARY ?
		       skb->csum_level : -EACCES;
	default:
		return -EINVAL;
	}

	return 0;
}
static const struct bpf_func_proto bpf_csum_level_proto = {
	.func		= bpf_csum_level,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
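
/* Illustrative sketch (not part of the original file): pairing this helper
 * with bpf_skb_adjust_room() as the comment above describes — after
 * decapping an outer tunnel header without resetting checksum state, drop
 * one CHECKSUM_UNNECESSARY level so the inner checksum stays valid.
 * tunnel_hdr_len is a hypothetical value computed by the program.
 */
#if 0
	if (bpf_skb_adjust_room(skb, -(s32)tunnel_hdr_len, BPF_ADJ_ROOM_MAC,
				BPF_F_ADJ_ROOM_NO_CSUM_RESET))
		return TC_ACT_SHOT;
	bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC);
#endif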
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->dev = dev;
		ret = netif_rx(skb);
	}

	return ret;
}
static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (dev_xmit_recursion()) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;
	skb->tstamp = 0;

	dev_xmit_recursion_inc();
	ret = dev_queue_xmit(skb);
	dev_xmit_recursion_dec();

	return ret;
}
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	unsigned int mlen = skb_network_offset(skb);

	if (mlen) {
		__skb_pull(skb, mlen);

		/* At ingress, the mac header has already been pulled once.
		 * At egress, skb_pospull_rcsum has to be done in case that
		 * the skb is originated from ingress (i.e. a forwarded skb)
		 * to ensure that rcsum starts at net header.
		 */
		if (!skb_at_tc_ingress(skb))
			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	}
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}
static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}
static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
			  u32 flags)
{
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);
	else
		return __bpf_redirect_no_mac(skb, dev, flags);
}
#if IS_ENABLED(CONFIG_IPV6)
static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	u32 hh_len = LL_RESERVED_SPACE(dev);
	const struct in6_addr *nexthop;
	struct neighbour *neigh;

	if (dev_xmit_recursion()) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		goto out_drop;
	}

	skb->dev = dev;
	skb->tstamp = 0;

	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, hh_len);
		if (unlikely(!skb2)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
			      &ipv6_hdr(skb)->daddr);
	neigh = ip_neigh_gw6(dev, nexthop);
	if (likely(!IS_ERR(neigh))) {
		int ret;

		sock_confirm_neigh(skb, neigh);
		dev_xmit_recursion_inc();
		ret = neigh_output(neigh, skb, false);
		dev_xmit_recursion_dec();
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();
	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
out_drop:
	kfree_skb(skb);
	return -ENETDOWN;
}
static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net *net = dev_net(dev);
	int err, ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_flags	= FLOWI_FLAG_ANYSRC,
		.flowi6_mark	= skb->mark,
		.flowlabel	= ip6_flowinfo(ip6h),
		.flowi6_oif	= dev->ifindex,
		.flowi6_proto	= ip6h->nexthdr,
		.daddr		= ip6h->daddr,
		.saddr		= ip6h->saddr,
	};

	dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst))
		goto out_drop;

	skb_dst_set(skb, dst);

	err = bpf_out_neigh_v6(net, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out_xmit;
out_drop:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out_xmit:
	return ret;
}
#else
static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev)
{
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
#endif /* CONFIG_IPV6 */
#if IS_ENABLED(CONFIG_INET)
static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	u32 hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (dev_xmit_recursion()) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		goto out_drop;
	}

	skb->dev = dev;
	skb->tstamp = 0;

	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, hh_len);
		if (unlikely(!skb2)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (likely(!IS_ERR(neigh))) {
		int ret;

		sock_confirm_neigh(skb, neigh);
		dev_xmit_recursion_inc();
		ret = neigh_output(neigh, skb, is_v6gw);
		dev_xmit_recursion_dec();
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();
out_drop:
	kfree_skb(skb);
	return -ENETDOWN;
}
static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net *net = dev_net(dev);
	int err, ret = NET_XMIT_DROP;
	struct rtable *rt;
	struct flowi4 fl4 = {
		.flowi4_flags	= FLOWI_FLAG_ANYSRC,
		.flowi4_mark	= skb->mark,
		.flowi4_tos	= RT_TOS(ip4h->tos),
		.flowi4_oif	= dev->ifindex,
		.flowi4_proto	= ip4h->protocol,
		.daddr		= ip4h->daddr,
		.saddr		= ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto out_drop;
	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto out_drop;
	}

	skb_dst_set(skb, &rt->dst);

	err = bpf_out_neigh_v4(net, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out_xmit;
out_drop:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out_xmit:
	return ret;
}
#else
static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev)
{
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
#endif /* CONFIG_INET */
static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *ethh = eth_hdr(skb);

	if (unlikely(skb->mac_header >= skb->network_header))
		goto out;
	bpf_push_mac_rcsum(skb);
	if (is_multicast_ether_addr(ethh->h_dest))
		goto out;

	skb_pull(skb, sizeof(*ethh));
	skb_unset_mac_header(skb);
	skb_reset_network_header(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return __bpf_redirect_neigh_v4(skb, dev);
	else if (skb->protocol == htons(ETH_P_IPV6))
		return __bpf_redirect_neigh_v6(skb, dev);
out:
	kfree_skb(skb);
	return -ENOTSUPP;
}
/* Internal, non-exposed redirect flags. */
enum {
	BPF_F_NEIGH	= (1ULL << 1),
#define BPF_F_REDIRECT_INTERNAL	(BPF_F_NEIGH)
};
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
	struct net_device *dev;
	struct sk_buff *clone;
	int ret;

	if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
		return -EINVAL;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!clone))
		return -ENOMEM;

	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with need to be uncloned. Should uncloning fail
	 * here, we need to free the just generated clone to unclone once
	 * again.
	 */
	ret = bpf_try_make_head_writable(skb);
	if (unlikely(ret)) {
		kfree_skb(clone);
		return -ENOMEM;
	}

	return __bpf_redirect(clone, dev, flags);
}
static const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func		= bpf_clone_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);

int skb_do_redirect(struct sk_buff *skb)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	struct net_device *dev;
	u32 flags = ri->flags;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index);
	ri->tgt_index = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return flags & BPF_F_NEIGH ?
	       __bpf_redirect_neigh(skb, dev) :
	       __bpf_redirect(skb, dev, flags);
}
BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
		return TC_ACT_SHOT;

	ri->flags = flags;
	ri->tgt_index = ifindex;

	return TC_ACT_REDIRECT;
}
static const struct bpf_func_proto bpf_redirect_proto = {
	.func		= bpf_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};
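
/* Illustrative sketch (not part of the original file): a minimal tc egress
 * program using bpf_redirect(). The helper only records ifindex/flags in
 * bpf_redirect_info; the actual transmit happens when the caller sees
 * TC_ACT_REDIRECT and invokes skb_do_redirect() above. TARGET_IFINDEX is a
 * hypothetical constant.
 */
#if 0
SEC("tc")
int example_redirect(struct __sk_buff *skb)
{
	/* Pass BPF_F_INGRESS to inject into the target's RX path instead. */
	return bpf_redirect(TARGET_IFINDEX, 0);
}
#endif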
BPF_CALL_2(bpf_redirect_neigh, u32, ifindex, u64, flags)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	if (unlikely(flags))
		return TC_ACT_SHOT;

	ri->flags = BPF_F_NEIGH;
	ri->tgt_index = ifindex;

	return TC_ACT_REDIRECT;
}
2481 static const struct bpf_func_proto bpf_redirect_neigh_proto = {
2482 .func = bpf_redirect_neigh,
2484 .ret_type = RET_INTEGER,
2485 .arg1_type = ARG_ANYTHING,
2486 .arg2_type = ARG_ANYTHING,
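/* Usage sketch (illustrative only, not part of this file): a minimal tc BPF
 * program that forwards packets through the neighbor subsystem via
 * bpf_redirect_neigh(), matching the two-argument signature above. The
 * egress ifindex (2) and the SEC() name are assumptions for a libbpf-style
 * build with <bpf/bpf_helpers.h>.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int redirect_via_neigh(struct __sk_buff *skb)
{
	/* L2 addresses are filled in from the FIB/neighbor tables on the
	 * way out of ifindex 2; returns TC_ACT_REDIRECT on success.
	 */
	return bpf_redirect_neigh(2, 0);
}

char _license[] SEC("license") = "GPL";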
2489 BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
2491 msg->apply_bytes = bytes;
2495 static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
2496 .func = bpf_msg_apply_bytes,
2498 .ret_type = RET_INTEGER,
2499 .arg1_type = ARG_PTR_TO_CTX,
2500 .arg2_type = ARG_ANYTHING,
2503 BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
2505 msg->cork_bytes = bytes;
2509 static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
2510 .func = bpf_msg_cork_bytes,
2512 .ret_type = RET_INTEGER,
2513 .arg1_type = ARG_PTR_TO_CTX,
2514 .arg2_type = ARG_ANYTHING,
2517 BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
2518 u32, end, u64, flags)
2520 u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
2521 u32 first_sge, last_sge, i, shift, bytes_sg_total;
2522 struct scatterlist *sge;
2523 u8 *raw, *to, *from;
2526 if (unlikely(flags || end <= start))
2529 /* First find the starting scatterlist element */
2533 len = sk_msg_elem(msg, i)->length;
2534 if (start < offset + len)
2536 sk_msg_iter_var_next(i);
2537 } while (i != msg->sg.end);
2539 if (unlikely(start >= offset + len))
2543 /* The start may point into the sg element so we need to also
2544 * account for the headroom.
2546 bytes_sg_total = start - offset + bytes;
2547 if (!test_bit(i, &msg->sg.copy) && bytes_sg_total <= len)
2550 /* At this point we need to linearize multiple scatterlist
2551 * elements or a single shared page. Either way we need to
2552 * copy into a linear buffer exclusively owned by BPF. Then
2553 * place the buffer in the scatterlist and fixup the original
2554 * entries by removing the entries now in the linear buffer
2555 * and shifting the remaining entries. For now we do not try
2556 * to copy partial entries to avoid complexity of running out
2557 * of sg_entry slots. The downside is reading a single byte
2558 * will copy the entire sg entry.
2561 copy += sk_msg_elem(msg, i)->length;
2562 sk_msg_iter_var_next(i);
2563 if (bytes_sg_total <= copy)
2565 } while (i != msg->sg.end);
2568 if (unlikely(bytes_sg_total > copy))
2571 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2573 if (unlikely(!page))
2576 raw = page_address(page);
2579 sge = sk_msg_elem(msg, i);
2580 from = sg_virt(sge);
2584 memcpy(to, from, len);
2587 put_page(sg_page(sge));
2589 sk_msg_iter_var_next(i);
2590 } while (i != last_sge);
2592 sg_set_page(&msg->sg.data[first_sge], page, copy, 0);
2594 /* To repair sg ring we need to shift entries. If we only
2595 * had a single entry though we can just replace it and
2596 * be done. Otherwise walk the ring and shift the entries.
2598 WARN_ON_ONCE(last_sge == first_sge);
2599 shift = last_sge > first_sge ?
2600 last_sge - first_sge - 1 :
2601 NR_MSG_FRAG_IDS - first_sge + last_sge - 1;
2606 sk_msg_iter_var_next(i);
2610 if (i + shift >= NR_MSG_FRAG_IDS)
2611 move_from = i + shift - NR_MSG_FRAG_IDS;
2613 move_from = i + shift;
2614 if (move_from == msg->sg.end)
2617 msg->sg.data[i] = msg->sg.data[move_from];
2618 msg->sg.data[move_from].length = 0;
2619 msg->sg.data[move_from].page_link = 0;
2620 msg->sg.data[move_from].offset = 0;
2621 sk_msg_iter_var_next(i);
2624 msg->sg.end = msg->sg.end - shift > msg->sg.end ?
2625 msg->sg.end - shift + NR_MSG_FRAG_IDS :
2626 msg->sg.end - shift;
2628 msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
2629 msg->data_end = msg->data + bytes;
2633 static const struct bpf_func_proto bpf_msg_pull_data_proto = {
2634 .func = bpf_msg_pull_data,
2636 .ret_type = RET_INTEGER,
2637 .arg1_type = ARG_PTR_TO_CTX,
2638 .arg2_type = ARG_ANYTHING,
2639 .arg3_type = ARG_ANYTHING,
2640 .arg4_type = ARG_ANYTHING,
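/* Usage sketch (illustrative only, not part of this file): an sk_msg program
 * that uses bpf_msg_pull_data() to make the first 4 bytes of a message
 * linear and directly accessible through msg->data. Build assumptions:
 * libbpf headers and a sockmap attachment (not shown); the 0xcafe magic
 * value is made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int msg_peek_hdr(struct sk_msg_md *msg)
{
	__u32 *magic;

	/* Linearize bytes [0, 4); may copy and reshuffle the sg ring
	 * exactly as described in the comment above.
	 */
	if (bpf_msg_pull_data(msg, 0, 4, 0))
		return SK_DROP;
	magic = msg->data;
	if ((void *)(magic + 1) > msg->data_end)
		return SK_DROP;
	return *magic == 0xcafe ? SK_PASS : SK_DROP;
}

char _license[] SEC("license") = "GPL";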
2643 BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
2644 u32, len, u64, flags)
2646 struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
2647 u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
2648 u8 *raw, *to, *from;
2651 if (unlikely(flags))
2654 /* First find the starting scatterlist element */
2658 l = sk_msg_elem(msg, i)->length;
2660 if (start < offset + l)
2662 sk_msg_iter_var_next(i);
2663 } while (i != msg->sg.end);
2665 if (start >= offset + l)
2668 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2670 /* If no space is available we will fall back to copy; we need at
2671 * least one scatterlist elem available to push data into
2672 * when start aligns to the beginning of an element, or two
2673 * when it falls inside an element. We handle the start equals
2674 * offset case because it's the common case for inserting a
2675 * new element.
2676 */
2677 if (!space || (space == 1 && start != offset))
2678 copy = msg->sg.data[i].length;
2680 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2681 get_order(copy + len));
2682 if (unlikely(!page))
2688 raw = page_address(page);
2690 psge = sk_msg_elem(msg, i);
2691 front = start - offset;
2692 back = psge->length - front;
2693 from = sg_virt(psge);
2696 memcpy(raw, from, front);
2700 to = raw + front + len;
2702 memcpy(to, from, back);
2705 put_page(sg_page(psge));
2706 } else if (start - offset) {
2707 psge = sk_msg_elem(msg, i);
2708 rsge = sk_msg_elem_cpy(msg, i);
2710 psge->length = start - offset;
2711 rsge.length -= psge->length;
2712 rsge.offset += start;
2714 sk_msg_iter_var_next(i);
2715 sg_unmark_end(psge);
2716 sg_unmark_end(&rsge);
2717 sk_msg_iter_next(msg, end);
2720 /* Slot(s) to place newly allocated data */
2723 /* Shift one or two slots as needed */
2725 sge = sk_msg_elem_cpy(msg, i);
2727 sk_msg_iter_var_next(i);
2728 sg_unmark_end(&sge);
2729 sk_msg_iter_next(msg, end);
2731 nsge = sk_msg_elem_cpy(msg, i);
2733 sk_msg_iter_var_next(i);
2734 nnsge = sk_msg_elem_cpy(msg, i);
2737 while (i != msg->sg.end) {
2738 msg->sg.data[i] = sge;
2740 sk_msg_iter_var_next(i);
2743 nnsge = sk_msg_elem_cpy(msg, i);
2745 nsge = sk_msg_elem_cpy(msg, i);
2750 /* Place newly allocated data buffer */
2751 sk_mem_charge(msg->sk, len);
2752 msg->sg.size += len;
2753 __clear_bit(new, &msg->sg.copy);
2754 sg_set_page(&msg->sg.data[new], page, len + copy, 0);
2756 get_page(sg_page(&rsge));
2757 sk_msg_iter_var_next(new);
2758 msg->sg.data[new] = rsge;
2761 sk_msg_compute_data_pointers(msg);
2765 static const struct bpf_func_proto bpf_msg_push_data_proto = {
2766 .func = bpf_msg_push_data,
2768 .ret_type = RET_INTEGER,
2769 .arg1_type = ARG_PTR_TO_CTX,
2770 .arg2_type = ARG_ANYTHING,
2771 .arg3_type = ARG_ANYTHING,
2772 .arg4_type = ARG_ANYTHING,
2775 static void sk_msg_shift_left(struct sk_msg *msg, int i)
2781 sk_msg_iter_var_next(i);
2782 msg->sg.data[prev] = msg->sg.data[i];
2783 } while (i != msg->sg.end);
2785 sk_msg_iter_prev(msg, end);
2788 static void sk_msg_shift_right(struct sk_msg *msg, int i)
2790 struct scatterlist tmp, sge;
2792 sk_msg_iter_next(msg, end);
2793 sge = sk_msg_elem_cpy(msg, i);
2794 sk_msg_iter_var_next(i);
2795 tmp = sk_msg_elem_cpy(msg, i);
2797 while (i != msg->sg.end) {
2798 msg->sg.data[i] = sge;
2799 sk_msg_iter_var_next(i);
2801 tmp = sk_msg_elem_cpy(msg, i);
2805 BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
2806 u32, len, u64, flags)
2808 u32 i = 0, l = 0, space, offset = 0;
2809 u64 last = start + len;
2812 if (unlikely(flags))
2815 /* First find the starting scatterlist element */
2819 l = sk_msg_elem(msg, i)->length;
2821 if (start < offset + l)
2823 sk_msg_iter_var_next(i);
2824 } while (i != msg->sg.end);
2826 /* Bounds checks: start and pop must be inside message */
2827 if (start >= offset + l || last >= msg->sg.size)
2830 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2833 /* --------------| offset
2834 * -| start |-------- len -------|
2836 * |----- a ----|-------- pop -------|----- b ----|
2837 * |______________________________________________| length
2840 * a:   region at front of scatter element to save
2841 * b:   region at back of scatter element to save when length > a + pop
2842 * pop: region to pop from element, same as input 'pop'; here it will be
2843 *      decremented per iteration below.
2845 * Two top-level cases to handle when start != offset: first, B is
2846 * non-zero, and second, B is zero, corresponding to when a pop includes
2847 * more than one element.
2849 * Then if B is non-zero AND there is no space, allocate space and
2850 * compact the A, B regions into a page. If there is space, shift the
2851 * ring to the right, freeing the next element in the ring to place B,
2852 * leaving A untouched except to reduce its length.
2853 */
2854 if (start != offset) {
2855 struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
2857 int b = sge->length - pop - a;
2859 sk_msg_iter_var_next(i);
2861 if (pop < sge->length - a) {
2864 sk_msg_shift_right(msg, i);
2865 nsge = sk_msg_elem(msg, i);
2866 get_page(sg_page(sge));
2869 b, sge->offset + pop + a);
2871 struct page *page, *orig;
2874 page = alloc_pages(__GFP_NOWARN |
2875 __GFP_COMP | GFP_ATOMIC,
2877 if (unlikely(!page))
2881 orig = sg_page(sge);
2882 from = sg_virt(sge);
2883 to = page_address(page);
2884 memcpy(to, from, a);
2885 memcpy(to + a, from + a + pop, b);
2886 sg_set_page(sge, page, a + b, 0);
2890 } else if (pop >= sge->length - a) {
2891 pop -= (sge->length - a);
2896 /* From above the current layout _must_ be as follows,
2901 * |---- pop ---|---------------- b ------------|
2902 * |____________________________________________| length
2904 * Offset and start of the current msg elem are equal because in the
2905 * previous case we handled offset != start and either consumed the
2906 * entire element and advanced to the next element OR pop == 0.
2908 * Two cases to handle here are first pop is less than the length
2909 * leaving some remainder b above. Simply adjust the element's layout
2910 * in this case. Or pop >= length of the element so that b = 0. In this
2911 * case advance to next element decrementing pop.
2914 struct scatterlist *sge = sk_msg_elem(msg, i);
2916 if (pop < sge->length) {
2922 sk_msg_shift_left(msg, i);
2924 sk_msg_iter_var_next(i);
2927 sk_mem_uncharge(msg->sk, len - pop);
2928 msg->sg.size -= (len - pop);
2929 sk_msg_compute_data_pointers(msg);
2933 static const struct bpf_func_proto bpf_msg_pop_data_proto = {
2934 .func = bpf_msg_pop_data,
2936 .ret_type = RET_INTEGER,
2937 .arg1_type = ARG_PTR_TO_CTX,
2938 .arg2_type = ARG_ANYTHING,
2939 .arg3_type = ARG_ANYTHING,
2940 .arg4_type = ARG_ANYTHING,
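/* Usage sketch (illustrative only, not part of this file): stripping an
 * assumed 8-byte application header from the front of a message with
 * bpf_msg_pop_data(), the inverse of bpf_msg_push_data() above. Libbpf
 * build environment assumed.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int msg_strip_hdr(struct sk_msg_md *msg)
{
	/* Remove bytes [0, 8); memory accounting and data pointers are
	 * fixed up by the helper (sk_mem_uncharge() etc. above).
	 */
	if (bpf_msg_pop_data(msg, 0, 8, 0))
		return SK_DROP;
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";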
2943 #ifdef CONFIG_CGROUP_NET_CLASSID
2944 BPF_CALL_0(bpf_get_cgroup_classid_curr)
2946 return __task_get_classid(current);
2949 static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = {
2950 .func = bpf_get_cgroup_classid_curr,
2952 .ret_type = RET_INTEGER,
2955 BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb)
2957 struct sock *sk = skb_to_full_sk(skb);
2959 if (!sk || !sk_fullsock(sk))
2962 return sock_cgroup_classid(&sk->sk_cgrp_data);
2965 static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = {
2966 .func = bpf_skb_cgroup_classid,
2968 .ret_type = RET_INTEGER,
2969 .arg1_type = ARG_PTR_TO_CTX,
2973 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
2975 return task_get_classid(skb);
2978 static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
2979 .func = bpf_get_cgroup_classid,
2981 .ret_type = RET_INTEGER,
2982 .arg1_type = ARG_PTR_TO_CTX,
2985 BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
2987 return dst_tclassid(skb);
2990 static const struct bpf_func_proto bpf_get_route_realm_proto = {
2991 .func = bpf_get_route_realm,
2993 .ret_type = RET_INTEGER,
2994 .arg1_type = ARG_PTR_TO_CTX,
2997 BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
2999 /* If skb_clear_hash() was called due to mangling, we can
3000 * trigger SW recalculation here. Later access to hash
3001 * can then use the inline skb->hash via context directly
3002 * instead of calling this helper again.
3004 return skb_get_hash(skb);
3007 static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
3008 .func = bpf_get_hash_recalc,
3010 .ret_type = RET_INTEGER,
3011 .arg1_type = ARG_PTR_TO_CTX,
3014 BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
3016 /* After all direct packet writes, this can be used once for
3017 * triggering a lazy recalc on the next skb_get_hash() invocation.
3018 */
3019 skb_clear_hash(skb);
3023 static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
3024 .func = bpf_set_hash_invalid,
3026 .ret_type = RET_INTEGER,
3027 .arg1_type = ARG_PTR_TO_CTX,
3030 BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
3032 /* Set user specified hash as L4(+), so that it gets returned
3033 * on skb_get_hash() call unless BPF prog later on triggers a
3034 * hash recalculation.
3035 */
3036 __skb_set_sw_hash(skb, hash, true);
3040 static const struct bpf_func_proto bpf_set_hash_proto = {
3041 .func = bpf_set_hash,
3043 .ret_type = RET_INTEGER,
3044 .arg1_type = ARG_PTR_TO_CTX,
3045 .arg2_type = ARG_ANYTHING,
3048 BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
3053 if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
3054 vlan_proto != htons(ETH_P_8021AD)))
3055 vlan_proto = htons(ETH_P_8021Q);
3057 bpf_push_mac_rcsum(skb);
3058 ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
3059 bpf_pull_mac_rcsum(skb);
3061 bpf_compute_data_pointers(skb);
3065 static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
3066 .func = bpf_skb_vlan_push,
3068 .ret_type = RET_INTEGER,
3069 .arg1_type = ARG_PTR_TO_CTX,
3070 .arg2_type = ARG_ANYTHING,
3071 .arg3_type = ARG_ANYTHING,
3074 BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
3078 bpf_push_mac_rcsum(skb);
3079 ret = skb_vlan_pop(skb);
3080 bpf_pull_mac_rcsum(skb);
3082 bpf_compute_data_pointers(skb);
3086 static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
3087 .func = bpf_skb_vlan_pop,
3089 .ret_type = RET_INTEGER,
3090 .arg1_type = ARG_PTR_TO_CTX,
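/* Usage sketch (illustrative only, not part of this file): rewriting the
 * VLAN tag of forwarded frames from a tc program. VLAN ID 100 is an
 * assumption; note both helpers recompute the data pointers, so any
 * previously derived packet pointers must be re-checked afterwards.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("classifier")
int vlan_retag(struct __sk_buff *skb)
{
	if (bpf_skb_vlan_pop(skb))
		return TC_ACT_SHOT;
	if (bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 100))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";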
3093 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
3095 /* Caller already did skb_cow() with len as headroom,
3096 * so no need to do it here.
3099 memmove(skb->data, skb->data + len, off);
3100 memset(skb->data + off, 0, len);
3102 /* No skb_postpush_rcsum(skb, skb->data + off, len)
3103 * needed here as it does not change the skb->csum
3104 * result for checksum complete when summing over
3105 * zeroed blocks.
3106 */
3107 return 0;
3108 }
3110 static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
3112 /* skb_ensure_writable() is not needed here, as we're
3113 * already working on an uncloned skb.
3115 if (unlikely(!pskb_may_pull(skb, off + len)))
3118 skb_postpull_rcsum(skb, skb->data + off, len);
3119 memmove(skb->data + len, skb->data, off);
3120 __skb_pull(skb, len);
3125 static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
3127 bool trans_same = skb->transport_header == skb->network_header;
3130 /* There's no need for __skb_push()/__skb_pull() pair to
3131 * get to the start of the mac header as we're guaranteed
3132 * to always start from here under eBPF.
3134 ret = bpf_skb_generic_push(skb, off, len);
3136 skb->mac_header -= len;
3137 skb->network_header -= len;
3139 skb->transport_header = skb->network_header;
3145 static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
3147 bool trans_same = skb->transport_header == skb->network_header;
3150 /* Same here, __skb_push()/__skb_pull() pair not needed. */
3151 ret = bpf_skb_generic_pop(skb, off, len);
3153 skb->mac_header += len;
3154 skb->network_header += len;
3156 skb->transport_header = skb->network_header;
3162 static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
3164 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
3165 u32 off = skb_mac_header_len(skb);
3168 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
3171 ret = skb_cow(skb, len_diff);
3172 if (unlikely(ret < 0))
3175 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
3176 if (unlikely(ret < 0))
3179 if (skb_is_gso(skb)) {
3180 struct skb_shared_info *shinfo = skb_shinfo(skb);
3182 /* SKB_GSO_TCPV4 needs to be changed into
3185 if (shinfo->gso_type & SKB_GSO_TCPV4) {
3186 shinfo->gso_type &= ~SKB_GSO_TCPV4;
3187 shinfo->gso_type |= SKB_GSO_TCPV6;
3190 /* Due to IPv6 header, MSS needs to be downgraded. */
3191 skb_decrease_gso_size(shinfo, len_diff);
3192 /* Header must be checked, and gso_segs recomputed. */
3193 shinfo->gso_type |= SKB_GSO_DODGY;
3194 shinfo->gso_segs = 0;
3197 skb->protocol = htons(ETH_P_IPV6);
3198 skb_clear_hash(skb);
3203 static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
3205 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
3206 u32 off = skb_mac_header_len(skb);
3209 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
3212 ret = skb_unclone(skb, GFP_ATOMIC);
3213 if (unlikely(ret < 0))
3216 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
3217 if (unlikely(ret < 0))
3220 if (skb_is_gso(skb)) {
3221 struct skb_shared_info *shinfo = skb_shinfo(skb);
3223 /* SKB_GSO_TCPV6 needs to be changed into
3226 if (shinfo->gso_type & SKB_GSO_TCPV6) {
3227 shinfo->gso_type &= ~SKB_GSO_TCPV6;
3228 shinfo->gso_type |= SKB_GSO_TCPV4;
3231 /* Due to IPv4 header, MSS can be upgraded. */
3232 skb_increase_gso_size(shinfo, len_diff);
3233 /* Header must be checked, and gso_segs recomputed. */
3234 shinfo->gso_type |= SKB_GSO_DODGY;
3235 shinfo->gso_segs = 0;
3238 skb->protocol = htons(ETH_P_IP);
3239 skb_clear_hash(skb);
3244 static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
3246 __be16 from_proto = skb->protocol;
3248 if (from_proto == htons(ETH_P_IP) &&
3249 to_proto == htons(ETH_P_IPV6))
3250 return bpf_skb_proto_4_to_6(skb);
3252 if (from_proto == htons(ETH_P_IPV6) &&
3253 to_proto == htons(ETH_P_IP))
3254 return bpf_skb_proto_6_to_4(skb);
3259 BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
3264 if (unlikely(flags))
3267 /* General idea is that this helper does the basic groundwork
3268 * needed for changing the protocol, and eBPF program fills the
3269 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
3270 * and other helpers, rather than passing a raw buffer here.
3272 * The rationale is to keep this minimal and without a need to
3273 * deal with raw packet data. F.e. even if we would pass buffers
3274 * here, the program still needs to call the bpf_lX_csum_replace()
3275 * helpers anyway. Plus, this way we also keep separation of
3276 * concerns, since f.e. bpf_skb_store_bytes() should only take
3277 * care of writing packet data.
3279 * Currently, additional options and extension header space are
3280 * not supported, but the flags register is reserved so we can adapt
3281 * that. For offloads, we mark the packet as dodgy, so that headers
3282 * need to be verified first.
3283 */
3284 ret = bpf_skb_proto_xlat(skb, proto);
3285 bpf_compute_data_pointers(skb);
3289 static const struct bpf_func_proto bpf_skb_change_proto_proto = {
3290 .func = bpf_skb_change_proto,
3292 .ret_type = RET_INTEGER,
3293 .arg1_type = ARG_PTR_TO_CTX,
3294 .arg2_type = ARG_ANYTHING,
3295 .arg3_type = ARG_ANYTHING,
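/* Usage sketch (illustrative only, not part of this file): the division of
 * labor described above. bpf_skb_change_proto() only resizes the header
 * room and flips skb->protocol; a real NAT64-style translator would then
 * write the full IPv6 header via bpf_skb_store_bytes() and fix the L4
 * checksum via bpf_l4_csum_replace(), which is omitted here.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("classifier")
int v4_to_v6_room(struct __sk_buff *skb)
{
	if (skb->protocol != bpf_htons(ETH_P_IP))
		return TC_ACT_OK;
	/* Makes room for the larger IPv6 header; the program must then
	 * store a valid IPv6 header itself.
	 */
	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";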
3298 BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
3300 /* We only allow a restricted subset to be changed for now. */
3301 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
3302 !skb_pkt_type_ok(pkt_type)))
3305 skb->pkt_type = pkt_type;
3309 static const struct bpf_func_proto bpf_skb_change_type_proto = {
3310 .func = bpf_skb_change_type,
3312 .ret_type = RET_INTEGER,
3313 .arg1_type = ARG_PTR_TO_CTX,
3314 .arg2_type = ARG_ANYTHING,
3317 static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
3319 switch (skb->protocol) {
3320 case htons(ETH_P_IP):
3321 return sizeof(struct iphdr);
3322 case htons(ETH_P_IPV6):
3323 return sizeof(struct ipv6hdr);
3329 #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
3330 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3332 #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \
3333 BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
3334 BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
3335 BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
3336 BPF_F_ADJ_ROOM_ENCAP_L2( \
3337 BPF_ADJ_ROOM_ENCAP_L2_MASK))
3339 static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
3342 u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
3343 bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
3344 u16 mac_len = 0, inner_net = 0, inner_trans = 0;
3345 unsigned int gso_type = SKB_GSO_DODGY;
3348 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3349 /* udp gso_size delineates datagrams, only allow if fixed */
3350 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3351 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3355 ret = skb_cow_head(skb, len_diff);
3356 if (unlikely(ret < 0))
3360 if (skb->protocol != htons(ETH_P_IP) &&
3361 skb->protocol != htons(ETH_P_IPV6))
3364 if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
3365 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3368 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
3369 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3372 if (skb->encapsulation)
3375 mac_len = skb->network_header - skb->mac_header;
3376 inner_net = skb->network_header;
3377 if (inner_mac_len > len_diff)
3379 inner_trans = skb->transport_header;
3382 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
3383 if (unlikely(ret < 0))
3387 skb->inner_mac_header = inner_net - inner_mac_len;
3388 skb->inner_network_header = inner_net;
3389 skb->inner_transport_header = inner_trans;
3390 skb_set_inner_protocol(skb, skb->protocol);
3392 skb->encapsulation = 1;
3393 skb_set_network_header(skb, mac_len);
3395 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3396 gso_type |= SKB_GSO_UDP_TUNNEL;
3397 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
3398 gso_type |= SKB_GSO_GRE;
3399 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3400 gso_type |= SKB_GSO_IPXIP6;
3401 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3402 gso_type |= SKB_GSO_IPXIP4;
3404 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
3405 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
3406 int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
3407 sizeof(struct ipv6hdr) :
3408 sizeof(struct iphdr);
3410 skb_set_transport_header(skb, mac_len + nh_len);
3413 /* Match skb->protocol to new outer l3 protocol */
3414 if (skb->protocol == htons(ETH_P_IP) &&
3415 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3416 skb->protocol = htons(ETH_P_IPV6);
3417 else if (skb->protocol == htons(ETH_P_IPV6) &&
3418 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3419 skb->protocol = htons(ETH_P_IP);
3422 if (skb_is_gso(skb)) {
3423 struct skb_shared_info *shinfo = skb_shinfo(skb);
3425 /* Due to header grow, MSS needs to be downgraded. */
3426 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3427 skb_decrease_gso_size(shinfo, len_diff);
3429 /* Header must be checked, and gso_segs recomputed. */
3430 shinfo->gso_type |= gso_type;
3431 shinfo->gso_segs = 0;
3437 static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
3442 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO |
3443 BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
3446 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3447 /* udp gso_size delineates datagrams, only allow if fixed */
3448 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3449 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3453 ret = skb_unclone(skb, GFP_ATOMIC);
3454 if (unlikely(ret < 0))
3457 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
3458 if (unlikely(ret < 0))
3461 if (skb_is_gso(skb)) {
3462 struct skb_shared_info *shinfo = skb_shinfo(skb);
3464 /* Due to header shrink, MSS can be upgraded. */
3465 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3466 skb_increase_gso_size(shinfo, len_diff);
3468 /* Header must be checked, and gso_segs recomputed. */
3469 shinfo->gso_type |= SKB_GSO_DODGY;
3470 shinfo->gso_segs = 0;
3476 static u32 __bpf_skb_max_len(const struct sk_buff *skb)
3478 return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
3479 SKB_MAX_ALLOC;
3480 }
3482 BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
3483 u32, mode, u64, flags)
3485 u32 len_cur, len_diff_abs = abs(len_diff);
3486 u32 len_min = bpf_skb_net_base_len(skb);
3487 u32 len_max = __bpf_skb_max_len(skb);
3488 __be16 proto = skb->protocol;
3489 bool shrink = len_diff < 0;
3493 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK |
3494 BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
3496 if (unlikely(len_diff_abs > 0xfffU))
3498 if (unlikely(proto != htons(ETH_P_IP) &&
3499 proto != htons(ETH_P_IPV6)))
3502 off = skb_mac_header_len(skb);
3504 case BPF_ADJ_ROOM_NET:
3505 off += bpf_skb_net_base_len(skb);
3507 case BPF_ADJ_ROOM_MAC:
3513 len_cur = skb->len - skb_network_offset(skb);
3514 if ((shrink && (len_diff_abs >= len_cur ||
3515 len_cur - len_diff_abs < len_min)) ||
3516 (!shrink && (skb->len + len_diff_abs > len_max &&
3520 ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
3521 bpf_skb_net_grow(skb, off, len_diff_abs, flags);
3522 if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET))
3523 __skb_reset_checksum_unnecessary(skb);
3525 bpf_compute_data_pointers(skb);
3529 static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
3530 .func = bpf_skb_adjust_room,
3532 .ret_type = RET_INTEGER,
3533 .arg1_type = ARG_PTR_TO_CTX,
3534 .arg2_type = ARG_ANYTHING,
3535 .arg3_type = ARG_ANYTHING,
3536 .arg4_type = ARG_ANYTHING,
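/* Usage sketch (illustrative only, not part of this file): making room for
 * an outer IPv4/GRE encapsulation at the MAC layer, the path handled by
 * bpf_skb_net_grow() above. The program would then write the outer headers
 * with bpf_skb_store_bytes(); the 4-byte base GRE header size is an
 * assumption (no GRE options).
 */
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int gre_encap_room(struct __sk_buff *skb)
{
	__u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO |
		      BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
		      BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
	/* Outer iphdr plus 4-byte base GRE header. */
	__s32 len_diff = sizeof(struct iphdr) + 4;

	if (bpf_skb_adjust_room(skb, len_diff, BPF_ADJ_ROOM_MAC, flags))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";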
3539 static u32 __bpf_skb_min_len(const struct sk_buff *skb)
3541 u32 min_len = skb_network_offset(skb);
3543 if (skb_transport_header_was_set(skb))
3544 min_len = skb_transport_offset(skb);
3545 if (skb->ip_summed == CHECKSUM_PARTIAL)
3546 min_len = skb_checksum_start_offset(skb) +
3547 skb->csum_offset + sizeof(__sum16);
3551 static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
3553 unsigned int old_len = skb->len;
3556 ret = __skb_grow_rcsum(skb, new_len);
3558 memset(skb->data + old_len, 0, new_len - old_len);
3562 static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
3564 return __skb_trim_rcsum(skb, new_len);
3567 static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
3570 u32 max_len = __bpf_skb_max_len(skb);
3571 u32 min_len = __bpf_skb_min_len(skb);
3574 if (unlikely(flags || new_len > max_len || new_len < min_len))
3576 if (skb->encapsulation)
3579 /* The basic idea of this helper is that it's performing the
3580 * needed work to either grow or trim an skb, and eBPF program
3581 * rewrites the rest via helpers like bpf_skb_store_bytes(),
3582 * bpf_lX_csum_replace() and others rather than passing a raw
3583 * buffer here. This one is a slow path helper and intended
3584 * for replies with control messages.
3586 * Like in bpf_skb_change_proto(), we want to keep this rather
3587 * minimal and without protocol specifics so that we are able
3588 * to separate concerns as in bpf_skb_store_bytes() should only
3589 * be the one responsible for writing buffers.
3591 * It's really expected to be a slow path operation here for
3592 * control message replies, so we're implicitly linearizing,
3593 * uncloning and dropping offloads from the skb by this.
3594 */
3595 ret = __bpf_try_make_writable(skb, skb->len);
3597 if (new_len > skb->len)
3598 ret = bpf_skb_grow_rcsum(skb, new_len);
3599 else if (new_len < skb->len)
3600 ret = bpf_skb_trim_rcsum(skb, new_len);
3601 if (!ret && skb_is_gso(skb))
3607 BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3610 int ret = __bpf_skb_change_tail(skb, new_len, flags);
3612 bpf_compute_data_pointers(skb);
3616 static const struct bpf_func_proto bpf_skb_change_tail_proto = {
3617 .func = bpf_skb_change_tail,
3619 .ret_type = RET_INTEGER,
3620 .arg1_type = ARG_PTR_TO_CTX,
3621 .arg2_type = ARG_ANYTHING,
3622 .arg3_type = ARG_ANYTHING,
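/* Usage sketch (illustrative only, not part of this file): trimming a packet
 * down to its leading bytes before mirroring it, one of the slow-path,
 * control-message style uses noted above. The 64-byte target length is an
 * assumption.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int trim_sample(struct __sk_buff *skb)
{
	/* Implicitly linearizes, unclones and drops offloads, as the
	 * comment in __bpf_skb_change_tail() explains.
	 */
	if (skb->len > 64 && bpf_skb_change_tail(skb, 64, 0))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";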
3625 BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3628 int ret = __bpf_skb_change_tail(skb, new_len, flags);
3630 bpf_compute_data_end_sk_skb(skb);
3634 static const struct bpf_func_proto sk_skb_change_tail_proto = {
3635 .func = sk_skb_change_tail,
3637 .ret_type = RET_INTEGER,
3638 .arg1_type = ARG_PTR_TO_CTX,
3639 .arg2_type = ARG_ANYTHING,
3640 .arg3_type = ARG_ANYTHING,
3643 static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
3646 u32 max_len = __bpf_skb_max_len(skb);
3647 u32 new_len = skb->len + head_room;
3650 if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
3651 new_len < skb->len))
3654 ret = skb_cow(skb, head_room);
3656 /* Idea for this helper is that we currently only
3657 * allow expanding on the mac header. This means that
3658 * skb->protocol, network header, etc., stay as is.
3659 * Compared to bpf_skb_change_tail(), we're more
3660 * flexible due to not needing to linearize or
3661 * reset GSO. Intention for this helper is to be
3662 * used by an L3 skb that needs to push mac header
3663 * for redirection into an L2 device.
3664 */
3665 __skb_push(skb, head_room);
3666 memset(skb->data, 0, head_room);
3667 skb_reset_mac_header(skb);
3673 BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
3676 int ret = __bpf_skb_change_head(skb, head_room, flags);
3678 bpf_compute_data_pointers(skb);
3682 static const struct bpf_func_proto bpf_skb_change_head_proto = {
3683 .func = bpf_skb_change_head,
3685 .ret_type = RET_INTEGER,
3686 .arg1_type = ARG_PTR_TO_CTX,
3687 .arg2_type = ARG_ANYTHING,
3688 .arg3_type = ARG_ANYTHING,
3691 BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
3694 int ret = __bpf_skb_change_head(skb, head_room, flags);
3696 bpf_compute_data_end_sk_skb(skb);
3700 static const struct bpf_func_proto sk_skb_change_head_proto = {
3701 .func = sk_skb_change_head,
3703 .ret_type = RET_INTEGER,
3704 .arg1_type = ARG_PTR_TO_CTX,
3705 .arg2_type = ARG_ANYTHING,
3706 .arg3_type = ARG_ANYTHING,
3708 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
3710 return xdp_data_meta_unsupported(xdp) ? 0 :
3711 xdp->data - xdp->data_meta;
3714 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
3716 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3717 unsigned long metalen = xdp_get_metalen(xdp);
3718 void *data_start = xdp_frame_end + metalen;
3719 void *data = xdp->data + offset;
3721 if (unlikely(data < data_start ||
3722 data > xdp->data_end - ETH_HLEN))
3726 memmove(xdp->data_meta + offset,
3727 xdp->data_meta, metalen);
3728 xdp->data_meta += offset;
3734 static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
3735 .func = bpf_xdp_adjust_head,
3737 .ret_type = RET_INTEGER,
3738 .arg1_type = ARG_PTR_TO_CTX,
3739 .arg2_type = ARG_ANYTHING,
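/* Usage sketch (illustrative only, not part of this file): popping an
 * assumed 8-byte custom header from the front of a frame in XDP. A real
 * decap would first move the Ethernet header forward over the removed
 * bytes; that is omitted for brevity. All packet pointers must be
 * re-derived after the call since xdp->data moves.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int pop_outer_hdr(struct xdp_md *xdp)
{
	/* Positive offset shrinks the packet at the front; bounds are
	 * enforced by the checks in bpf_xdp_adjust_head() above.
	 */
	if (bpf_xdp_adjust_head(xdp, 8))
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";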
3742 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
3744 void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */
3745 void *data_end = xdp->data_end + offset;
3747 /* Notice that xdp_data_hard_end has reserved some tailroom */
3748 if (unlikely(data_end > data_hard_end))
3751 /* ALL drivers MUST init xdp->frame_sz, chicken check below */
3752 if (unlikely(xdp->frame_sz > PAGE_SIZE)) {
3753 WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz);
3757 if (unlikely(data_end < xdp->data + ETH_HLEN))
3760 /* Clear memory area on grow, can contain uninit kernel memory */
3762 memset(xdp->data_end, 0, offset);
3764 xdp->data_end = data_end;
3769 static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
3770 .func = bpf_xdp_adjust_tail,
3772 .ret_type = RET_INTEGER,
3773 .arg1_type = ARG_PTR_TO_CTX,
3774 .arg2_type = ARG_ANYTHING,
3777 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
3779 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3780 void *meta = xdp->data_meta + offset;
3781 unsigned long metalen = xdp->data - meta;
3783 if (xdp_data_meta_unsupported(xdp))
3785 if (unlikely(meta < xdp_frame_end ||
3788 if (unlikely((metalen & (sizeof(__u32) - 1)) ||
3792 xdp->data_meta = meta;
3797 static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
3798 .func = bpf_xdp_adjust_meta,
3800 .ret_type = RET_INTEGER,
3801 .arg1_type = ARG_PTR_TO_CTX,
3802 .arg2_type = ARG_ANYTHING,
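/* Usage sketch (illustrative only, not part of this file): reserving 4
 * bytes of metadata in front of the frame with bpf_xdp_adjust_meta() and
 * stamping a mark there for a later consumer (e.g. a tc program) to read.
 * The single-__u32 layout is an assumption shared by both ends.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int stamp_meta(struct xdp_md *xdp)
{
	__u32 *meta;

	/* Grow the meta area; it must stay word-aligned per the check
	 * in bpf_xdp_adjust_meta() above.
	 */
	if (bpf_xdp_adjust_meta(xdp, -(int)sizeof(*meta)))
		return XDP_PASS;
	meta = (void *)(long)xdp->data_meta;
	if ((void *)(meta + 1) > (void *)(long)xdp->data)
		return XDP_PASS;
	*meta = 0xdeadbeef;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";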
3805 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
3806 struct bpf_map *map, struct xdp_buff *xdp)
3808 switch (map->map_type) {
3809 case BPF_MAP_TYPE_DEVMAP:
3810 case BPF_MAP_TYPE_DEVMAP_HASH:
3811 return dev_map_enqueue(fwd, xdp, dev_rx);
3812 case BPF_MAP_TYPE_CPUMAP:
3813 return cpu_map_enqueue(fwd, xdp, dev_rx);
3814 case BPF_MAP_TYPE_XSKMAP:
3815 return __xsk_map_redirect(fwd, xdp);
3822 void xdp_do_flush(void)
3828 EXPORT_SYMBOL_GPL(xdp_do_flush);
3830 static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
3832 switch (map->map_type) {
3833 case BPF_MAP_TYPE_DEVMAP:
3834 return __dev_map_lookup_elem(map, index);
3835 case BPF_MAP_TYPE_DEVMAP_HASH:
3836 return __dev_map_hash_lookup_elem(map, index);
3837 case BPF_MAP_TYPE_CPUMAP:
3838 return __cpu_map_lookup_elem(map, index);
3839 case BPF_MAP_TYPE_XSKMAP:
3840 return __xsk_map_lookup_elem(map, index);
3846 void bpf_clear_redirect_map(struct bpf_map *map)
3848 struct bpf_redirect_info *ri;
3851 for_each_possible_cpu(cpu) {
3852 ri = per_cpu_ptr(&bpf_redirect_info, cpu);
3853 /* Avoid polluting remote cacheline due to writes if
3854 * not needed. Once we pass this test, we need the
3855 * cmpxchg() to make sure it hasn't been changed in
3856 * the meantime by remote CPU.
3858 if (unlikely(READ_ONCE(ri->map) == map))
3859 cmpxchg(&ri->map, map, NULL);
3863 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3864 struct bpf_prog *xdp_prog)
3866 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3867 struct bpf_map *map = READ_ONCE(ri->map);
3868 u32 index = ri->tgt_index;
3869 void *fwd = ri->tgt_value;
3873 ri->tgt_value = NULL;
3874 WRITE_ONCE(ri->map, NULL);
3876 if (unlikely(!map)) {
3877 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3878 if (unlikely(!fwd)) {
3883 err = dev_xdp_enqueue(fwd, xdp, dev);
3885 err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
3891 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3894 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3897 EXPORT_SYMBOL_GPL(xdp_do_redirect);
3899 static int xdp_do_generic_redirect_map(struct net_device *dev,
3900 struct sk_buff *skb,
3901 struct xdp_buff *xdp,
3902 struct bpf_prog *xdp_prog,
3903 struct bpf_map *map)
3905 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3906 u32 index = ri->tgt_index;
3907 void *fwd = ri->tgt_value;
3911 ri->tgt_value = NULL;
3912 WRITE_ONCE(ri->map, NULL);
3914 if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
3915 map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
3916 struct bpf_dtab_netdev *dst = fwd;
3918 err = dev_map_generic_redirect(dst, skb, xdp_prog);
3921 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
3922 struct xdp_sock *xs = fwd;
3924 err = xsk_generic_rcv(xs, xdp);
3929 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
3934 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3937 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3941 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
3942 struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
3944 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3945 struct bpf_map *map = READ_ONCE(ri->map);
3946 u32 index = ri->tgt_index;
3947 struct net_device *fwd;
3951 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
3954 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3955 if (unlikely(!fwd)) {
3960 err = xdp_ok_fwd_dev(fwd, skb->len);
3965 _trace_xdp_redirect(dev, xdp_prog, index);
3966 generic_xdp_tx(skb, xdp_prog);
3969 _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3973 BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
3975 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3977 if (unlikely(flags))
3981 ri->tgt_index = ifindex;
3982 ri->tgt_value = NULL;
3983 WRITE_ONCE(ri->map, NULL);
3985 return XDP_REDIRECT;
3988 static const struct bpf_func_proto bpf_xdp_redirect_proto = {
3989 .func = bpf_xdp_redirect,
3991 .ret_type = RET_INTEGER,
3992 .arg1_type = ARG_ANYTHING,
3993 .arg2_type = ARG_ANYTHING,
3996 BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
3999 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
4001 /* Lower bits of the flags are used as return code on lookup failure */
4002 if (unlikely(flags > XDP_TX))
4005 ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
4006 if (unlikely(!ri->tgt_value)) {
4007 /* If the lookup fails we want to clear out the state in the
4008 * redirect_info struct completely, so that if an eBPF program
4009 * performs multiple lookups, the last one always takes
4010 * precedence.
4011 */
4012 WRITE_ONCE(ri->map, NULL);
4017 ri->tgt_index = ifindex;
4018 WRITE_ONCE(ri->map, map);
4020 return XDP_REDIRECT;
4023 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
4024 .func = bpf_xdp_redirect_map,
4026 .ret_type = RET_INTEGER,
4027 .arg1_type = ARG_CONST_MAP_PTR,
4028 .arg2_type = ARG_ANYTHING,
4029 .arg3_type = ARG_ANYTHING,
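/* Usage sketch (illustrative only, not part of this file): the devmap-based
 * fast path served by bpf_xdp_redirect_map() above. The map (slot 0 ->
 * egress ifindex) is populated from userspace; the map name and size are
 * assumptions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u32);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_fwd(struct xdp_md *xdp)
{
	/* The low bits of flags (XDP_PASS here) are the return code used
	 * when the lookup in slot 0 misses, as noted above.
	 */
	return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";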
4032 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
4033 unsigned long off, unsigned long len)
4035 void *ptr = skb_header_pointer(skb, off, len, dst_buff);
4039 if (ptr != dst_buff)
4040 memcpy(dst_buff, ptr, len);
4045 BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
4046 u64, flags, void *, meta, u64, meta_size)
4048 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4050 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4052 if (unlikely(!skb || skb_size > skb->len))
4055 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
4059 static const struct bpf_func_proto bpf_skb_event_output_proto = {
4060 .func = bpf_skb_event_output,
4062 .ret_type = RET_INTEGER,
4063 .arg1_type = ARG_PTR_TO_CTX,
4064 .arg2_type = ARG_CONST_MAP_PTR,
4065 .arg3_type = ARG_ANYTHING,
4066 .arg4_type = ARG_PTR_TO_MEM,
4067 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4070 BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)
4072 const struct bpf_func_proto bpf_skb_output_proto = {
4073 .func = bpf_skb_event_output,
4075 .ret_type = RET_INTEGER,
4076 .arg1_type = ARG_PTR_TO_BTF_ID,
4077 .arg1_btf_id = &bpf_skb_output_btf_ids[0],
4078 .arg2_type = ARG_CONST_MAP_PTR,
4079 .arg3_type = ARG_ANYTHING,
4080 .arg4_type = ARG_PTR_TO_MEM,
4081 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4084 static unsigned short bpf_tunnel_key_af(u64 flags)
4086 return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
4089 BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
4090 u32, size, u64, flags)
4092 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
4093 u8 compat[sizeof(struct bpf_tunnel_key)];
4097 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
4101 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
4105 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
4108 case offsetof(struct bpf_tunnel_key, tunnel_label):
4109 case offsetof(struct bpf_tunnel_key, tunnel_ext):
4111 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
4112 /* Fixup deprecated structure layouts here, so we have
4113 * a common path later on.
4115 if (ip_tunnel_info_af(info) != AF_INET)
4118 to = (struct bpf_tunnel_key *)compat;
4125 to->tunnel_id = be64_to_cpu(info->key.tun_id);
4126 to->tunnel_tos = info->key.tos;
4127 to->tunnel_ttl = info->key.ttl;
4130 if (flags & BPF_F_TUNINFO_IPV6) {
4131 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
4132 sizeof(to->remote_ipv6));
4133 to->tunnel_label = be32_to_cpu(info->key.label);
4135 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
4136 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
4137 to->tunnel_label = 0;
4140 if (unlikely(size != sizeof(struct bpf_tunnel_key)))
4141 memcpy(to_orig, to, size);
4145 memset(to_orig, 0, size);
4149 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
4150 .func = bpf_skb_get_tunnel_key,
4152 .ret_type = RET_INTEGER,
4153 .arg1_type = ARG_PTR_TO_CTX,
4154 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
4155 .arg3_type = ARG_CONST_SIZE,
4156 .arg4_type = ARG_ANYTHING,
4159 BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
4161 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
4164 if (unlikely(!info ||
4165 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
4169 if (unlikely(size < info->options_len)) {
4174 ip_tunnel_info_opts_get(to, info);
4175 if (size > info->options_len)
4176 memset(to + info->options_len, 0, size - info->options_len);
4178 return info->options_len;
4180 memset(to, 0, size);
4184 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
4185 .func = bpf_skb_get_tunnel_opt,
4187 .ret_type = RET_INTEGER,
4188 .arg1_type = ARG_PTR_TO_CTX,
4189 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
4190 .arg3_type = ARG_CONST_SIZE,
4193 static struct metadata_dst __percpu *md_dst;
4195 BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
4196 const struct bpf_tunnel_key *, from, u32, size, u64, flags)
4198 struct metadata_dst *md = this_cpu_ptr(md_dst);
4199 u8 compat[sizeof(struct bpf_tunnel_key)];
4200 struct ip_tunnel_info *info;
4202 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
4203 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
4205 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
4207 case offsetof(struct bpf_tunnel_key, tunnel_label):
4208 case offsetof(struct bpf_tunnel_key, tunnel_ext):
4209 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
4210 /* Fixup deprecated structure layouts here, so we have
4211 * a common path later on.
4213 memcpy(compat, from, size);
4214 memset(compat + size, 0, sizeof(compat) - size);
4215 from = (const struct bpf_tunnel_key *) compat;
4221 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
4226 dst_hold((struct dst_entry *) md);
4227 skb_dst_set(skb, (struct dst_entry *) md);
4229 info = &md->u.tun_info;
4230 memset(info, 0, sizeof(*info));
4231 info->mode = IP_TUNNEL_INFO_TX;
4233 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
4234 if (flags & BPF_F_DONT_FRAGMENT)
4235 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
4236 if (flags & BPF_F_ZERO_CSUM_TX)
4237 info->key.tun_flags &= ~TUNNEL_CSUM;
4238 if (flags & BPF_F_SEQ_NUMBER)
4239 info->key.tun_flags |= TUNNEL_SEQ;
4241 info->key.tun_id = cpu_to_be64(from->tunnel_id);
4242 info->key.tos = from->tunnel_tos;
4243 info->key.ttl = from->tunnel_ttl;
4245 if (flags & BPF_F_TUNINFO_IPV6) {
4246 info->mode |= IP_TUNNEL_INFO_IPV6;
4247 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
4248 sizeof(from->remote_ipv6));
4249 info->key.label = cpu_to_be32(from->tunnel_label) &
4250 IPV6_FLOWLABEL_MASK;
4252 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
4258 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
4259 .func = bpf_skb_set_tunnel_key,
4261 .ret_type = RET_INTEGER,
4262 .arg1_type = ARG_PTR_TO_CTX,
4263 .arg2_type = ARG_PTR_TO_MEM,
4264 .arg3_type = ARG_CONST_SIZE,
4265 .arg4_type = ARG_ANYTHING,
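/* Usage sketch (illustrative only, not part of this file): setting egress
 * tunnel metadata on a collect_md tunnel device (e.g. a vxlan netdev in
 * external mode) through the metadata dst installed above. Tunnel id,
 * remote address and TTL are assumptions; remote_ipv4 is in host byte
 * order, as the cpu_to_be32() conversion above implies.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int set_tunnel(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {
		.tunnel_id   = 42,
		.remote_ipv4 = 0xac100164,	/* 172.16.1.100 */
		.tunnel_ttl  = 64,
	};

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";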
4268 BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
4269 const u8 *, from, u32, size)
4271 struct ip_tunnel_info *info = skb_tunnel_info(skb);
4272 const struct metadata_dst *md = this_cpu_ptr(md_dst);
4274 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
4276 if (unlikely(size > IP_TUNNEL_OPTS_MAX))
4279 ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
4284 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
4285 .func = bpf_skb_set_tunnel_opt,
4287 .ret_type = RET_INTEGER,
4288 .arg1_type = ARG_PTR_TO_CTX,
4289 .arg2_type = ARG_PTR_TO_MEM,
4290 .arg3_type = ARG_CONST_SIZE,
4293 static const struct bpf_func_proto *
4294 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
4297 struct metadata_dst __percpu *tmp;
4299 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
4304 if (cmpxchg(&md_dst, NULL, tmp))
4305 metadata_dst_free_percpu(tmp);
4309 case BPF_FUNC_skb_set_tunnel_key:
4310 return &bpf_skb_set_tunnel_key_proto;
4311 case BPF_FUNC_skb_set_tunnel_opt:
4312 return &bpf_skb_set_tunnel_opt_proto;
4318 BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
4321 struct bpf_array *array = container_of(map, struct bpf_array, map);
4322 struct cgroup *cgrp;
4325 sk = skb_to_full_sk(skb);
4326 if (!sk || !sk_fullsock(sk))
4328 if (unlikely(idx >= array->map.max_entries))
4331 cgrp = READ_ONCE(array->ptrs[idx]);
4332 if (unlikely(!cgrp))
4335 return sk_under_cgroup_hierarchy(sk, cgrp);
4338 static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
4339 .func = bpf_skb_under_cgroup,
4341 .ret_type = RET_INTEGER,
4342 .arg1_type = ARG_PTR_TO_CTX,
4343 .arg2_type = ARG_CONST_MAP_PTR,
4344 .arg3_type = ARG_ANYTHING,
4347 #ifdef CONFIG_SOCK_CGROUP_DATA
4348 static inline u64 __bpf_sk_cgroup_id(struct sock *sk)
4350 struct cgroup *cgrp;
4352 sk = sk_to_full_sk(sk);
4353 if (!sk || !sk_fullsock(sk))
4356 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4357 return cgroup_id(cgrp);
4360 BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
4362 return __bpf_sk_cgroup_id(skb->sk);
4365 static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
4366 .func = bpf_skb_cgroup_id,
4368 .ret_type = RET_INTEGER,
4369 .arg1_type = ARG_PTR_TO_CTX,
4372 static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
4375 struct cgroup *ancestor;
4376 struct cgroup *cgrp;
4378 sk = sk_to_full_sk(sk);
4379 if (!sk || !sk_fullsock(sk))
4382 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4383 ancestor = cgroup_ancestor(cgrp, ancestor_level);
4387 return cgroup_id(ancestor);
4390 BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
4393 return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level);
4396 static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
4397 .func = bpf_skb_ancestor_cgroup_id,
4399 .ret_type = RET_INTEGER,
4400 .arg1_type = ARG_PTR_TO_CTX,
4401 .arg2_type = ARG_ANYTHING,
4404 BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk)
4406 return __bpf_sk_cgroup_id(sk);
4409 static const struct bpf_func_proto bpf_sk_cgroup_id_proto = {
4410 .func = bpf_sk_cgroup_id,
4412 .ret_type = RET_INTEGER,
4413 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
4416 BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level)
4418 return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level);
4421 static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = {
4422 .func = bpf_sk_ancestor_cgroup_id,
4424 .ret_type = RET_INTEGER,
4425 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
4426 .arg2_type = ARG_ANYTHING,
4430 static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
4431 unsigned long off, unsigned long len)
4433 memcpy(dst_buff, src_buff + off, len);
4437 BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
4438 u64, flags, void *, meta, u64, meta_size)
4440 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4442 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4444 if (unlikely(!xdp ||
4445 xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
4448 return bpf_event_output(map, flags, meta, meta_size, xdp->data,
4449 xdp_size, bpf_xdp_copy);
4452 static const struct bpf_func_proto bpf_xdp_event_output_proto = {
4453 .func = bpf_xdp_event_output,
4455 .ret_type = RET_INTEGER,
4456 .arg1_type = ARG_PTR_TO_CTX,
4457 .arg2_type = ARG_CONST_MAP_PTR,
4458 .arg3_type = ARG_ANYTHING,
4459 .arg4_type = ARG_PTR_TO_MEM,
4460 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4463 BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)
4465 const struct bpf_func_proto bpf_xdp_output_proto = {
4466 .func = bpf_xdp_event_output,
4468 .ret_type = RET_INTEGER,
4469 .arg1_type = ARG_PTR_TO_BTF_ID,
4470 .arg1_btf_id = &bpf_xdp_output_btf_ids[0],
4471 .arg2_type = ARG_CONST_MAP_PTR,
4472 .arg3_type = ARG_ANYTHING,
4473 .arg4_type = ARG_PTR_TO_MEM,
4474 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4477 BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
4479 return skb->sk ? __sock_gen_cookie(skb->sk) : 0;
4482 static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
4483 .func = bpf_get_socket_cookie,
4485 .ret_type = RET_INTEGER,
4486 .arg1_type = ARG_PTR_TO_CTX,
4489 BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4491 return __sock_gen_cookie(ctx->sk);
4494 static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
4495 .func = bpf_get_socket_cookie_sock_addr,
4497 .ret_type = RET_INTEGER,
4498 .arg1_type = ARG_PTR_TO_CTX,
4501 BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx)
4503 return __sock_gen_cookie(ctx);
4506 static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = {
4507 .func = bpf_get_socket_cookie_sock,
4509 .ret_type = RET_INTEGER,
4510 .arg1_type = ARG_PTR_TO_CTX,
4513 BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
4515 return __sock_gen_cookie(ctx->sk);
4518 static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
4519 .func = bpf_get_socket_cookie_sock_ops,
4521 .ret_type = RET_INTEGER,
4522 .arg1_type = ARG_PTR_TO_CTX,
4525 static u64 __bpf_get_netns_cookie(struct sock *sk)
4527 #ifdef CONFIG_NET_NS
4528 return __net_gen_cookie(sk ? sk->sk_net.net : &init_net);
4534 BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx)
4536 return __bpf_get_netns_cookie(ctx);
4539 static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = {
4540 .func = bpf_get_netns_cookie_sock,
4542 .ret_type = RET_INTEGER,
4543 .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
4546 BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4548 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
4551 static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = {
4552 .func = bpf_get_netns_cookie_sock_addr,
4554 .ret_type = RET_INTEGER,
4555 .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
4558 BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
4560 struct sock *sk = sk_to_full_sk(skb->sk);
4563 if (!sk || !sk_fullsock(sk))
4565 kuid = sock_net_uid(sock_net(sk), sk);
4566 return from_kuid_munged(sock_net(sk)->user_ns, kuid);
4569 static const struct bpf_func_proto bpf_get_socket_uid_proto = {
4570 .func = bpf_get_socket_uid,
4572 .ret_type = RET_INTEGER,
4573 .arg1_type = ARG_PTR_TO_CTX,
4576 static int _bpf_setsockopt(struct sock *sk, int level, int optname,
4577 char *optval, int optlen)
4579 char devname[IFNAMSIZ];
4585 if (!sk_fullsock(sk))
4588 sock_owned_by_me(sk);
4590 if (level == SOL_SOCKET) {
4591 if (optlen != sizeof(int) && optname != SO_BINDTODEVICE)
4593 val = *((int *)optval);
4594 valbool = val ? 1 : 0;
4596 /* Only some socketops are supported */
4599 val = min_t(u32, val, sysctl_rmem_max);
4600 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4601 WRITE_ONCE(sk->sk_rcvbuf,
4602 max_t(int, val * 2, SOCK_MIN_RCVBUF));
4605 val = min_t(u32, val, sysctl_wmem_max);
4606 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4607 WRITE_ONCE(sk->sk_sndbuf,
4608 max_t(int, val * 2, SOCK_MIN_SNDBUF));
4610 case SO_MAX_PACING_RATE: /* 32bit version */
4612 cmpxchg(&sk->sk_pacing_status,
4615 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
4616 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4617 sk->sk_max_pacing_rate);
4620 sk->sk_priority = val;
4625 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
4628 if (sk->sk_mark != val) {
4633 case SO_BINDTODEVICE:
4634 optlen = min_t(long, optlen, IFNAMSIZ - 1);
4635 strncpy(devname, optval, optlen);
4636 devname[optlen] = 0;
4639 if (devname[0] != '\0') {
4640 struct net_device *dev;
4645 dev = dev_get_by_name(net, devname);
4648 ifindex = dev->ifindex;
4651 ret = sock_bindtoindex(sk, ifindex, false);
4654 if (sk->sk_prot->keepalive)
4655 sk->sk_prot->keepalive(sk, valbool);
4656 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
4662 } else if (level == SOL_IP) {
4663 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4666 val = *((int *)optval);
4667 /* Only some options are supported */
4670 if (val < -1 || val > 0xff) {
4673 struct inet_sock *inet = inet_sk(sk);
4683 #if IS_ENABLED(CONFIG_IPV6)
4684 } else if (level == SOL_IPV6) {
4685 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4688 val = *((int *)optval);
4689 /* Only some options are supported */
4692 if (val < -1 || val > 0xff) {
4695 struct ipv6_pinfo *np = inet6_sk(sk);
4706 } else if (level == SOL_TCP &&
4707 sk->sk_prot->setsockopt == tcp_setsockopt) {
4708 if (optname == TCP_CONGESTION) {
4709 char name[TCP_CA_NAME_MAX];
4711 strncpy(name, optval, min_t(long, optlen,
4712 TCP_CA_NAME_MAX-1));
4713 name[TCP_CA_NAME_MAX-1] = 0;
4714 ret = tcp_set_congestion_control(sk, name, false, true);
4716 struct inet_connection_sock *icsk = inet_csk(sk);
4717 struct tcp_sock *tp = tcp_sk(sk);
4718 unsigned long timeout;
4720 if (optlen != sizeof(int))
4723 val = *((int *)optval);
4724 /* Only some options are supported */
4727 if (val <= 0 || tp->data_segs_out > tp->syn_data)
4732 case TCP_BPF_SNDCWND_CLAMP:
4736 tp->snd_cwnd_clamp = val;
4737 tp->snd_ssthresh = val;
4740 case TCP_BPF_DELACK_MAX:
4741 timeout = usecs_to_jiffies(val);
4742 if (timeout > TCP_DELACK_MAX ||
4743 timeout < TCP_TIMEOUT_MIN)
4745 inet_csk(sk)->icsk_delack_max = timeout;
4747 case TCP_BPF_RTO_MIN:
4748 timeout = usecs_to_jiffies(val);
4749 if (timeout > TCP_RTO_MIN ||
4750 timeout < TCP_TIMEOUT_MIN)
4752 inet_csk(sk)->icsk_rto_min = timeout;
4755 if (val < 0 || val > 1)
4761 ret = tcp_sock_set_keepidle_locked(sk, val);
4764 if (val < 1 || val > MAX_TCP_KEEPINTVL)
4767 tp->keepalive_intvl = val * HZ;
4770 if (val < 1 || val > MAX_TCP_KEEPCNT)
4773 tp->keepalive_probes = val;
4776 if (val < 1 || val > MAX_TCP_SYNCNT)
4779 icsk->icsk_syn_retries = val;
4781 case TCP_USER_TIMEOUT:
4785 icsk->icsk_user_timeout = val;
4798 static int _bpf_getsockopt(struct sock *sk, int level, int optname,
4799 char *optval, int optlen)
4801 if (!sk_fullsock(sk))
4804 sock_owned_by_me(sk);
4807 if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
4808 struct inet_connection_sock *icsk;
4809 struct tcp_sock *tp;
4812 case TCP_CONGESTION:
4813 icsk = inet_csk(sk);
4815 if (!icsk->icsk_ca_ops || optlen <= 1)
4817 strncpy(optval, icsk->icsk_ca_ops->name, optlen);
4818 optval[optlen - 1] = 0;
4823 if (optlen <= 0 || !tp->saved_syn ||
4824 optlen > tcp_saved_syn_len(tp->saved_syn))
4826 memcpy(optval, tp->saved_syn->data, optlen);
4831 } else if (level == SOL_IP) {
4832 struct inet_sock *inet = inet_sk(sk);
4834 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4837 /* Only some options are supported */
4840 *((int *)optval) = (int)inet->tos;
4845 #if IS_ENABLED(CONFIG_IPV6)
4846 } else if (level == SOL_IPV6) {
4847 struct ipv6_pinfo *np = inet6_sk(sk);
4849 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4852 /* Only some options are supported */
4855 *((int *)optval) = (int)np->tclass;
4867 memset(optval, 0, optlen);
4871 BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
4872 int, level, int, optname, char *, optval, int, optlen)
4874 return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen);
4877 static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
4878 .func = bpf_sock_addr_setsockopt,
4880 .ret_type = RET_INTEGER,
4881 .arg1_type = ARG_PTR_TO_CTX,
4882 .arg2_type = ARG_ANYTHING,
4883 .arg3_type = ARG_ANYTHING,
4884 .arg4_type = ARG_PTR_TO_MEM,
4885 .arg5_type = ARG_CONST_SIZE,
4888 BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx,
4889 int, level, int, optname, char *, optval, int, optlen)
4891 return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen);
4894 static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
4895 .func = bpf_sock_addr_getsockopt,
4897 .ret_type = RET_INTEGER,
4898 .arg1_type = ARG_PTR_TO_CTX,
4899 .arg2_type = ARG_ANYTHING,
4900 .arg3_type = ARG_ANYTHING,
4901 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
4902 .arg5_type = ARG_CONST_SIZE,
4905 BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4906 int, level, int, optname, char *, optval, int, optlen)
4908 return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
4911 static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
4912 .func = bpf_sock_ops_setsockopt,
4914 .ret_type = RET_INTEGER,
4915 .arg1_type = ARG_PTR_TO_CTX,
4916 .arg2_type = ARG_ANYTHING,
4917 .arg3_type = ARG_ANYTHING,
4918 .arg4_type = ARG_PTR_TO_MEM,
4919 .arg5_type = ARG_CONST_SIZE,
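/* Usage sketch (illustrative only, not part of this file): a sockops
 * program switching the congestion control of freshly established
 * connections through the _bpf_setsockopt() TCP_CONGESTION path above.
 * Availability of "bbr" on the running kernel is an assumption.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SOL_TCP		6	/* IPPROTO_TCP */
#define TCP_CONGESTION	13	/* from linux/tcp.h */

SEC("sockops")
int set_cc(struct bpf_sock_ops *skops)
{
	char cc[] = "bbr";

	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
		bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
			       cc, sizeof(cc));
	return 1;
}

char _license[] SEC("license") = "GPL";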
4922 static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock,
4923 int optname, const u8 **start)
4925 struct sk_buff *syn_skb = bpf_sock->syn_skb;
4926 const u8 *hdr_start;
4930 /* sk is a request_sock here */
4932 if (optname == TCP_BPF_SYN) {
4933 hdr_start = syn_skb->data;
4934 ret = tcp_hdrlen(syn_skb);
4935 } else if (optname == TCP_BPF_SYN_IP) {
4936 hdr_start = skb_network_header(syn_skb);
4937 ret = skb_network_header_len(syn_skb) +
4938 tcp_hdrlen(syn_skb);
4940 /* optname == TCP_BPF_SYN_MAC */
4941 hdr_start = skb_mac_header(syn_skb);
4942 ret = skb_mac_header_len(syn_skb) +
4943 skb_network_header_len(syn_skb) +
4944 tcp_hdrlen(syn_skb);
4947 struct sock *sk = bpf_sock->sk;
4948 struct saved_syn *saved_syn;
4950 if (sk->sk_state == TCP_NEW_SYN_RECV)
4951 /* synack retransmit. bpf_sock->syn_skb will
4952 * not be available, so fall back to
4953 * saved_syn (if it was saved).
4955 saved_syn = inet_reqsk(sk)->saved_syn;
4957 saved_syn = tcp_sk(sk)->saved_syn;
4962 if (optname == TCP_BPF_SYN) {
4963 hdr_start = saved_syn->data +
4964 saved_syn->mac_hdrlen +
4965 saved_syn->network_hdrlen;
4966 ret = saved_syn->tcp_hdrlen;
4967 } else if (optname == TCP_BPF_SYN_IP) {
4968 hdr_start = saved_syn->data +
4969 saved_syn->mac_hdrlen;
4970 ret = saved_syn->network_hdrlen +
4971 saved_syn->tcp_hdrlen;
4973 /* optname == TCP_BPF_SYN_MAC */
4975 /* TCP_SAVE_SYN may not have saved the mac hdr */
4976 if (!saved_syn->mac_hdrlen)
4979 hdr_start = saved_syn->data;
4980 ret = saved_syn->mac_hdrlen +
4981 saved_syn->network_hdrlen +
4982 saved_syn->tcp_hdrlen;
4990 BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4991 int, level, int, optname, char *, optval, int, optlen)
4993 if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP &&
4994 optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) {
4995 int ret, copy_len = 0;
4998 ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start);
5001 if (optlen < copy_len) {
5006 memcpy(optval, start, copy_len);
5009 /* Zero out unused buffer at the end */
5010 memset(optval + copy_len, 0, optlen - copy_len);
5015 return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen);
5018 static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = {
5019 .func = bpf_sock_ops_getsockopt,
5021 .ret_type = RET_INTEGER,
5022 .arg1_type = ARG_PTR_TO_CTX,
5023 .arg2_type = ARG_ANYTHING,
5024 .arg3_type = ARG_ANYTHING,
5025 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
5026 .arg5_type = ARG_CONST_SIZE,
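/* Example (hedged sketch, BPF program side): read back the saved SYN's
 * headers via the TCP_BPF_SYN* optnames handled above; on success the
 * buffer starts at the network header. The 60-byte buffer size is an
 * assumption; a shorter buffer still receives what fits, but the call
 * returns an error.
 *
 *	char syn[60];
 *	int err;
 *
 *	err = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP,
 *			     syn, sizeof(syn));
 */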
5029 BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
5032 struct sock *sk = bpf_sock->sk;
5033 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
5035 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
5038 tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
5040 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
5043 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
5044 .func = bpf_sock_ops_cb_flags_set,
5046 .ret_type = RET_INTEGER,
5047 .arg1_type = ARG_PTR_TO_CTX,
5048 .arg2_type = ARG_ANYTHING,
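/* Example (hedged sketch, BPF program side): subscribe to RTO and state
 * change callbacks. Per the code above, any unsupported flag bits come
 * back in the return value.
 *
 *	bpf_sock_ops_cb_flags_set(skops,
 *				  BPF_SOCK_OPS_RTO_CB_FLAG |
 *				  BPF_SOCK_OPS_STATE_CB_FLAG);
 */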
5051 const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
5052 EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
5054 BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
5058 struct sock *sk = ctx->sk;
5059 u32 flags = BIND_FROM_BPF;
5063 if (addr_len < offsetofend(struct sockaddr, sa_family))
5065 if (addr->sa_family == AF_INET) {
5066 if (addr_len < sizeof(struct sockaddr_in))
5068 if (((struct sockaddr_in *)addr)->sin_port == htons(0))
5069 flags |= BIND_FORCE_ADDRESS_NO_PORT;
5070 return __inet_bind(sk, addr, addr_len, flags);
5071 #if IS_ENABLED(CONFIG_IPV6)
5072 } else if (addr->sa_family == AF_INET6) {
5073 if (addr_len < SIN6_LEN_RFC2133)
5075 if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0))
5076 flags |= BIND_FORCE_ADDRESS_NO_PORT;
5077 /* ipv6_bpf_stub cannot be NULL, since it's called from
5078 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded
5080 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags);
5081 #endif /* CONFIG_IPV6 */
5083 #endif /* CONFIG_INET */
5085 return -EAFNOSUPPORT;
5088 static const struct bpf_func_proto bpf_bind_proto = {
5091 .ret_type = RET_INTEGER,
5092 .arg1_type = ARG_PTR_TO_CTX,
5093 .arg2_type = ARG_PTR_TO_MEM,
5094 .arg3_type = ARG_CONST_SIZE,
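/* Example (hedged sketch, BPF program side): a cgroup/connect4 program can
 * pin the egress source address before the connect proceeds; the address
 * (10.0.0.1) is an assumption. Leaving sin_port at 0 lets the code above
 * set BIND_FORCE_ADDRESS_NO_PORT, deferring port allocation.
 *
 *	SEC("cgroup/connect4")
 *	int pick_src(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {
 *			.sin_family = AF_INET,
 *			.sin_addr.s_addr = bpf_htonl(0x0a000001),
 *		};
 *
 *		bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa));
 *		return 1;
 *	}
 */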
5098 BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
5099 struct bpf_xfrm_state *, to, u32, size, u64, flags)
5101 const struct sec_path *sp = skb_sec_path(skb);
5102 const struct xfrm_state *x;
5104 if (!sp || unlikely(index >= sp->len || flags))
5107 x = sp->xvec[index];
5109 if (unlikely(size != sizeof(struct bpf_xfrm_state)))
5112 to->reqid = x->props.reqid;
5113 to->spi = x->id.spi;
5114 to->family = x->props.family;
5117 if (to->family == AF_INET6) {
5118 memcpy(to->remote_ipv6, x->props.saddr.a6,
5119 sizeof(to->remote_ipv6));
5121 to->remote_ipv4 = x->props.saddr.a4;
5122 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
5127 memset(to, 0, size);
5131 static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
5132 .func = bpf_skb_get_xfrm_state,
5134 .ret_type = RET_INTEGER,
5135 .arg1_type = ARG_PTR_TO_CTX,
5136 .arg2_type = ARG_ANYTHING,
5137 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
5138 .arg4_type = ARG_CONST_SIZE,
5139 .arg5_type = ARG_ANYTHING,
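/* Example (hedged sketch, TC program side): inspect the first xfrm state
 * on the skb's sec_path. Index 0 and the printk format are assumptions.
 *
 *	struct bpf_xfrm_state xs;
 *
 *	if (!bpf_skb_get_xfrm_state(skb, 0, &xs, sizeof(xs), 0))
 *		bpf_printk("reqid %u spi 0x%x\n",
 *			   xs.reqid, bpf_ntohl(xs.spi));
 */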
5143 #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
5144 static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
5145 const struct neighbour *neigh,
5146 const struct net_device *dev)
5148 memcpy(params->dmac, neigh->ha, ETH_ALEN);
5149 memcpy(params->smac, dev->dev_addr, ETH_ALEN);
5150 params->h_vlan_TCI = 0;
5151 params->h_vlan_proto = 0;
5152 params->ifindex = dev->ifindex;
5158 #if IS_ENABLED(CONFIG_INET)
5159 static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
5160 u32 flags, bool check_mtu)
5162 struct fib_nh_common *nhc;
5163 struct in_device *in_dev;
5164 struct neighbour *neigh;
5165 struct net_device *dev;
5166 struct fib_result res;
5171 dev = dev_get_by_index_rcu(net, params->ifindex);
5175 /* verify forwarding is enabled on this interface */
5176 in_dev = __in_dev_get_rcu(dev);
5177 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
5178 return BPF_FIB_LKUP_RET_FWD_DISABLED;
5180 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
5182 fl4.flowi4_oif = params->ifindex;
5184 fl4.flowi4_iif = params->ifindex;
5187 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
5188 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
5189 fl4.flowi4_flags = 0;
5191 fl4.flowi4_proto = params->l4_protocol;
5192 fl4.daddr = params->ipv4_dst;
5193 fl4.saddr = params->ipv4_src;
5194 fl4.fl4_sport = params->sport;
5195 fl4.fl4_dport = params->dport;
5196 fl4.flowi4_multipath_hash = 0;
5198 if (flags & BPF_FIB_LOOKUP_DIRECT) {
5199 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
5200 struct fib_table *tb;
5202 tb = fib_get_table(net, tbid);
5204 return BPF_FIB_LKUP_RET_NOT_FWDED;
5206 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
5208 fl4.flowi4_mark = 0;
5209 fl4.flowi4_secid = 0;
5210 fl4.flowi4_tun_key.tun_id = 0;
5211 fl4.flowi4_uid = sock_net_uid(net, NULL);
5213 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
5217 /* map fib lookup errors to RTN_ type */
5219 return BPF_FIB_LKUP_RET_BLACKHOLE;
5220 if (err == -EHOSTUNREACH)
5221 return BPF_FIB_LKUP_RET_UNREACHABLE;
5223 return BPF_FIB_LKUP_RET_PROHIBIT;
5225 return BPF_FIB_LKUP_RET_NOT_FWDED;
5228 if (res.type != RTN_UNICAST)
5229 return BPF_FIB_LKUP_RET_NOT_FWDED;
5231 if (fib_info_num_path(res.fi) > 1)
5232 fib_select_path(net, &res, &fl4, NULL);
5235 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
5236 if (params->tot_len > mtu)
5237 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
5242 /* do not handle lwt encaps right now */
5243 if (nhc->nhc_lwtstate)
5244 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
5248 params->rt_metric = res.fi->fib_priority;
5250 /* xdp and cls_bpf programs are run in RCU-bh so
5251 * rcu_read_lock_bh is not needed here
5253 if (likely(nhc->nhc_gw_family != AF_INET6)) {
5254 if (nhc->nhc_gw_family)
5255 params->ipv4_dst = nhc->nhc_gw.ipv4;
5257 neigh = __ipv4_neigh_lookup_noref(dev,
5258 (__force u32)params->ipv4_dst);
5260 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
5262 params->family = AF_INET6;
5263 *dst = nhc->nhc_gw.ipv6;
5264 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
5268 return BPF_FIB_LKUP_RET_NO_NEIGH;
5270 return bpf_fib_set_fwd_params(params, neigh, dev);
5274 #if IS_ENABLED(CONFIG_IPV6)
5275 static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
5276 u32 flags, bool check_mtu)
5278 struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
5279 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
5280 struct fib6_result res = {};
5281 struct neighbour *neigh;
5282 struct net_device *dev;
5283 struct inet6_dev *idev;
5289 /* link local addresses are never forwarded */
5290 if (rt6_need_strict(dst) || rt6_need_strict(src))
5291 return BPF_FIB_LKUP_RET_NOT_FWDED;
5293 dev = dev_get_by_index_rcu(net, params->ifindex);
5297 idev = __in6_dev_get_safely(dev);
5298 if (unlikely(!idev || !idev->cnf.forwarding))
5299 return BPF_FIB_LKUP_RET_FWD_DISABLED;
5301 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
5303 oif = fl6.flowi6_oif = params->ifindex;
5305 oif = fl6.flowi6_iif = params->ifindex;
5307 strict = RT6_LOOKUP_F_HAS_SADDR;
5309 fl6.flowlabel = params->flowinfo;
5310 fl6.flowi6_scope = 0;
5311 fl6.flowi6_flags = 0;
5314 fl6.flowi6_proto = params->l4_protocol;
5317 fl6.fl6_sport = params->sport;
5318 fl6.fl6_dport = params->dport;
5320 if (flags & BPF_FIB_LOOKUP_DIRECT) {
5321 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
5322 struct fib6_table *tb;
5324 tb = ipv6_stub->fib6_get_table(net, tbid);
5326 return BPF_FIB_LKUP_RET_NOT_FWDED;
5328 err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
5331 fl6.flowi6_mark = 0;
5332 fl6.flowi6_secid = 0;
5333 fl6.flowi6_tun_key.tun_id = 0;
5334 fl6.flowi6_uid = sock_net_uid(net, NULL);
5336 err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
5339 if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
5340 res.f6i == net->ipv6.fib6_null_entry))
5341 return BPF_FIB_LKUP_RET_NOT_FWDED;
5343 switch (res.fib6_type) {
5344 /* only unicast is forwarded */
5348 return BPF_FIB_LKUP_RET_BLACKHOLE;
5349 case RTN_UNREACHABLE:
5350 return BPF_FIB_LKUP_RET_UNREACHABLE;
5352 return BPF_FIB_LKUP_RET_PROHIBIT;
5354 return BPF_FIB_LKUP_RET_NOT_FWDED;
5357 ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
5358 fl6.flowi6_oif != 0, NULL, strict);
5361 mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
5362 if (params->tot_len > mtu)
5363 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
5366 if (res.nh->fib_nh_lws)
5367 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
5369 if (res.nh->fib_nh_gw_family)
5370 *dst = res.nh->fib_nh_gw6;
5372 dev = res.nh->fib_nh_dev;
5373 params->rt_metric = res.f6i->fib6_metric;
5375 /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is not needed here */
5378 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
5380 return BPF_FIB_LKUP_RET_NO_NEIGH;
5382 return bpf_fib_set_fwd_params(params, neigh, dev);
5386 BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
5387 struct bpf_fib_lookup *, params, int, plen, u32, flags)
5389 if (plen < sizeof(*params))
5392 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
5395 switch (params->family) {
5396 #if IS_ENABLED(CONFIG_INET)
5398 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
5401 #if IS_ENABLED(CONFIG_IPV6)
5403 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
5407 return -EAFNOSUPPORT;
5410 static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
5411 .func = bpf_xdp_fib_lookup,
5413 .ret_type = RET_INTEGER,
5414 .arg1_type = ARG_PTR_TO_CTX,
5415 .arg2_type = ARG_PTR_TO_MEM,
5416 .arg3_type = ARG_CONST_SIZE,
5417 .arg4_type = ARG_ANYTHING,
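/* Example (hedged sketch, XDP program side): a forwarding fast path. The
 * packet parsing that fills iph/eth is elided and assumed done; only a
 * successful lookup rewrites the MACs and redirects.
 *
 *	struct bpf_fib_lookup fib = {};
 *
 *	fib.family   = AF_INET;
 *	fib.ifindex  = ctx->ingress_ifindex;
 *	fib.ipv4_src = iph->saddr;
 *	fib.ipv4_dst = iph->daddr;
 *
 *	if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) ==
 *	    BPF_FIB_LKUP_RET_SUCCESS) {
 *		__builtin_memcpy(eth->h_dest, fib.dmac, ETH_ALEN);
 *		__builtin_memcpy(eth->h_source, fib.smac, ETH_ALEN);
 *		return bpf_redirect(fib.ifindex, 0);
 *	}
 */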
5420 BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
5421 struct bpf_fib_lookup *, params, int, plen, u32, flags)
5423 struct net *net = dev_net(skb->dev);
5424 int rc = -EAFNOSUPPORT;
5426 if (plen < sizeof(*params))
5429 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
5432 switch (params->family) {
5433 #if IS_ENABLED(CONFIG_INET)
5435 rc = bpf_ipv4_fib_lookup(net, params, flags, false);
5438 #if IS_ENABLED(CONFIG_IPV6)
5440 rc = bpf_ipv6_fib_lookup(net, params, flags, false);
5446 struct net_device *dev;
5448 dev = dev_get_by_index_rcu(net, params->ifindex);
5449 if (!is_skb_forwardable(dev, skb))
5450 rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
5456 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
5457 .func = bpf_skb_fib_lookup,
5459 .ret_type = RET_INTEGER,
5460 .arg1_type = ARG_PTR_TO_CTX,
5461 .arg2_type = ARG_PTR_TO_MEM,
5462 .arg3_type = ARG_CONST_SIZE,
5463 .arg4_type = ARG_ANYTHING,
5466 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5467 static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
5470 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
5472 if (!seg6_validate_srh(srh, len, false))
5476 case BPF_LWT_ENCAP_SEG6_INLINE:
5477 if (skb->protocol != htons(ETH_P_IPV6))
5480 err = seg6_do_srh_inline(skb, srh);
5482 case BPF_LWT_ENCAP_SEG6:
5483 skb_reset_inner_headers(skb);
5484 skb->encapsulation = 1;
5485 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
5491 bpf_compute_data_pointers(skb);
5495 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5496 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
5498 return seg6_lookup_nexthop(skb, NULL, 0);
5500 #endif /* CONFIG_IPV6_SEG6_BPF */
5502 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5503 static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
5506 return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
5510 BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
5514 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5515 case BPF_LWT_ENCAP_SEG6:
5516 case BPF_LWT_ENCAP_SEG6_INLINE:
5517 return bpf_push_seg6_encap(skb, type, hdr, len);
5519 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5520 case BPF_LWT_ENCAP_IP:
5521 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
5528 BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
5529 void *, hdr, u32, len)
5532 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5533 case BPF_LWT_ENCAP_IP:
5534 return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
5541 static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
5542 .func = bpf_lwt_in_push_encap,
5544 .ret_type = RET_INTEGER,
5545 .arg1_type = ARG_PTR_TO_CTX,
5546 .arg2_type = ARG_ANYTHING,
5547 .arg3_type = ARG_PTR_TO_MEM,
5548 .arg4_type = ARG_CONST_SIZE
5551 static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
5552 .func = bpf_lwt_xmit_push_encap,
5554 .ret_type = RET_INTEGER,
5555 .arg1_type = ARG_PTR_TO_CTX,
5556 .arg2_type = ARG_ANYTHING,
5557 .arg3_type = ARG_PTR_TO_MEM,
5558 .arg4_type = ARG_CONST_SIZE
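/* Example (hedged sketch, lwt_in program side): push a pre-built SRH onto
 * the packet. srh and srh_len are assumptions; the header must pass
 * seg6_validate_srh() in bpf_push_seg6_encap() above.
 *
 *	bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_SEG6, srh, srh_len);
 */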
5561 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5562 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
5563 const void *, from, u32, len)
5565 struct seg6_bpf_srh_state *srh_state =
5566 this_cpu_ptr(&seg6_bpf_srh_states);
5567 struct ipv6_sr_hdr *srh = srh_state->srh;
5568 void *srh_tlvs, *srh_end, *ptr;
5574 srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
5575 srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
5577 ptr = skb->data + offset;
5578 if (ptr >= srh_tlvs && ptr + len <= srh_end)
5579 srh_state->valid = false;
5580 else if (ptr < (void *)&srh->flags ||
5581 ptr + len > (void *)&srh->segments)
5584 if (unlikely(bpf_try_make_writable(skb, offset + len)))
5586 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5588 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5590 memcpy(skb->data + offset, from, len);
5594 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
5595 .func = bpf_lwt_seg6_store_bytes,
5597 .ret_type = RET_INTEGER,
5598 .arg1_type = ARG_PTR_TO_CTX,
5599 .arg2_type = ARG_ANYTHING,
5600 .arg3_type = ARG_PTR_TO_MEM,
5601 .arg4_type = ARG_CONST_SIZE
5604 static void bpf_update_srh_state(struct sk_buff *skb)
5606 struct seg6_bpf_srh_state *srh_state =
5607 this_cpu_ptr(&seg6_bpf_srh_states);
5610 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
5611 srh_state->srh = NULL;
5613 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5614 srh_state->hdrlen = srh_state->srh->hdrlen << 3;
5615 srh_state->valid = true;
5619 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
5620 u32, action, void *, param, u32, param_len)
5622 struct seg6_bpf_srh_state *srh_state =
5623 this_cpu_ptr(&seg6_bpf_srh_states);
5628 case SEG6_LOCAL_ACTION_END_X:
5629 if (!seg6_bpf_has_valid_srh(skb))
5631 if (param_len != sizeof(struct in6_addr))
5633 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
5634 case SEG6_LOCAL_ACTION_END_T:
5635 if (!seg6_bpf_has_valid_srh(skb))
5637 if (param_len != sizeof(int))
5639 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5640 case SEG6_LOCAL_ACTION_END_DT6:
5641 if (!seg6_bpf_has_valid_srh(skb))
5643 if (param_len != sizeof(int))
5646 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
5648 if (!pskb_pull(skb, hdroff))
5651 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
5652 skb_reset_network_header(skb);
5653 skb_reset_transport_header(skb);
5654 skb->encapsulation = 0;
5656 bpf_compute_data_pointers(skb);
5657 bpf_update_srh_state(skb);
5658 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5659 case SEG6_LOCAL_ACTION_END_B6:
5660 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5662 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
5665 bpf_update_srh_state(skb);
5668 case SEG6_LOCAL_ACTION_END_B6_ENCAP:
5669 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5671 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
5674 bpf_update_srh_state(skb);
5682 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
5683 .func = bpf_lwt_seg6_action,
5685 .ret_type = RET_INTEGER,
5686 .arg1_type = ARG_PTR_TO_CTX,
5687 .arg2_type = ARG_ANYTHING,
5688 .arg3_type = ARG_PTR_TO_MEM,
5689 .arg4_type = ARG_CONST_SIZE
5692 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
5695 struct seg6_bpf_srh_state *srh_state =
5696 this_cpu_ptr(&seg6_bpf_srh_states);
5697 struct ipv6_sr_hdr *srh = srh_state->srh;
5698 void *srh_end, *srh_tlvs, *ptr;
5699 struct ipv6hdr *hdr;
5703 if (unlikely(srh == NULL))
5706 srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
5707 ((srh->first_segment + 1) << 4));
5708 srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
5710 ptr = skb->data + offset;
5712 if (unlikely(ptr < srh_tlvs || ptr > srh_end))
5714 if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
5718 ret = skb_cow_head(skb, len);
5719 if (unlikely(ret < 0))
5722 ret = bpf_skb_net_hdr_push(skb, offset, len);
5724 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
5727 bpf_compute_data_pointers(skb);
5728 if (unlikely(ret < 0))
5731 hdr = (struct ipv6hdr *)skb->data;
5732 hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5734 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5736 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5737 srh_state->hdrlen += len;
5738 srh_state->valid = false;
5742 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
5743 .func = bpf_lwt_seg6_adjust_srh,
5745 .ret_type = RET_INTEGER,
5746 .arg1_type = ARG_PTR_TO_CTX,
5747 .arg2_type = ARG_ANYTHING,
5748 .arg3_type = ARG_ANYTHING,
5750 #endif /* CONFIG_IPV6_SEG6_BPF */
5753 static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
5754 int dif, int sdif, u8 family, u8 proto)
5756 bool refcounted = false;
5757 struct sock *sk = NULL;
5759 if (family == AF_INET) {
5760 __be32 src4 = tuple->ipv4.saddr;
5761 __be32 dst4 = tuple->ipv4.daddr;
5763 if (proto == IPPROTO_TCP)
5764 sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
5765 src4, tuple->ipv4.sport,
5766 dst4, tuple->ipv4.dport,
5767 dif, sdif, &refcounted);
5769 sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
5770 dst4, tuple->ipv4.dport,
5771 dif, sdif, &udp_table, NULL);
5772 #if IS_ENABLED(CONFIG_IPV6)
5774 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
5775 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
5777 if (proto == IPPROTO_TCP)
5778 sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
5779 src6, tuple->ipv6.sport,
5780 dst6, ntohs(tuple->ipv6.dport),
5781 dif, sdif, &refcounted);
5782 else if (likely(ipv6_bpf_stub))
5783 sk = ipv6_bpf_stub->udp6_lib_lookup(net,
5784 src6, tuple->ipv6.sport,
5785 dst6, tuple->ipv6.dport,
5791 if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
5792 WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
5798 /* bpf_skc_lookup performs the core lookup for different types of sockets,
5799 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
5800 * Returns the socket pointer; the BPF_CALL wrappers cast it to an
5801 * 'unsigned long' to satisfy their declarations.
5803 static struct sock *
5804 __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5805 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5808 struct sock *sk = NULL;
5809 u8 family = AF_UNSPEC;
5813 if (len == sizeof(tuple->ipv4))
5815 else if (len == sizeof(tuple->ipv6))
5820 if (unlikely(family == AF_UNSPEC || flags ||
5821 !((s32)netns_id < 0 || netns_id <= S32_MAX)))
5824 if (family == AF_INET)
5825 sdif = inet_sdif(skb);
5827 sdif = inet6_sdif(skb);
5829 if ((s32)netns_id < 0) {
5831 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5833 net = get_net_ns_by_id(caller_net, netns_id);
5836 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5844 static struct sock *
5845 __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5846 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5849 struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
5850 ifindex, proto, netns_id, flags);
5853 sk = sk_to_full_sk(sk);
5854 if (!sk_fullsock(sk)) {
5863 static struct sock *
5864 bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5865 u8 proto, u64 netns_id, u64 flags)
5867 struct net *caller_net;
5871 caller_net = dev_net(skb->dev);
5872 ifindex = skb->dev->ifindex;
5874 caller_net = sock_net(skb->sk);
5878 return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
5882 static struct sock *
5883 bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5884 u8 proto, u64 netns_id, u64 flags)
5886 struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
5890 sk = sk_to_full_sk(sk);
5891 if (!sk_fullsock(sk)) {
5900 BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
5901 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5903 return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
5907 static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
5908 .func = bpf_skc_lookup_tcp,
5911 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5912 .arg1_type = ARG_PTR_TO_CTX,
5913 .arg2_type = ARG_PTR_TO_MEM,
5914 .arg3_type = ARG_CONST_SIZE,
5915 .arg4_type = ARG_ANYTHING,
5916 .arg5_type = ARG_ANYTHING,
5919 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
5920 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5922 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
5926 static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
5927 .func = bpf_sk_lookup_tcp,
5930 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5931 .arg1_type = ARG_PTR_TO_CTX,
5932 .arg2_type = ARG_PTR_TO_MEM,
5933 .arg3_type = ARG_CONST_SIZE,
5934 .arg4_type = ARG_ANYTHING,
5935 .arg5_type = ARG_ANYTHING,
5938 BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
5939 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5941 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
5945 static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
5946 .func = bpf_sk_lookup_udp,
5949 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5950 .arg1_type = ARG_PTR_TO_CTX,
5951 .arg2_type = ARG_PTR_TO_MEM,
5952 .arg3_type = ARG_CONST_SIZE,
5953 .arg4_type = ARG_ANYTHING,
5954 .arg5_type = ARG_ANYTHING,
5957 BPF_CALL_1(bpf_sk_release, struct sock *, sk)
5959 if (sk && sk_is_refcounted(sk))
5964 static const struct bpf_func_proto bpf_sk_release_proto = {
5965 .func = bpf_sk_release,
5967 .ret_type = RET_INTEGER,
5968 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
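/* Example (hedged sketch, TC program side): a lookup must be paired with
 * bpf_sk_release(), or the verifier rejects the program. The tuple fields
 * come from already-parsed headers (assumption); BPF_F_CURRENT_NETNS (-1)
 * selects the skb's own netns.
 *
 *	struct bpf_sock_tuple tuple = {
 *		.ipv4.saddr = iph->saddr,
 *		.ipv4.daddr = iph->daddr,
 *		.ipv4.sport = tcph->source,
 *		.ipv4.dport = tcph->dest,
 *	};
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (sk)
 *		bpf_sk_release(sk);
 */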
5971 BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
5972 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5974 struct net *caller_net = dev_net(ctx->rxq->dev);
5975 int ifindex = ctx->rxq->dev->ifindex;
5977 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
5978 ifindex, IPPROTO_UDP, netns_id,
5982 static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
5983 .func = bpf_xdp_sk_lookup_udp,
5986 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5987 .arg1_type = ARG_PTR_TO_CTX,
5988 .arg2_type = ARG_PTR_TO_MEM,
5989 .arg3_type = ARG_CONST_SIZE,
5990 .arg4_type = ARG_ANYTHING,
5991 .arg5_type = ARG_ANYTHING,
5994 BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
5995 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5997 struct net *caller_net = dev_net(ctx->rxq->dev);
5998 int ifindex = ctx->rxq->dev->ifindex;
6000 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
6001 ifindex, IPPROTO_TCP, netns_id,
6005 static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
6006 .func = bpf_xdp_skc_lookup_tcp,
6009 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
6010 .arg1_type = ARG_PTR_TO_CTX,
6011 .arg2_type = ARG_PTR_TO_MEM,
6012 .arg3_type = ARG_CONST_SIZE,
6013 .arg4_type = ARG_ANYTHING,
6014 .arg5_type = ARG_ANYTHING,
6017 BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
6018 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
6020 struct net *caller_net = dev_net(ctx->rxq->dev);
6021 int ifindex = ctx->rxq->dev->ifindex;
6023 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
6024 ifindex, IPPROTO_TCP, netns_id,
6028 static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
6029 .func = bpf_xdp_sk_lookup_tcp,
6032 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6033 .arg1_type = ARG_PTR_TO_CTX,
6034 .arg2_type = ARG_PTR_TO_MEM,
6035 .arg3_type = ARG_CONST_SIZE,
6036 .arg4_type = ARG_ANYTHING,
6037 .arg5_type = ARG_ANYTHING,
6040 BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
6041 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6043 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
6044 sock_net(ctx->sk), 0,
6045 IPPROTO_TCP, netns_id, flags);
6048 static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
6049 .func = bpf_sock_addr_skc_lookup_tcp,
6051 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
6052 .arg1_type = ARG_PTR_TO_CTX,
6053 .arg2_type = ARG_PTR_TO_MEM,
6054 .arg3_type = ARG_CONST_SIZE,
6055 .arg4_type = ARG_ANYTHING,
6056 .arg5_type = ARG_ANYTHING,
6059 BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
6060 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6062 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
6063 sock_net(ctx->sk), 0, IPPROTO_TCP,
6067 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
6068 .func = bpf_sock_addr_sk_lookup_tcp,
6070 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6071 .arg1_type = ARG_PTR_TO_CTX,
6072 .arg2_type = ARG_PTR_TO_MEM,
6073 .arg3_type = ARG_CONST_SIZE,
6074 .arg4_type = ARG_ANYTHING,
6075 .arg5_type = ARG_ANYTHING,
6078 BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
6079 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6081 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
6082 sock_net(ctx->sk), 0, IPPROTO_UDP,
6086 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
6087 .func = bpf_sock_addr_sk_lookup_udp,
6089 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6090 .arg1_type = ARG_PTR_TO_CTX,
6091 .arg2_type = ARG_PTR_TO_MEM,
6092 .arg3_type = ARG_CONST_SIZE,
6093 .arg4_type = ARG_ANYTHING,
6094 .arg5_type = ARG_ANYTHING,
6097 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6098 struct bpf_insn_access_aux *info)
6100 if (off < 0 || off >= offsetofend(struct bpf_tcp_sock,
6104 if (off % size != 0)
6108 case offsetof(struct bpf_tcp_sock, bytes_received):
6109 case offsetof(struct bpf_tcp_sock, bytes_acked):
6110 return size == sizeof(__u64);
6112 return size == sizeof(__u32);
6116 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
6117 const struct bpf_insn *si,
6118 struct bpf_insn *insn_buf,
6119 struct bpf_prog *prog, u32 *target_size)
6121 struct bpf_insn *insn = insn_buf;
6123 #define BPF_TCP_SOCK_GET_COMMON(FIELD) \
6125 BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) > \
6126 sizeof_field(struct bpf_tcp_sock, FIELD)); \
6127 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
6128 si->dst_reg, si->src_reg, \
6129 offsetof(struct tcp_sock, FIELD)); \
6132 #define BPF_INET_SOCK_GET_COMMON(FIELD) \
6134 BUILD_BUG_ON(sizeof_field(struct inet_connection_sock, \
6136 sizeof_field(struct bpf_tcp_sock, FIELD)); \
6137 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
6138 struct inet_connection_sock, \
6140 si->dst_reg, si->src_reg, \
6142 struct inet_connection_sock, \
6146 if (insn > insn_buf)
6147 return insn - insn_buf;
6150 case offsetof(struct bpf_tcp_sock, rtt_min):
6151 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
6152 sizeof(struct minmax));
6153 BUILD_BUG_ON(sizeof(struct minmax) <
6154 sizeof(struct minmax_sample));
6156 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
6157 offsetof(struct tcp_sock, rtt_min) +
6158 offsetof(struct minmax_sample, v));
6160 case offsetof(struct bpf_tcp_sock, snd_cwnd):
6161 BPF_TCP_SOCK_GET_COMMON(snd_cwnd);
6163 case offsetof(struct bpf_tcp_sock, srtt_us):
6164 BPF_TCP_SOCK_GET_COMMON(srtt_us);
6166 case offsetof(struct bpf_tcp_sock, snd_ssthresh):
6167 BPF_TCP_SOCK_GET_COMMON(snd_ssthresh);
6169 case offsetof(struct bpf_tcp_sock, rcv_nxt):
6170 BPF_TCP_SOCK_GET_COMMON(rcv_nxt);
6172 case offsetof(struct bpf_tcp_sock, snd_nxt):
6173 BPF_TCP_SOCK_GET_COMMON(snd_nxt);
6175 case offsetof(struct bpf_tcp_sock, snd_una):
6176 BPF_TCP_SOCK_GET_COMMON(snd_una);
6178 case offsetof(struct bpf_tcp_sock, mss_cache):
6179 BPF_TCP_SOCK_GET_COMMON(mss_cache);
6181 case offsetof(struct bpf_tcp_sock, ecn_flags):
6182 BPF_TCP_SOCK_GET_COMMON(ecn_flags);
6184 case offsetof(struct bpf_tcp_sock, rate_delivered):
6185 BPF_TCP_SOCK_GET_COMMON(rate_delivered);
6187 case offsetof(struct bpf_tcp_sock, rate_interval_us):
6188 BPF_TCP_SOCK_GET_COMMON(rate_interval_us);
6190 case offsetof(struct bpf_tcp_sock, packets_out):
6191 BPF_TCP_SOCK_GET_COMMON(packets_out);
6193 case offsetof(struct bpf_tcp_sock, retrans_out):
6194 BPF_TCP_SOCK_GET_COMMON(retrans_out);
6196 case offsetof(struct bpf_tcp_sock, total_retrans):
6197 BPF_TCP_SOCK_GET_COMMON(total_retrans);
6199 case offsetof(struct bpf_tcp_sock, segs_in):
6200 BPF_TCP_SOCK_GET_COMMON(segs_in);
6202 case offsetof(struct bpf_tcp_sock, data_segs_in):
6203 BPF_TCP_SOCK_GET_COMMON(data_segs_in);
6205 case offsetof(struct bpf_tcp_sock, segs_out):
6206 BPF_TCP_SOCK_GET_COMMON(segs_out);
6208 case offsetof(struct bpf_tcp_sock, data_segs_out):
6209 BPF_TCP_SOCK_GET_COMMON(data_segs_out);
6211 case offsetof(struct bpf_tcp_sock, lost_out):
6212 BPF_TCP_SOCK_GET_COMMON(lost_out);
6214 case offsetof(struct bpf_tcp_sock, sacked_out):
6215 BPF_TCP_SOCK_GET_COMMON(sacked_out);
6217 case offsetof(struct bpf_tcp_sock, bytes_received):
6218 BPF_TCP_SOCK_GET_COMMON(bytes_received);
6220 case offsetof(struct bpf_tcp_sock, bytes_acked):
6221 BPF_TCP_SOCK_GET_COMMON(bytes_acked);
6223 case offsetof(struct bpf_tcp_sock, dsack_dups):
6224 BPF_TCP_SOCK_GET_COMMON(dsack_dups);
6226 case offsetof(struct bpf_tcp_sock, delivered):
6227 BPF_TCP_SOCK_GET_COMMON(delivered);
6229 case offsetof(struct bpf_tcp_sock, delivered_ce):
6230 BPF_TCP_SOCK_GET_COMMON(delivered_ce);
6232 case offsetof(struct bpf_tcp_sock, icsk_retransmits):
6233 BPF_INET_SOCK_GET_COMMON(icsk_retransmits);
6237 return insn - insn_buf;
6240 BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
6242 if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
6243 return (unsigned long)sk;
6245 return (unsigned long)NULL;
6248 const struct bpf_func_proto bpf_tcp_sock_proto = {
6249 .func = bpf_tcp_sock,
6251 .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL,
6252 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
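/* Example (hedged sketch, BPF program side): narrow a struct bpf_sock to
 * its TCP view; the slow-start check is just an illustration.
 *
 *	struct bpf_tcp_sock *tp = bpf_tcp_sock(sk);
 *
 *	if (tp && tp->snd_cwnd > tp->snd_ssthresh)
 *		bpf_printk("past slow start\n");
 */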
6255 BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
6257 sk = sk_to_full_sk(sk);
6259 if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
6260 return (unsigned long)sk;
6262 return (unsigned long)NULL;
6265 static const struct bpf_func_proto bpf_get_listener_sock_proto = {
6266 .func = bpf_get_listener_sock,
6268 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6269 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
6272 BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
6274 unsigned int iphdr_len;
6276 switch (skb_protocol(skb, true)) {
6277 case cpu_to_be16(ETH_P_IP):
6278 iphdr_len = sizeof(struct iphdr);
6280 case cpu_to_be16(ETH_P_IPV6):
6281 iphdr_len = sizeof(struct ipv6hdr);
6287 if (skb_headlen(skb) < iphdr_len)
6290 if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
6293 return INET_ECN_set_ce(skb);
6296 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6297 struct bpf_insn_access_aux *info)
6299 if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
6302 if (off % size != 0)
6307 return size == sizeof(__u32);
6311 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
6312 const struct bpf_insn *si,
6313 struct bpf_insn *insn_buf,
6314 struct bpf_prog *prog, u32 *target_size)
6316 struct bpf_insn *insn = insn_buf;
6318 #define BPF_XDP_SOCK_GET(FIELD) \
6320 BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) > \
6321 sizeof_field(struct bpf_xdp_sock, FIELD)); \
6322 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
6323 si->dst_reg, si->src_reg, \
6324 offsetof(struct xdp_sock, FIELD)); \
6328 case offsetof(struct bpf_xdp_sock, queue_id):
6329 BPF_XDP_SOCK_GET(queue_id);
6333 return insn - insn_buf;
6336 static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
6337 .func = bpf_skb_ecn_set_ce,
6339 .ret_type = RET_INTEGER,
6340 .arg1_type = ARG_PTR_TO_CTX,
6343 BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
6344 struct tcphdr *, th, u32, th_len)
6346 #ifdef CONFIG_SYN_COOKIES
6350 if (unlikely(!sk || th_len < sizeof(*th)))
6353 /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
6354 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
6357 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
6360 if (!th->ack || th->rst || th->syn)
6363 if (tcp_synq_no_recent_overflow(sk))
6366 cookie = ntohl(th->ack_seq) - 1;
6368 switch (sk->sk_family) {
6370 if (unlikely(iph_len < sizeof(struct iphdr)))
6373 ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
6376 #if IS_BUILTIN(CONFIG_IPV6)
6378 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
6381 ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
6383 #endif /* CONFIG_IPV6 */
6386 return -EPROTONOSUPPORT;
6398 static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
6399 .func = bpf_tcp_check_syncookie,
6402 .ret_type = RET_INTEGER,
6403 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
6404 .arg2_type = ARG_PTR_TO_MEM,
6405 .arg3_type = ARG_CONST_SIZE,
6406 .arg4_type = ARG_PTR_TO_MEM,
6407 .arg5_type = ARG_CONST_SIZE,
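/* Example (hedged sketch, XDP/TC program side): validate the ACK that
 * completes a cookie-based handshake. iph/th point at parsed headers
 * (assumption); a return of 0 means the cookie checked out.
 *
 *	sk = bpf_skc_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
 *				BPF_F_CURRENT_NETNS, 0);
 *	if (sk) {
 *		err = bpf_tcp_check_syncookie(sk, iph, sizeof(*iph),
 *					      th, th->doff * 4);
 *		bpf_sk_release(sk);
 *	}
 */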
6410 BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
6411 struct tcphdr *, th, u32, th_len)
6413 #ifdef CONFIG_SYN_COOKIES
6417 if (unlikely(!sk || th_len < sizeof(*th) || th_len != th->doff * 4))
6420 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
6423 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
6426 if (!th->syn || th->ack || th->fin || th->rst)
6429 if (unlikely(iph_len < sizeof(struct iphdr)))
6432 /* Both struct iphdr and struct ipv6hdr have the version field at the
6433 * same offset so we can cast to the shorter header (struct iphdr).
6435 switch (((struct iphdr *)iph)->version) {
6437 if (sk->sk_family == AF_INET6 && sk->sk_ipv6only)
6440 mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);
6443 #if IS_BUILTIN(CONFIG_IPV6)
6445 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
6448 if (sk->sk_family != AF_INET6)
6451 mss = tcp_v6_get_syncookie(sk, iph, th, &cookie);
6453 #endif /* CONFIG_IPV6 */
6456 return -EPROTONOSUPPORT;
6461 return cookie | ((u64)mss << 32);
6464 #endif /* CONFIG_SYN_COOKIES */
6467 static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
6468 .func = bpf_tcp_gen_syncookie,
6469 .gpl_only = true, /* __cookie_v*_init_sequence() is GPL */
6471 .ret_type = RET_INTEGER,
6472 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
6473 .arg2_type = ARG_PTR_TO_MEM,
6474 .arg3_type = ARG_CONST_SIZE,
6475 .arg4_type = ARG_PTR_TO_MEM,
6476 .arg5_type = ARG_CONST_SIZE,
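/* Example (hedged sketch, BPF program side): on an incoming SYN, a
 * positive return value packs the cookie in the low 32 bits and the MSS in
 * the high 32 bits, matching "cookie | ((u64)mss << 32)" above.
 *
 *	__s64 ret = bpf_tcp_gen_syncookie(sk, iph, sizeof(*iph),
 *					  th, th->doff * 4);
 *	if (ret > 0) {
 *		__u32 cookie = (__u32)ret;
 *		__u32 mss = ret >> 32;
 *	}
 */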
6479 BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
6481 if (!sk || flags != 0)
6483 if (!skb_at_tc_ingress(skb))
6485 if (unlikely(dev_net(skb->dev) != sock_net(sk)))
6486 return -ENETUNREACH;
6487 if (unlikely(sk_fullsock(sk) && sk->sk_reuseport))
6488 return -ESOCKTNOSUPPORT;
6489 if (sk_is_refcounted(sk) &&
6490 unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
6495 skb->destructor = sock_pfree;
6500 static const struct bpf_func_proto bpf_sk_assign_proto = {
6501 .func = bpf_sk_assign,
6503 .ret_type = RET_INTEGER,
6504 .arg1_type = ARG_PTR_TO_CTX,
6505 .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
6506 .arg3_type = ARG_ANYTHING,
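/* Example (hedged sketch, TC ingress side): TProxy-style steering of the
 * skb to a looked-up socket. bpf_sk_assign() takes its own reference above,
 * so the lookup reference is still released as usual.
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (sk) {
 *		bpf_sk_assign(skb, sk, 0);
 *		bpf_sk_release(sk);
 *	}
 */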
6509 static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend,
6510 u8 search_kind, const u8 *magic,
6511 u8 magic_len, bool *eol)
6517 while (op < opend) {
6520 if (kind == TCPOPT_EOL) {
6522 return ERR_PTR(-ENOMSG);
6523 } else if (kind == TCPOPT_NOP) {
6528 if (opend - op < 2 || opend - op < op[1] || op[1] < 2)
6529 /* Something is wrong in the received header.
6530 * Follow the TCP stack's tcp_parse_options()
6531 * and just bail here.
6533 return ERR_PTR(-EFAULT);
6536 if (search_kind == kind) {
6540 if (magic_len > kind_len - 2)
6541 return ERR_PTR(-ENOMSG);
6543 if (!memcmp(&op[2], magic, magic_len))
6550 return ERR_PTR(-ENOMSG);
6553 BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
6554 void *, search_res, u32, len, u64, flags)
6556 bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN;
6557 const u8 *op, *opend, *magic, *search = search_res;
6558 u8 search_kind, search_len, copy_len, magic_len;
6561 /* 2 bytes is the minimal option len, except for TCPOPT_NOP and
6562 * TCPOPT_EOL, which are useless for the bpf prog to learn
6563 * and which this helper therefore also disallows loading.
6565 if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN)
6568 search_kind = search[0];
6569 search_len = search[1];
6571 if (search_len > len || search_kind == TCPOPT_NOP ||
6572 search_kind == TCPOPT_EOL)
6575 if (search_kind == TCPOPT_EXP || search_kind == 253) {
6576 /* 16 or 32 bit magic. +2 for kind and kind length */
6577 if (search_len != 4 && search_len != 6)
6580 magic_len = search_len - 2;
6589 ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op);
6594 op += sizeof(struct tcphdr);
6596 if (!bpf_sock->skb ||
6597 bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB)
6598 /* This helper cannot be called from this bpf_sock->op */
6601 opend = bpf_sock->skb_data_end;
6602 op = bpf_sock->skb->data + sizeof(struct tcphdr);
6605 op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len,
6612 if (copy_len > len) {
6617 memcpy(search_res, op, copy_len);
6621 static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
6622 .func = bpf_sock_ops_load_hdr_opt,
6624 .ret_type = RET_INTEGER,
6625 .arg1_type = ARG_PTR_TO_CTX,
6626 .arg2_type = ARG_PTR_TO_MEM,
6627 .arg3_type = ARG_CONST_SIZE,
6628 .arg4_type = ARG_ANYTHING,
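/* Example (hedged sketch, sockops side): search for an experimental option
 * by kind and 2-byte magic. Byte 0 is the kind to search for, byte 1 the
 * search length (kind + len + magic); the magic value 0xeB9F is an
 * assumption. On success the whole option is copied into opt[] and its
 * length is returned.
 *
 *	u8 opt[10] = {};
 *
 *	opt[0] = TCPOPT_EXP;
 *	opt[1] = 4;
 *	opt[2] = 0xeB;
 *	opt[3] = 0x9F;
 *
 *	ret = bpf_load_hdr_opt(skops, opt, sizeof(opt), 0);
 */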
6631 BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
6632 const void *, from, u32, len, u64, flags)
6634 u8 new_kind, new_kind_len, magic_len = 0, *opend;
6635 const u8 *op, *new_op, *magic = NULL;
6636 struct sk_buff *skb;
6639 if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB)
6642 if (len < 2 || flags)
6646 new_kind = new_op[0];
6647 new_kind_len = new_op[1];
6649 if (new_kind_len > len || new_kind == TCPOPT_NOP ||
6650 new_kind == TCPOPT_EOL)
6653 if (new_kind_len > bpf_sock->remaining_opt_len)
6656 /* 253 is another experimental kind */
6657 if (new_kind == TCPOPT_EXP || new_kind == 253) {
6658 if (new_kind_len < 4)
6660 /* Match on the 2 byte magic as well.
6661 * RFC 6994: the magic could be 2 or 4 bytes.
6662 * Hence, matching on only 2 bytes is on the
6663 * conservative side, but it is the right
6664 * thing to do for the 'search-for-duplication'
6671 /* Check for duplication */
6672 skb = bpf_sock->skb;
6673 op = skb->data + sizeof(struct tcphdr);
6674 opend = bpf_sock->skb_data_end;
6676 op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len,
6681 if (PTR_ERR(op) != -ENOMSG)
6685 /* The option space has ended (TCPOPT_EOL). Treat it as if no more
6686 * header options can be written.
6690 /* No duplication found. Store the header option. */
6691 memcpy(opend, from, new_kind_len);
6693 bpf_sock->remaining_opt_len -= new_kind_len;
6694 bpf_sock->skb_data_end += new_kind_len;
6699 static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = {
6700 .func = bpf_sock_ops_store_hdr_opt,
6702 .ret_type = RET_INTEGER,
6703 .arg1_type = ARG_PTR_TO_CTX,
6704 .arg2_type = ARG_PTR_TO_MEM,
6705 .arg3_type = ARG_CONST_SIZE,
6706 .arg4_type = ARG_ANYTHING,
6709 BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
6710 u32, len, u64, flags)
6712 if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB)
6715 if (flags || len < 2)
6718 if (len > bpf_sock->remaining_opt_len)
6721 bpf_sock->remaining_opt_len -= len;
6726 static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = {
6727 .func = bpf_sock_ops_reserve_hdr_opt,
6729 .ret_type = RET_INTEGER,
6730 .arg1_type = ARG_PTR_TO_CTX,
6731 .arg2_type = ARG_ANYTHING,
6732 .arg3_type = ARG_ANYTHING,
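/* Example (hedged sketch, sockops side): the two callbacks cooperate.
 * Reserve the bytes in BPF_SOCK_OPS_HDR_OPT_LEN_CB, then write them in
 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. The 4-byte experimental option with magic
 * 0xeB9F is an assumption.
 *
 *	if (skops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB) {
 *		bpf_reserve_hdr_opt(skops, 4, 0);
 *	} else if (skops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB) {
 *		u8 opt[4] = { TCPOPT_EXP, 4, 0xeB, 0x9F };
 *
 *		bpf_store_hdr_opt(skops, opt, sizeof(opt), 0);
 *	}
 */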
6735 #endif /* CONFIG_INET */
6737 bool bpf_helper_changes_pkt_data(void *func)
6739 if (func == bpf_skb_vlan_push ||
6740 func == bpf_skb_vlan_pop ||
6741 func == bpf_skb_store_bytes ||
6742 func == bpf_skb_change_proto ||
6743 func == bpf_skb_change_head ||
6744 func == sk_skb_change_head ||
6745 func == bpf_skb_change_tail ||
6746 func == sk_skb_change_tail ||
6747 func == bpf_skb_adjust_room ||
6748 func == bpf_skb_pull_data ||
6749 func == sk_skb_pull_data ||
6750 func == bpf_clone_redirect ||
6751 func == bpf_l3_csum_replace ||
6752 func == bpf_l4_csum_replace ||
6753 func == bpf_xdp_adjust_head ||
6754 func == bpf_xdp_adjust_meta ||
6755 func == bpf_msg_pull_data ||
6756 func == bpf_msg_push_data ||
6757 func == bpf_msg_pop_data ||
6758 func == bpf_xdp_adjust_tail ||
6759 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
6760 func == bpf_lwt_seg6_store_bytes ||
6761 func == bpf_lwt_seg6_adjust_srh ||
6762 func == bpf_lwt_seg6_action ||
6765 func == bpf_sock_ops_store_hdr_opt ||
6767 func == bpf_lwt_in_push_encap ||
6768 func == bpf_lwt_xmit_push_encap)
6774 const struct bpf_func_proto bpf_event_output_data_proto __weak;
6775 const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak;
6777 static const struct bpf_func_proto *
6778 sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6781 /* inet and inet6 sockets are created in a process
6782 * context so there is always a valid uid/gid
6784 case BPF_FUNC_get_current_uid_gid:
6785 return &bpf_get_current_uid_gid_proto;
6786 case BPF_FUNC_get_local_storage:
6787 return &bpf_get_local_storage_proto;
6788 case BPF_FUNC_get_socket_cookie:
6789 return &bpf_get_socket_cookie_sock_proto;
6790 case BPF_FUNC_get_netns_cookie:
6791 return &bpf_get_netns_cookie_sock_proto;
6792 case BPF_FUNC_perf_event_output:
6793 return &bpf_event_output_data_proto;
6794 case BPF_FUNC_get_current_pid_tgid:
6795 return &bpf_get_current_pid_tgid_proto;
6796 case BPF_FUNC_get_current_comm:
6797 return &bpf_get_current_comm_proto;
6798 #ifdef CONFIG_CGROUPS
6799 case BPF_FUNC_get_current_cgroup_id:
6800 return &bpf_get_current_cgroup_id_proto;
6801 case BPF_FUNC_get_current_ancestor_cgroup_id:
6802 return &bpf_get_current_ancestor_cgroup_id_proto;
6804 #ifdef CONFIG_CGROUP_NET_CLASSID
6805 case BPF_FUNC_get_cgroup_classid:
6806 return &bpf_get_cgroup_classid_curr_proto;
6808 case BPF_FUNC_sk_storage_get:
6809 return &bpf_sk_storage_get_cg_sock_proto;
6811 return bpf_base_func_proto(func_id);
6815 static const struct bpf_func_proto *
6816 sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6819 /* inet and inet6 sockets are created in a process
6820 * context so there is always a valid uid/gid
6822 case BPF_FUNC_get_current_uid_gid:
6823 return &bpf_get_current_uid_gid_proto;
6825 switch (prog->expected_attach_type) {
6826 case BPF_CGROUP_INET4_CONNECT:
6827 case BPF_CGROUP_INET6_CONNECT:
6828 return &bpf_bind_proto;
6832 case BPF_FUNC_get_socket_cookie:
6833 return &bpf_get_socket_cookie_sock_addr_proto;
6834 case BPF_FUNC_get_netns_cookie:
6835 return &bpf_get_netns_cookie_sock_addr_proto;
6836 case BPF_FUNC_get_local_storage:
6837 return &bpf_get_local_storage_proto;
6838 case BPF_FUNC_perf_event_output:
6839 return &bpf_event_output_data_proto;
6840 case BPF_FUNC_get_current_pid_tgid:
6841 return &bpf_get_current_pid_tgid_proto;
6842 case BPF_FUNC_get_current_comm:
6843 return &bpf_get_current_comm_proto;
6844 #ifdef CONFIG_CGROUPS
6845 case BPF_FUNC_get_current_cgroup_id:
6846 return &bpf_get_current_cgroup_id_proto;
6847 case BPF_FUNC_get_current_ancestor_cgroup_id:
6848 return &bpf_get_current_ancestor_cgroup_id_proto;
6850 #ifdef CONFIG_CGROUP_NET_CLASSID
6851 case BPF_FUNC_get_cgroup_classid:
6852 return &bpf_get_cgroup_classid_curr_proto;
6855 case BPF_FUNC_sk_lookup_tcp:
6856 return &bpf_sock_addr_sk_lookup_tcp_proto;
6857 case BPF_FUNC_sk_lookup_udp:
6858 return &bpf_sock_addr_sk_lookup_udp_proto;
6859 case BPF_FUNC_sk_release:
6860 return &bpf_sk_release_proto;
6861 case BPF_FUNC_skc_lookup_tcp:
6862 return &bpf_sock_addr_skc_lookup_tcp_proto;
6863 #endif /* CONFIG_INET */
6864 case BPF_FUNC_sk_storage_get:
6865 return &bpf_sk_storage_get_proto;
6866 case BPF_FUNC_sk_storage_delete:
6867 return &bpf_sk_storage_delete_proto;
6868 case BPF_FUNC_setsockopt:
6869 switch (prog->expected_attach_type) {
6870 case BPF_CGROUP_INET4_CONNECT:
6871 case BPF_CGROUP_INET6_CONNECT:
6872 return &bpf_sock_addr_setsockopt_proto;
6876 case BPF_FUNC_getsockopt:
6877 switch (prog->expected_attach_type) {
6878 case BPF_CGROUP_INET4_CONNECT:
6879 case BPF_CGROUP_INET6_CONNECT:
6880 return &bpf_sock_addr_getsockopt_proto;
6885 return bpf_sk_base_func_proto(func_id);
6889 static const struct bpf_func_proto *
6890 sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6893 case BPF_FUNC_skb_load_bytes:
6894 return &bpf_skb_load_bytes_proto;
6895 case BPF_FUNC_skb_load_bytes_relative:
6896 return &bpf_skb_load_bytes_relative_proto;
6897 case BPF_FUNC_get_socket_cookie:
6898 return &bpf_get_socket_cookie_proto;
6899 case BPF_FUNC_get_socket_uid:
6900 return &bpf_get_socket_uid_proto;
6901 case BPF_FUNC_perf_event_output:
6902 return &bpf_skb_event_output_proto;
6904 return bpf_sk_base_func_proto(func_id);
6908 const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
6909 const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;
6911 static const struct bpf_func_proto *
6912 cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6915 case BPF_FUNC_get_local_storage:
6916 return &bpf_get_local_storage_proto;
6917 case BPF_FUNC_sk_fullsock:
6918 return &bpf_sk_fullsock_proto;
6919 case BPF_FUNC_sk_storage_get:
6920 return &bpf_sk_storage_get_proto;
6921 case BPF_FUNC_sk_storage_delete:
6922 return &bpf_sk_storage_delete_proto;
6923 case BPF_FUNC_perf_event_output:
6924 return &bpf_skb_event_output_proto;
6925 #ifdef CONFIG_SOCK_CGROUP_DATA
6926 case BPF_FUNC_skb_cgroup_id:
6927 return &bpf_skb_cgroup_id_proto;
6928 case BPF_FUNC_skb_ancestor_cgroup_id:
6929 return &bpf_skb_ancestor_cgroup_id_proto;
6930 case BPF_FUNC_sk_cgroup_id:
6931 return &bpf_sk_cgroup_id_proto;
6932 case BPF_FUNC_sk_ancestor_cgroup_id:
6933 return &bpf_sk_ancestor_cgroup_id_proto;
6936 case BPF_FUNC_sk_lookup_tcp:
6937 return &bpf_sk_lookup_tcp_proto;
6938 case BPF_FUNC_sk_lookup_udp:
6939 return &bpf_sk_lookup_udp_proto;
6940 case BPF_FUNC_sk_release:
6941 return &bpf_sk_release_proto;
6942 case BPF_FUNC_skc_lookup_tcp:
6943 return &bpf_skc_lookup_tcp_proto;
6944 case BPF_FUNC_tcp_sock:
6945 return &bpf_tcp_sock_proto;
6946 case BPF_FUNC_get_listener_sock:
6947 return &bpf_get_listener_sock_proto;
6948 case BPF_FUNC_skb_ecn_set_ce:
6949 return &bpf_skb_ecn_set_ce_proto;
6952 return sk_filter_func_proto(func_id, prog);
6956 static const struct bpf_func_proto *
6957 tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6960 case BPF_FUNC_skb_store_bytes:
6961 return &bpf_skb_store_bytes_proto;
6962 case BPF_FUNC_skb_load_bytes:
6963 return &bpf_skb_load_bytes_proto;
6964 case BPF_FUNC_skb_load_bytes_relative:
6965 return &bpf_skb_load_bytes_relative_proto;
6966 case BPF_FUNC_skb_pull_data:
6967 return &bpf_skb_pull_data_proto;
6968 case BPF_FUNC_csum_diff:
6969 return &bpf_csum_diff_proto;
6970 case BPF_FUNC_csum_update:
6971 return &bpf_csum_update_proto;
6972 case BPF_FUNC_csum_level:
6973 return &bpf_csum_level_proto;
6974 case BPF_FUNC_l3_csum_replace:
6975 return &bpf_l3_csum_replace_proto;
6976 case BPF_FUNC_l4_csum_replace:
6977 return &bpf_l4_csum_replace_proto;
6978 case BPF_FUNC_clone_redirect:
6979 return &bpf_clone_redirect_proto;
6980 case BPF_FUNC_get_cgroup_classid:
6981 return &bpf_get_cgroup_classid_proto;
6982 case BPF_FUNC_skb_vlan_push:
6983 return &bpf_skb_vlan_push_proto;
6984 case BPF_FUNC_skb_vlan_pop:
6985 return &bpf_skb_vlan_pop_proto;
6986 case BPF_FUNC_skb_change_proto:
6987 return &bpf_skb_change_proto_proto;
6988 case BPF_FUNC_skb_change_type:
6989 return &bpf_skb_change_type_proto;
6990 case BPF_FUNC_skb_adjust_room:
6991 return &bpf_skb_adjust_room_proto;
6992 case BPF_FUNC_skb_change_tail:
6993 return &bpf_skb_change_tail_proto;
6994 case BPF_FUNC_skb_change_head:
6995 return &bpf_skb_change_head_proto;
6996 case BPF_FUNC_skb_get_tunnel_key:
6997 return &bpf_skb_get_tunnel_key_proto;
6998 case BPF_FUNC_skb_set_tunnel_key:
6999 return bpf_get_skb_set_tunnel_proto(func_id);
7000 case BPF_FUNC_skb_get_tunnel_opt:
7001 return &bpf_skb_get_tunnel_opt_proto;
7002 case BPF_FUNC_skb_set_tunnel_opt:
7003 return bpf_get_skb_set_tunnel_proto(func_id);
7004 case BPF_FUNC_redirect:
7005 return &bpf_redirect_proto;
7006 case BPF_FUNC_redirect_neigh:
7007 return &bpf_redirect_neigh_proto;
7008 case BPF_FUNC_get_route_realm:
7009 return &bpf_get_route_realm_proto;
7010 case BPF_FUNC_get_hash_recalc:
7011 return &bpf_get_hash_recalc_proto;
7012 case BPF_FUNC_set_hash_invalid:
7013 return &bpf_set_hash_invalid_proto;
7014 case BPF_FUNC_set_hash:
7015 return &bpf_set_hash_proto;
7016 case BPF_FUNC_perf_event_output:
7017 return &bpf_skb_event_output_proto;
7018 case BPF_FUNC_get_smp_processor_id:
7019 return &bpf_get_smp_processor_id_proto;
7020 case BPF_FUNC_skb_under_cgroup:
7021 return &bpf_skb_under_cgroup_proto;
7022 case BPF_FUNC_get_socket_cookie:
7023 return &bpf_get_socket_cookie_proto;
7024 case BPF_FUNC_get_socket_uid:
7025 return &bpf_get_socket_uid_proto;
7026 case BPF_FUNC_fib_lookup:
7027 return &bpf_skb_fib_lookup_proto;
7028 case BPF_FUNC_sk_fullsock:
7029 return &bpf_sk_fullsock_proto;
7030 case BPF_FUNC_sk_storage_get:
7031 return &bpf_sk_storage_get_proto;
7032 case BPF_FUNC_sk_storage_delete:
7033 return &bpf_sk_storage_delete_proto;
7035 case BPF_FUNC_skb_get_xfrm_state:
7036 return &bpf_skb_get_xfrm_state_proto;
7038 #ifdef CONFIG_CGROUP_NET_CLASSID
7039 case BPF_FUNC_skb_cgroup_classid:
7040 return &bpf_skb_cgroup_classid_proto;
7042 #ifdef CONFIG_SOCK_CGROUP_DATA
7043 case BPF_FUNC_skb_cgroup_id:
7044 return &bpf_skb_cgroup_id_proto;
7045 case BPF_FUNC_skb_ancestor_cgroup_id:
7046 return &bpf_skb_ancestor_cgroup_id_proto;
7049 case BPF_FUNC_sk_lookup_tcp:
7050 return &bpf_sk_lookup_tcp_proto;
7051 case BPF_FUNC_sk_lookup_udp:
7052 return &bpf_sk_lookup_udp_proto;
7053 case BPF_FUNC_sk_release:
7054 return &bpf_sk_release_proto;
7055 case BPF_FUNC_tcp_sock:
7056 return &bpf_tcp_sock_proto;
7057 case BPF_FUNC_get_listener_sock:
7058 return &bpf_get_listener_sock_proto;
7059 case BPF_FUNC_skc_lookup_tcp:
7060 return &bpf_skc_lookup_tcp_proto;
7061 case BPF_FUNC_tcp_check_syncookie:
7062 return &bpf_tcp_check_syncookie_proto;
7063 case BPF_FUNC_skb_ecn_set_ce:
7064 return &bpf_skb_ecn_set_ce_proto;
7065 case BPF_FUNC_tcp_gen_syncookie:
7066 return &bpf_tcp_gen_syncookie_proto;
7067 case BPF_FUNC_sk_assign:
7068 return &bpf_sk_assign_proto;
7071 return bpf_sk_base_func_proto(func_id);
7075 static const struct bpf_func_proto *
7076 xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7079 case BPF_FUNC_perf_event_output:
7080 return &bpf_xdp_event_output_proto;
7081 case BPF_FUNC_get_smp_processor_id:
7082 return &bpf_get_smp_processor_id_proto;
7083 case BPF_FUNC_csum_diff:
7084 return &bpf_csum_diff_proto;
7085 case BPF_FUNC_xdp_adjust_head:
7086 return &bpf_xdp_adjust_head_proto;
7087 case BPF_FUNC_xdp_adjust_meta:
7088 return &bpf_xdp_adjust_meta_proto;
7089 case BPF_FUNC_redirect:
7090 return &bpf_xdp_redirect_proto;
7091 case BPF_FUNC_redirect_map:
7092 return &bpf_xdp_redirect_map_proto;
7093 case BPF_FUNC_xdp_adjust_tail:
7094 return &bpf_xdp_adjust_tail_proto;
7095 case BPF_FUNC_fib_lookup:
7096 return &bpf_xdp_fib_lookup_proto;
7098 case BPF_FUNC_sk_lookup_udp:
7099 return &bpf_xdp_sk_lookup_udp_proto;
7100 case BPF_FUNC_sk_lookup_tcp:
7101 return &bpf_xdp_sk_lookup_tcp_proto;
7102 case BPF_FUNC_sk_release:
7103 return &bpf_sk_release_proto;
7104 case BPF_FUNC_skc_lookup_tcp:
7105 return &bpf_xdp_skc_lookup_tcp_proto;
7106 case BPF_FUNC_tcp_check_syncookie:
7107 return &bpf_tcp_check_syncookie_proto;
7108 case BPF_FUNC_tcp_gen_syncookie:
7109 return &bpf_tcp_gen_syncookie_proto;
7112 return bpf_sk_base_func_proto(func_id);
7116 const struct bpf_func_proto bpf_sock_map_update_proto __weak;
7117 const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
7119 static const struct bpf_func_proto *
7120 sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7123 case BPF_FUNC_setsockopt:
7124 return &bpf_sock_ops_setsockopt_proto;
7125 case BPF_FUNC_getsockopt:
7126 return &bpf_sock_ops_getsockopt_proto;
7127 case BPF_FUNC_sock_ops_cb_flags_set:
7128 return &bpf_sock_ops_cb_flags_set_proto;
7129 case BPF_FUNC_sock_map_update:
7130 return &bpf_sock_map_update_proto;
7131 case BPF_FUNC_sock_hash_update:
7132 return &bpf_sock_hash_update_proto;
7133 case BPF_FUNC_get_socket_cookie:
7134 return &bpf_get_socket_cookie_sock_ops_proto;
7135 case BPF_FUNC_get_local_storage:
7136 return &bpf_get_local_storage_proto;
7137 case BPF_FUNC_perf_event_output:
7138 return &bpf_event_output_data_proto;
7139 case BPF_FUNC_sk_storage_get:
7140 return &bpf_sk_storage_get_proto;
7141 case BPF_FUNC_sk_storage_delete:
7142 return &bpf_sk_storage_delete_proto;
7144 case BPF_FUNC_load_hdr_opt:
7145 return &bpf_sock_ops_load_hdr_opt_proto;
7146 case BPF_FUNC_store_hdr_opt:
7147 return &bpf_sock_ops_store_hdr_opt_proto;
7148 case BPF_FUNC_reserve_hdr_opt:
7149 return &bpf_sock_ops_reserve_hdr_opt_proto;
7150 case BPF_FUNC_tcp_sock:
7151 return &bpf_tcp_sock_proto;
7152 #endif /* CONFIG_INET */
7154 return bpf_sk_base_func_proto(func_id);
7158 const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
7159 const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
7161 static const struct bpf_func_proto *
7162 sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7165 case BPF_FUNC_msg_redirect_map:
7166 return &bpf_msg_redirect_map_proto;
7167 case BPF_FUNC_msg_redirect_hash:
7168 return &bpf_msg_redirect_hash_proto;
7169 case BPF_FUNC_msg_apply_bytes:
7170 return &bpf_msg_apply_bytes_proto;
7171 case BPF_FUNC_msg_cork_bytes:
7172 return &bpf_msg_cork_bytes_proto;
7173 case BPF_FUNC_msg_pull_data:
7174 return &bpf_msg_pull_data_proto;
7175 case BPF_FUNC_msg_push_data:
7176 return &bpf_msg_push_data_proto;
7177 case BPF_FUNC_msg_pop_data:
7178 return &bpf_msg_pop_data_proto;
7179 case BPF_FUNC_perf_event_output:
7180 return &bpf_event_output_data_proto;
7181 case BPF_FUNC_get_current_uid_gid:
7182 return &bpf_get_current_uid_gid_proto;
7183 case BPF_FUNC_get_current_pid_tgid:
7184 return &bpf_get_current_pid_tgid_proto;
7185 case BPF_FUNC_sk_storage_get:
7186 return &bpf_sk_storage_get_proto;
7187 case BPF_FUNC_sk_storage_delete:
7188 return &bpf_sk_storage_delete_proto;
7189 #ifdef CONFIG_CGROUPS
7190 case BPF_FUNC_get_current_cgroup_id:
7191 return &bpf_get_current_cgroup_id_proto;
7192 case BPF_FUNC_get_current_ancestor_cgroup_id:
7193 return &bpf_get_current_ancestor_cgroup_id_proto;
7195 #ifdef CONFIG_CGROUP_NET_CLASSID
7196 case BPF_FUNC_get_cgroup_classid:
7197 return &bpf_get_cgroup_classid_curr_proto;
7200 return bpf_sk_base_func_proto(func_id);
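/* Example (illustrative sketch): an SK_MSG verdict program using the
 * helpers listed above. "sock_map" is an assumed BPF_MAP_TYPE_SOCKMAP
 * defined elsewhere in the same object; section name per libbpf.
 */
SEC("sk_msg")
int msg_verdict(struct sk_msg_md *msg)
{
	__u32 key = 0;

	/* Apply the verdict below to the next 4k of payload at once */
	bpf_msg_apply_bytes(msg, 4096);
	return bpf_msg_redirect_map(msg, &sock_map, key, BPF_F_INGRESS);
}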
7204 const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
7205 const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
7207 static const struct bpf_func_proto *
7208 sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7211 case BPF_FUNC_skb_store_bytes:
7212 return &bpf_skb_store_bytes_proto;
7213 case BPF_FUNC_skb_load_bytes:
7214 return &bpf_skb_load_bytes_proto;
7215 case BPF_FUNC_skb_pull_data:
7216 return &sk_skb_pull_data_proto;
7217 case BPF_FUNC_skb_change_tail:
7218 return &sk_skb_change_tail_proto;
7219 case BPF_FUNC_skb_change_head:
7220 return &sk_skb_change_head_proto;
7221 case BPF_FUNC_get_socket_cookie:
7222 return &bpf_get_socket_cookie_proto;
7223 case BPF_FUNC_get_socket_uid:
7224 return &bpf_get_socket_uid_proto;
7225 case BPF_FUNC_sk_redirect_map:
7226 return &bpf_sk_redirect_map_proto;
7227 case BPF_FUNC_sk_redirect_hash:
7228 return &bpf_sk_redirect_hash_proto;
7229 case BPF_FUNC_perf_event_output:
7230 return &bpf_skb_event_output_proto;
7232 case BPF_FUNC_sk_lookup_tcp:
7233 return &bpf_sk_lookup_tcp_proto;
7234 case BPF_FUNC_sk_lookup_udp:
7235 return &bpf_sk_lookup_udp_proto;
7236 case BPF_FUNC_sk_release:
7237 return &bpf_sk_release_proto;
7238 case BPF_FUNC_skc_lookup_tcp:
7239 return &bpf_skc_lookup_tcp_proto;
7242 return bpf_sk_base_func_proto(func_id);
7246 static const struct bpf_func_proto *
7247 flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7250 case BPF_FUNC_skb_load_bytes:
7251 return &bpf_flow_dissector_load_bytes_proto;
7253 return bpf_sk_base_func_proto(func_id);
7257 static const struct bpf_func_proto *
7258 lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7261 case BPF_FUNC_skb_load_bytes:
7262 return &bpf_skb_load_bytes_proto;
7263 case BPF_FUNC_skb_pull_data:
7264 return &bpf_skb_pull_data_proto;
7265 case BPF_FUNC_csum_diff:
7266 return &bpf_csum_diff_proto;
7267 case BPF_FUNC_get_cgroup_classid:
7268 return &bpf_get_cgroup_classid_proto;
7269 case BPF_FUNC_get_route_realm:
7270 return &bpf_get_route_realm_proto;
7271 case BPF_FUNC_get_hash_recalc:
7272 return &bpf_get_hash_recalc_proto;
7273 case BPF_FUNC_perf_event_output:
7274 return &bpf_skb_event_output_proto;
7275 case BPF_FUNC_get_smp_processor_id:
7276 return &bpf_get_smp_processor_id_proto;
7277 case BPF_FUNC_skb_under_cgroup:
7278 return &bpf_skb_under_cgroup_proto;
7280 return bpf_sk_base_func_proto(func_id);
7284 static const struct bpf_func_proto *
7285 lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7288 case BPF_FUNC_lwt_push_encap:
7289 return &bpf_lwt_in_push_encap_proto;
7291 return lwt_out_func_proto(func_id, prog);
7295 static const struct bpf_func_proto *
7296 lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7299 case BPF_FUNC_skb_get_tunnel_key:
7300 return &bpf_skb_get_tunnel_key_proto;
7301 case BPF_FUNC_skb_set_tunnel_key:
7302 return bpf_get_skb_set_tunnel_proto(func_id);
7303 case BPF_FUNC_skb_get_tunnel_opt:
7304 return &bpf_skb_get_tunnel_opt_proto;
7305 case BPF_FUNC_skb_set_tunnel_opt:
7306 return bpf_get_skb_set_tunnel_proto(func_id);
7307 case BPF_FUNC_redirect:
7308 return &bpf_redirect_proto;
7309 case BPF_FUNC_clone_redirect:
7310 return &bpf_clone_redirect_proto;
7311 case BPF_FUNC_skb_change_tail:
7312 return &bpf_skb_change_tail_proto;
7313 case BPF_FUNC_skb_change_head:
7314 return &bpf_skb_change_head_proto;
7315 case BPF_FUNC_skb_store_bytes:
7316 return &bpf_skb_store_bytes_proto;
7317 case BPF_FUNC_csum_update:
7318 return &bpf_csum_update_proto;
7319 case BPF_FUNC_csum_level:
7320 return &bpf_csum_level_proto;
7321 case BPF_FUNC_l3_csum_replace:
7322 return &bpf_l3_csum_replace_proto;
7323 case BPF_FUNC_l4_csum_replace:
7324 return &bpf_l4_csum_replace_proto;
7325 case BPF_FUNC_set_hash_invalid:
7326 return &bpf_set_hash_invalid_proto;
7327 case BPF_FUNC_lwt_push_encap:
7328 return &bpf_lwt_xmit_push_encap_proto;
7330 return lwt_out_func_proto(func_id, prog);
7334 static const struct bpf_func_proto *
7335 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7338 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
7339 case BPF_FUNC_lwt_seg6_store_bytes:
7340 return &bpf_lwt_seg6_store_bytes_proto;
7341 case BPF_FUNC_lwt_seg6_action:
7342 return &bpf_lwt_seg6_action_proto;
7343 case BPF_FUNC_lwt_seg6_adjust_srh:
7344 return &bpf_lwt_seg6_adjust_srh_proto;
7347 return lwt_out_func_proto(func_id, prog);
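/* LWT and seg6local programs are attached per route rather than per
 * device. An illustrative iproute2 sketch (object and section names
 * are assumptions):
 *
 *	ip route add 10.1.0.0/24 encap bpf xmit obj lwt.o \
 *		section xmit dev eth0
 *	ip -6 route add fc00::1/128 encap seg6local action End.BPF \
 *		endpoint obj seg6.o section pass dev eth0
 */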
7351 static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
7352 const struct bpf_prog *prog,
7353 struct bpf_insn_access_aux *info)
7355 const int size_default = sizeof(__u32);
7357 if (off < 0 || off >= sizeof(struct __sk_buff))
7360 /* The verifier guarantees that size > 0. */
7361 if (off % size != 0)
7365 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7366 if (off + size > offsetofend(struct __sk_buff, cb[4]))
7369 case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
7370 case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
7371 case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
7372 case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
7373 case bpf_ctx_range(struct __sk_buff, data):
7374 case bpf_ctx_range(struct __sk_buff, data_meta):
7375 case bpf_ctx_range(struct __sk_buff, data_end):
7376 if (size != size_default)
7379 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
7381 case bpf_ctx_range(struct __sk_buff, tstamp):
7382 if (size != sizeof(__u64))
7385 case offsetof(struct __sk_buff, sk):
7386 if (type == BPF_WRITE || size != sizeof(__u64))
7388 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
7391 /* Only narrow read access allowed for now. */
7392 if (type == BPF_WRITE) {
7393 if (size != size_default)
7396 bpf_ctx_record_field_size(info, size_default);
7397 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
7398 return false;
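/* Example (illustrative sketch): a "narrow" context read that the
 * check above permits - loading only 2 bytes of the 4-byte protocol
 * member. Restricted C, built separately; the section name and headers
 * (<bpf/bpf_endian.h>, <linux/pkt_cls.h>) are assumptions.
 */
SEC("classifier")
int narrow_read(struct __sk_buff *skb)
{
	/* 2-byte load from a 4-byte ctx field: allowed as a narrow read */
	if (*(__u16 *)&skb->protocol == bpf_htons(ETH_P_IP))
		return TC_ACT_OK;
	return TC_ACT_SHOT;
}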
7405 static bool sk_filter_is_valid_access(int off, int size,
7406 enum bpf_access_type type,
7407 const struct bpf_prog *prog,
7408 struct bpf_insn_access_aux *info)
7411 case bpf_ctx_range(struct __sk_buff, tc_classid):
7412 case bpf_ctx_range(struct __sk_buff, data):
7413 case bpf_ctx_range(struct __sk_buff, data_meta):
7414 case bpf_ctx_range(struct __sk_buff, data_end):
7415 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
7416 case bpf_ctx_range(struct __sk_buff, tstamp):
7417 case bpf_ctx_range(struct __sk_buff, wire_len):
7421 if (type == BPF_WRITE) {
7423 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7430 return bpf_skb_is_valid_access(off, size, type, prog, info);
7433 static bool cg_skb_is_valid_access(int off, int size,
7434 enum bpf_access_type type,
7435 const struct bpf_prog *prog,
7436 struct bpf_insn_access_aux *info)
7439 case bpf_ctx_range(struct __sk_buff, tc_classid):
7440 case bpf_ctx_range(struct __sk_buff, data_meta):
7441 case bpf_ctx_range(struct __sk_buff, wire_len):
7443 case bpf_ctx_range(struct __sk_buff, data):
7444 case bpf_ctx_range(struct __sk_buff, data_end):
7450 if (type == BPF_WRITE) {
7452 case bpf_ctx_range(struct __sk_buff, mark):
7453 case bpf_ctx_range(struct __sk_buff, priority):
7454 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7456 case bpf_ctx_range(struct __sk_buff, tstamp):
7466 case bpf_ctx_range(struct __sk_buff, data):
7467 info->reg_type = PTR_TO_PACKET;
7469 case bpf_ctx_range(struct __sk_buff, data_end):
7470 info->reg_type = PTR_TO_PACKET_END;
7474 return bpf_skb_is_valid_access(off, size, type, prog, info);
7477 static bool lwt_is_valid_access(int off, int size,
7478 enum bpf_access_type type,
7479 const struct bpf_prog *prog,
7480 struct bpf_insn_access_aux *info)
7483 case bpf_ctx_range(struct __sk_buff, tc_classid):
7484 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
7485 case bpf_ctx_range(struct __sk_buff, data_meta):
7486 case bpf_ctx_range(struct __sk_buff, tstamp):
7487 case bpf_ctx_range(struct __sk_buff, wire_len):
7491 if (type == BPF_WRITE) {
7493 case bpf_ctx_range(struct __sk_buff, mark):
7494 case bpf_ctx_range(struct __sk_buff, priority):
7495 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7503 case bpf_ctx_range(struct __sk_buff, data):
7504 info->reg_type = PTR_TO_PACKET;
7506 case bpf_ctx_range(struct __sk_buff, data_end):
7507 info->reg_type = PTR_TO_PACKET_END;
7511 return bpf_skb_is_valid_access(off, size, type, prog, info);
7514 /* Attach type specific accesses */
7515 static bool __sock_filter_check_attach_type(int off,
7516 enum bpf_access_type access_type,
7517 enum bpf_attach_type attach_type)
7520 case offsetof(struct bpf_sock, bound_dev_if):
7521 case offsetof(struct bpf_sock, mark):
7522 case offsetof(struct bpf_sock, priority):
7523 switch (attach_type) {
7524 case BPF_CGROUP_INET_SOCK_CREATE:
7525 case BPF_CGROUP_INET_SOCK_RELEASE:
7530 case bpf_ctx_range(struct bpf_sock, src_ip4):
7531 switch (attach_type) {
7532 case BPF_CGROUP_INET4_POST_BIND:
7537 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
7538 switch (attach_type) {
7539 case BPF_CGROUP_INET6_POST_BIND:
7544 case bpf_ctx_range(struct bpf_sock, src_port):
7545 switch (attach_type) {
7546 case BPF_CGROUP_INET4_POST_BIND:
7547 case BPF_CGROUP_INET6_POST_BIND:
7554 return access_type == BPF_READ;
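/* Example (illustrative sketch): per the rules above, bound_dev_if is
 * writable at socket creation while src_port never is. Section name
 * per libbpf conventions; the ifindex value is an assumption.
 */
SEC("cgroup/sock")
int on_sock_create(struct bpf_sock *ctx)
{
	ctx->bound_dev_if = 3;	/* allowed: BPF_CGROUP_INET_SOCK_CREATE */
	/* writing ctx->src_port here would be rejected by the verifier */
	return 1;
}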
7559 bool bpf_sock_common_is_valid_access(int off, int size,
7560 enum bpf_access_type type,
7561 struct bpf_insn_access_aux *info)
7564 case bpf_ctx_range_till(struct bpf_sock, type, priority):
7567 return bpf_sock_is_valid_access(off, size, type, info);
7571 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
7572 struct bpf_insn_access_aux *info)
7574 const int size_default = sizeof(__u32);
7576 if (off < 0 || off >= sizeof(struct bpf_sock))
7578 if (off % size != 0)
7582 case offsetof(struct bpf_sock, state):
7583 case offsetof(struct bpf_sock, family):
7584 case offsetof(struct bpf_sock, type):
7585 case offsetof(struct bpf_sock, protocol):
7586 case offsetof(struct bpf_sock, dst_port):
7587 case offsetof(struct bpf_sock, src_port):
7588 case offsetof(struct bpf_sock, rx_queue_mapping):
7589 case bpf_ctx_range(struct bpf_sock, src_ip4):
7590 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
7591 case bpf_ctx_range(struct bpf_sock, dst_ip4):
7592 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
7593 bpf_ctx_record_field_size(info, size_default);
7594 return bpf_ctx_narrow_access_ok(off, size, size_default);
7597 return size == size_default;
7600 static bool sock_filter_is_valid_access(int off, int size,
7601 enum bpf_access_type type,
7602 const struct bpf_prog *prog,
7603 struct bpf_insn_access_aux *info)
7605 if (!bpf_sock_is_valid_access(off, size, type, info))
7607 return __sock_filter_check_attach_type(off, type,
7608 prog->expected_attach_type);
7611 static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
7612 const struct bpf_prog *prog)
7614 /* Neither direct read nor direct write requires any preliminary
7615 * action.
7616 */
7617 return 0;
7620 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
7621 const struct bpf_prog *prog, int drop_verdict)
7623 struct bpf_insn *insn = insn_buf;
7628 /* if (!skb->cloned)
7631 * (Fast-path, otherwise approximation that we might be
7632 * a clone, do the rest in helper.)
7634 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
7635 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
7636 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
7638 /* ret = bpf_skb_pull_data(skb, 0); */
7639 *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
7640 *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
7641 *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7642 BPF_FUNC_skb_pull_data);
7643 /* if (!ret)
7644 *      goto restore;
7645 * return TC_ACT_SHOT;
7646 */
7647 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
7648 *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
7649 *insn++ = BPF_EXIT_INSN();
7651 /* restore: */
7652 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
7653 /* <original insn> */
7654 *insn++ = prog->insnsi[0];
7656 return insn - insn_buf;
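/* In C terms, the prologue emitted above prepends roughly this to the
 * program (sketch; drop_verdict is TC_ACT_SHOT or SK_DROP):
 *
 *	if (skb->cloned && bpf_skb_pull_data(skb, 0))
 *		return drop_verdict;
 */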
7659 static int bpf_gen_ld_abs(const struct bpf_insn *orig,
7660 struct bpf_insn *insn_buf)
7662 bool indirect = BPF_MODE(orig->code) == BPF_IND;
7663 struct bpf_insn *insn = insn_buf;
7665 if (!indirect) {
7666 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
7667 } else {
7668 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
7669 if (orig->imm)
7670 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
7671 }
7672 /* We're guaranteed here that CTX is in R6. */
7673 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
7675 switch (BPF_SIZE(orig->code)) {
7676 case BPF_B:
7677 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
7678 break;
7679 case BPF_H:
7680 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
7681 break;
7682 case BPF_W:
7683 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
7684 break;
7685 }
7687 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
7688 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
7689 *insn++ = BPF_EXIT_INSN();
7691 return insn - insn_buf;
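/* This preserves the classic BPF LD_ABS/LD_IND contract: the helper
 * leaves the loaded value in r0, and a negative return terminates the
 * program with return value 0. Roughly, for BPF_LD | BPF_ABS | BPF_H:
 *
 *	r0 = bpf_skb_load_helper_16_no_cache(skb, imm);
 *	if (r0 < 0)
 *		return 0;
 */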
7694 static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
7695 const struct bpf_prog *prog)
7697 return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
7700 static bool tc_cls_act_is_valid_access(int off, int size,
7701 enum bpf_access_type type,
7702 const struct bpf_prog *prog,
7703 struct bpf_insn_access_aux *info)
7705 if (type == BPF_WRITE) {
7707 case bpf_ctx_range(struct __sk_buff, mark):
7708 case bpf_ctx_range(struct __sk_buff, tc_index):
7709 case bpf_ctx_range(struct __sk_buff, priority):
7710 case bpf_ctx_range(struct __sk_buff, tc_classid):
7711 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7712 case bpf_ctx_range(struct __sk_buff, tstamp):
7713 case bpf_ctx_range(struct __sk_buff, queue_mapping):
7721 case bpf_ctx_range(struct __sk_buff, data):
7722 info->reg_type = PTR_TO_PACKET;
7724 case bpf_ctx_range(struct __sk_buff, data_meta):
7725 info->reg_type = PTR_TO_PACKET_META;
7727 case bpf_ctx_range(struct __sk_buff, data_end):
7728 info->reg_type = PTR_TO_PACKET_END;
7730 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
7734 return bpf_skb_is_valid_access(off, size, type, prog, info);
7737 static bool __is_valid_xdp_access(int off, int size)
7739 if (off < 0 || off >= sizeof(struct xdp_md))
7741 if (off % size != 0)
7743 if (size != sizeof(__u32))
7749 static bool xdp_is_valid_access(int off, int size,
7750 enum bpf_access_type type,
7751 const struct bpf_prog *prog,
7752 struct bpf_insn_access_aux *info)
7754 if (prog->expected_attach_type != BPF_XDP_DEVMAP) {
7756 case offsetof(struct xdp_md, egress_ifindex):
7761 if (type == BPF_WRITE) {
7762 if (bpf_prog_is_dev_bound(prog->aux)) {
7764 case offsetof(struct xdp_md, rx_queue_index):
7765 return __is_valid_xdp_access(off, size);
7772 case offsetof(struct xdp_md, data):
7773 info->reg_type = PTR_TO_PACKET;
7775 case offsetof(struct xdp_md, data_meta):
7776 info->reg_type = PTR_TO_PACKET_META;
7778 case offsetof(struct xdp_md, data_end):
7779 info->reg_type = PTR_TO_PACKET_END;
7783 return __is_valid_xdp_access(off, size);
7786 void bpf_warn_invalid_xdp_action(u32 act)
7788 const u32 act_max = XDP_REDIRECT;
7790 WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
7791 act > act_max ? "Illegal" : "Driver unsupported",
7792 act);
7794 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
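/* Drivers are expected to call this from their XDP verdict switch; the
 * usual pattern is a sketch like:
 *
 *	switch (act) {
 *	case XDP_PASS:
 *	case XDP_TX:
 *		break;
 *	default:
 *		bpf_warn_invalid_xdp_action(act);
 *		fallthrough;
 *	case XDP_ABORTED:
 *	case XDP_DROP:
 *		break;	// drop the frame
 *	}
 */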
7796 static bool sock_addr_is_valid_access(int off, int size,
7797 enum bpf_access_type type,
7798 const struct bpf_prog *prog,
7799 struct bpf_insn_access_aux *info)
7801 const int size_default = sizeof(__u32);
7803 if (off < 0 || off >= sizeof(struct bpf_sock_addr))
7805 if (off % size != 0)
7808 /* Disallow access to IPv6 fields from IPv4 context and vice
7809 * versa.
7810 */
7812 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
7813 switch (prog->expected_attach_type) {
7814 case BPF_CGROUP_INET4_BIND:
7815 case BPF_CGROUP_INET4_CONNECT:
7816 case BPF_CGROUP_INET4_GETPEERNAME:
7817 case BPF_CGROUP_INET4_GETSOCKNAME:
7818 case BPF_CGROUP_UDP4_SENDMSG:
7819 case BPF_CGROUP_UDP4_RECVMSG:
7825 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
7826 switch (prog->expected_attach_type) {
7827 case BPF_CGROUP_INET6_BIND:
7828 case BPF_CGROUP_INET6_CONNECT:
7829 case BPF_CGROUP_INET6_GETPEERNAME:
7830 case BPF_CGROUP_INET6_GETSOCKNAME:
7831 case BPF_CGROUP_UDP6_SENDMSG:
7832 case BPF_CGROUP_UDP6_RECVMSG:
7838 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
7839 switch (prog->expected_attach_type) {
7840 case BPF_CGROUP_UDP4_SENDMSG:
7846 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
7848 switch (prog->expected_attach_type) {
7849 case BPF_CGROUP_UDP6_SENDMSG:
7858 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
7859 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
7860 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
7861 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
7863 case bpf_ctx_range(struct bpf_sock_addr, user_port):
7864 if (type == BPF_READ) {
7865 bpf_ctx_record_field_size(info, size_default);
7867 if (bpf_ctx_wide_access_ok(off, size,
7868 struct bpf_sock_addr,
7872 if (bpf_ctx_wide_access_ok(off, size,
7873 struct bpf_sock_addr,
7877 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
7880 if (bpf_ctx_wide_access_ok(off, size,
7881 struct bpf_sock_addr,
7885 if (bpf_ctx_wide_access_ok(off, size,
7886 struct bpf_sock_addr,
7890 if (size != size_default)
7894 case offsetof(struct bpf_sock_addr, sk):
7895 if (type != BPF_READ)
7897 if (size != sizeof(__u64))
7899 info->reg_type = PTR_TO_SOCKET;
7902 if (type == BPF_READ) {
7903 if (size != size_default)
7904 return false;
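/* Example (illustrative sketch): a cgroup/connect4 program relying on
 * the write rules above to transparently redirect connect(2). Address
 * and port values are assumptions; built separately against
 * <bpf/bpf_helpers.h> and <bpf/bpf_endian.h>.
 */
SEC("cgroup/connect4")
int redirect_connect4(struct bpf_sock_addr *ctx)
{
	ctx->user_ip4 = bpf_htonl(0x7f000001);	/* 127.0.0.1 */
	ctx->user_port = bpf_htons(6379);	/* network byte order */
	return 1;
}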
7913 static bool sock_ops_is_valid_access(int off, int size,
7914 enum bpf_access_type type,
7915 const struct bpf_prog *prog,
7916 struct bpf_insn_access_aux *info)
7918 const int size_default = sizeof(__u32);
7920 if (off < 0 || off >= sizeof(struct bpf_sock_ops))
7923 /* The verifier guarantees that size > 0. */
7924 if (off % size != 0)
7927 if (type == BPF_WRITE) {
7929 case offsetof(struct bpf_sock_ops, reply):
7930 case offsetof(struct bpf_sock_ops, sk_txhash):
7931 if (size != size_default)
7939 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
7941 if (size != sizeof(__u64))
7944 case offsetof(struct bpf_sock_ops, sk):
7945 if (size != sizeof(__u64))
7947 info->reg_type = PTR_TO_SOCKET_OR_NULL;
7949 case offsetof(struct bpf_sock_ops, skb_data):
7950 if (size != sizeof(__u64))
7952 info->reg_type = PTR_TO_PACKET;
7954 case offsetof(struct bpf_sock_ops, skb_data_end):
7955 if (size != sizeof(__u64))
7957 info->reg_type = PTR_TO_PACKET_END;
7959 case offsetof(struct bpf_sock_ops, skb_tcp_flags):
7960 bpf_ctx_record_field_size(info, size_default);
7961 return bpf_ctx_narrow_access_ok(off, size,
7964 if (size != size_default)
7973 static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
7974 const struct bpf_prog *prog)
7976 return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
7979 static bool sk_skb_is_valid_access(int off, int size,
7980 enum bpf_access_type type,
7981 const struct bpf_prog *prog,
7982 struct bpf_insn_access_aux *info)
7985 case bpf_ctx_range(struct __sk_buff, tc_classid):
7986 case bpf_ctx_range(struct __sk_buff, data_meta):
7987 case bpf_ctx_range(struct __sk_buff, tstamp):
7988 case bpf_ctx_range(struct __sk_buff, wire_len):
7992 if (type == BPF_WRITE) {
7994 case bpf_ctx_range(struct __sk_buff, tc_index):
7995 case bpf_ctx_range(struct __sk_buff, priority):
8003 case bpf_ctx_range(struct __sk_buff, mark):
8005 case bpf_ctx_range(struct __sk_buff, data):
8006 info->reg_type = PTR_TO_PACKET;
8008 case bpf_ctx_range(struct __sk_buff, data_end):
8009 info->reg_type = PTR_TO_PACKET_END;
8013 return bpf_skb_is_valid_access(off, size, type, prog, info);
8016 static bool sk_msg_is_valid_access(int off, int size,
8017 enum bpf_access_type type,
8018 const struct bpf_prog *prog,
8019 struct bpf_insn_access_aux *info)
8021 if (type == BPF_WRITE)
8024 if (off % size != 0)
8028 case offsetof(struct sk_msg_md, data):
8029 info->reg_type = PTR_TO_PACKET;
8030 if (size != sizeof(__u64))
8033 case offsetof(struct sk_msg_md, data_end):
8034 info->reg_type = PTR_TO_PACKET_END;
8035 if (size != sizeof(__u64))
8038 case offsetof(struct sk_msg_md, sk):
8039 if (size != sizeof(__u64))
8041 info->reg_type = PTR_TO_SOCKET;
8043 case bpf_ctx_range(struct sk_msg_md, family):
8044 case bpf_ctx_range(struct sk_msg_md, remote_ip4):
8045 case bpf_ctx_range(struct sk_msg_md, local_ip4):
8046 case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
8047 case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
8048 case bpf_ctx_range(struct sk_msg_md, remote_port):
8049 case bpf_ctx_range(struct sk_msg_md, local_port):
8050 case bpf_ctx_range(struct sk_msg_md, size):
8051 if (size != sizeof(__u32))
8060 static bool flow_dissector_is_valid_access(int off, int size,
8061 enum bpf_access_type type,
8062 const struct bpf_prog *prog,
8063 struct bpf_insn_access_aux *info)
8065 const int size_default = sizeof(__u32);
8067 if (off < 0 || off >= sizeof(struct __sk_buff))
8070 if (type == BPF_WRITE)
8074 case bpf_ctx_range(struct __sk_buff, data):
8075 if (size != size_default)
8077 info->reg_type = PTR_TO_PACKET;
8079 case bpf_ctx_range(struct __sk_buff, data_end):
8080 if (size != size_default)
8082 info->reg_type = PTR_TO_PACKET_END;
8084 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
8085 if (size != sizeof(__u64))
8087 info->reg_type = PTR_TO_FLOW_KEYS;
8094 static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
8095 const struct bpf_insn *si,
8096 struct bpf_insn *insn_buf,
8097 struct bpf_prog *prog,
8101 struct bpf_insn *insn = insn_buf;
8104 case offsetof(struct __sk_buff, data):
8105 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
8106 si->dst_reg, si->src_reg,
8107 offsetof(struct bpf_flow_dissector, data));
8110 case offsetof(struct __sk_buff, data_end):
8111 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
8112 si->dst_reg, si->src_reg,
8113 offsetof(struct bpf_flow_dissector, data_end));
8116 case offsetof(struct __sk_buff, flow_keys):
8117 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
8118 si->dst_reg, si->src_reg,
8119 offsetof(struct bpf_flow_dissector, flow_keys));
8123 return insn - insn_buf;
8126 static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si,
8127 struct bpf_insn *insn)
8129 /* si->dst_reg = skb_shinfo(SKB); */
8130 #ifdef NET_SKBUFF_DATA_USES_OFFSET
8131 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
8132 BPF_REG_AX, si->src_reg,
8133 offsetof(struct sk_buff, end));
8134 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
8135 si->dst_reg, si->src_reg,
8136 offsetof(struct sk_buff, head));
8137 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
8139 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
8140 si->dst_reg, si->src_reg,
8141 offsetof(struct sk_buff, end));
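/* Both variants mirror the skb_shinfo() macro. Roughly:
 *
 *	shinfo = skb->head + skb->end;	// NET_SKBUFF_DATA_USES_OFFSET
 *	shinfo = skb->end;		// skb->end is already a pointer
 */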
8147 static u32 bpf_convert_ctx_access(enum bpf_access_type type,
8148 const struct bpf_insn *si,
8149 struct bpf_insn *insn_buf,
8150 struct bpf_prog *prog, u32 *target_size)
8152 struct bpf_insn *insn = insn_buf;
8156 case offsetof(struct __sk_buff, len):
8157 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8158 bpf_target_off(struct sk_buff, len, 4,
8162 case offsetof(struct __sk_buff, protocol):
8163 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8164 bpf_target_off(struct sk_buff, protocol, 2,
8168 case offsetof(struct __sk_buff, vlan_proto):
8169 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8170 bpf_target_off(struct sk_buff, vlan_proto, 2,
8174 case offsetof(struct __sk_buff, priority):
8175 if (type == BPF_WRITE)
8176 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8177 bpf_target_off(struct sk_buff, priority, 4,
8180 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8181 bpf_target_off(struct sk_buff, priority, 4,
8185 case offsetof(struct __sk_buff, ingress_ifindex):
8186 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8187 bpf_target_off(struct sk_buff, skb_iif, 4,
8191 case offsetof(struct __sk_buff, ifindex):
8192 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
8193 si->dst_reg, si->src_reg,
8194 offsetof(struct sk_buff, dev));
8195 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
8196 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8197 bpf_target_off(struct net_device, ifindex, 4,
8201 case offsetof(struct __sk_buff, hash):
8202 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8203 bpf_target_off(struct sk_buff, hash, 4,
8207 case offsetof(struct __sk_buff, mark):
8208 if (type == BPF_WRITE)
8209 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8210 bpf_target_off(struct sk_buff, mark, 4,
8213 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8214 bpf_target_off(struct sk_buff, mark, 4,
8218 case offsetof(struct __sk_buff, pkt_type):
8220 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
8222 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
8223 #ifdef __BIG_ENDIAN_BITFIELD
8224 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
8228 case offsetof(struct __sk_buff, queue_mapping):
8229 if (type == BPF_WRITE) {
8230 *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
8231 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
8232 bpf_target_off(struct sk_buff,
8236 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8237 bpf_target_off(struct sk_buff,
8243 case offsetof(struct __sk_buff, vlan_present):
8245 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
8246 PKT_VLAN_PRESENT_OFFSET());
8247 if (PKT_VLAN_PRESENT_BIT)
8248 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
8249 if (PKT_VLAN_PRESENT_BIT < 7)
8250 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
8253 case offsetof(struct __sk_buff, vlan_tci):
8254 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8255 bpf_target_off(struct sk_buff, vlan_tci, 2,
8259 case offsetof(struct __sk_buff, cb[0]) ...
8260 offsetofend(struct __sk_buff, cb[4]) - 1:
8261 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20);
8262 BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
8263 offsetof(struct qdisc_skb_cb, data)) %
8266 prog->cb_access = 1;
8268 off -= offsetof(struct __sk_buff, cb[0]);
8269 off += offsetof(struct sk_buff, cb);
8270 off += offsetof(struct qdisc_skb_cb, data);
8271 if (type == BPF_WRITE)
8272 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
8275 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
8279 case offsetof(struct __sk_buff, tc_classid):
8280 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2);
8283 off -= offsetof(struct __sk_buff, tc_classid);
8284 off += offsetof(struct sk_buff, cb);
8285 off += offsetof(struct qdisc_skb_cb, tc_classid);
8287 if (type == BPF_WRITE)
8288 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
8291 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
8295 case offsetof(struct __sk_buff, data):
8296 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
8297 si->dst_reg, si->src_reg,
8298 offsetof(struct sk_buff, data));
8301 case offsetof(struct __sk_buff, data_meta):
8303 off -= offsetof(struct __sk_buff, data_meta);
8304 off += offsetof(struct sk_buff, cb);
8305 off += offsetof(struct bpf_skb_data_end, data_meta);
8306 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
8310 case offsetof(struct __sk_buff, data_end):
8312 off -= offsetof(struct __sk_buff, data_end);
8313 off += offsetof(struct sk_buff, cb);
8314 off += offsetof(struct bpf_skb_data_end, data_end);
8315 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
8319 case offsetof(struct __sk_buff, tc_index):
8320 #ifdef CONFIG_NET_SCHED
8321 if (type == BPF_WRITE)
8322 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
8323 bpf_target_off(struct sk_buff, tc_index, 2,
8326 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8327 bpf_target_off(struct sk_buff, tc_index, 2,
8331 if (type == BPF_WRITE)
8332 *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
8334 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
8338 case offsetof(struct __sk_buff, napi_id):
8339 #if defined(CONFIG_NET_RX_BUSY_POLL)
8340 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8341 bpf_target_off(struct sk_buff, napi_id, 4,
8343 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
8344 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
8347 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
8350 case offsetof(struct __sk_buff, family):
8351 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
8353 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8354 si->dst_reg, si->src_reg,
8355 offsetof(struct sk_buff, sk));
8356 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8357 bpf_target_off(struct sock_common,
8361 case offsetof(struct __sk_buff, remote_ip4):
8362 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
8364 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8365 si->dst_reg, si->src_reg,
8366 offsetof(struct sk_buff, sk));
8367 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8368 bpf_target_off(struct sock_common,
8372 case offsetof(struct __sk_buff, local_ip4):
8373 BUILD_BUG_ON(sizeof_field(struct sock_common,
8374 skc_rcv_saddr) != 4);
8376 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8377 si->dst_reg, si->src_reg,
8378 offsetof(struct sk_buff, sk));
8379 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8380 bpf_target_off(struct sock_common,
8384 case offsetof(struct __sk_buff, remote_ip6[0]) ...
8385 offsetof(struct __sk_buff, remote_ip6[3]):
8386 #if IS_ENABLED(CONFIG_IPV6)
8387 BUILD_BUG_ON(sizeof_field(struct sock_common,
8388 skc_v6_daddr.s6_addr32[0]) != 4);
8391 off -= offsetof(struct __sk_buff, remote_ip6[0]);
8393 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8394 si->dst_reg, si->src_reg,
8395 offsetof(struct sk_buff, sk));
8396 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8397 offsetof(struct sock_common,
8398 skc_v6_daddr.s6_addr32[0]) +
8401 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8404 case offsetof(struct __sk_buff, local_ip6[0]) ...
8405 offsetof(struct __sk_buff, local_ip6[3]):
8406 #if IS_ENABLED(CONFIG_IPV6)
8407 BUILD_BUG_ON(sizeof_field(struct sock_common,
8408 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
8411 off -= offsetof(struct __sk_buff, local_ip6[0]);
8413 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8414 si->dst_reg, si->src_reg,
8415 offsetof(struct sk_buff, sk));
8416 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8417 offsetof(struct sock_common,
8418 skc_v6_rcv_saddr.s6_addr32[0]) +
8421 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8425 case offsetof(struct __sk_buff, remote_port):
8426 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
8428 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8429 si->dst_reg, si->src_reg,
8430 offsetof(struct sk_buff, sk));
8431 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8432 bpf_target_off(struct sock_common,
8435 #ifndef __BIG_ENDIAN_BITFIELD
8436 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
8440 case offsetof(struct __sk_buff, local_port):
8441 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
8443 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8444 si->dst_reg, si->src_reg,
8445 offsetof(struct sk_buff, sk));
8446 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8447 bpf_target_off(struct sock_common,
8448 skc_num, 2, target_size));
8451 case offsetof(struct __sk_buff, tstamp):
8452 BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8);
8454 if (type == BPF_WRITE)
8455 *insn++ = BPF_STX_MEM(BPF_DW,
8456 si->dst_reg, si->src_reg,
8457 bpf_target_off(struct sk_buff,
8461 *insn++ = BPF_LDX_MEM(BPF_DW,
8462 si->dst_reg, si->src_reg,
8463 bpf_target_off(struct sk_buff,
8468 case offsetof(struct __sk_buff, gso_segs):
8469 insn = bpf_convert_shinfo_access(si, insn);
8470 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
8471 si->dst_reg, si->dst_reg,
8472 bpf_target_off(struct skb_shared_info,
8476 case offsetof(struct __sk_buff, gso_size):
8477 insn = bpf_convert_shinfo_access(si, insn);
8478 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_size),
8479 si->dst_reg, si->dst_reg,
8480 bpf_target_off(struct skb_shared_info,
8484 case offsetof(struct __sk_buff, wire_len):
8485 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4);
8488 off -= offsetof(struct __sk_buff, wire_len);
8489 off += offsetof(struct sk_buff, cb);
8490 off += offsetof(struct qdisc_skb_cb, pkt_len);
8492 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
8495 case offsetof(struct __sk_buff, sk):
8496 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8497 si->dst_reg, si->src_reg,
8498 offsetof(struct sk_buff, sk));
8502 return insn - insn_buf;
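/* Taken together, these cases implement the __sk_buff "view": the
 * verifier rewrites every context access into loads/stores on the real
 * struct sk_buff. For instance, a program's
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len));
 *
 * is emitted as
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len));
 *
 * while fields like ifindex expand into the multi-load sequences via
 * skb->dev seen above.
 */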
8505 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
8506 const struct bpf_insn *si,
8507 struct bpf_insn *insn_buf,
8508 struct bpf_prog *prog, u32 *target_size)
8510 struct bpf_insn *insn = insn_buf;
8514 case offsetof(struct bpf_sock, bound_dev_if):
8515 BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);
8517 if (type == BPF_WRITE)
8518 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8519 offsetof(struct sock, sk_bound_dev_if));
8521 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8522 offsetof(struct sock, sk_bound_dev_if));
8525 case offsetof(struct bpf_sock, mark):
8526 BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);
8528 if (type == BPF_WRITE)
8529 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8530 offsetof(struct sock, sk_mark));
8532 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8533 offsetof(struct sock, sk_mark));
8536 case offsetof(struct bpf_sock, priority):
8537 BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);
8539 if (type == BPF_WRITE)
8540 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8541 offsetof(struct sock, sk_priority));
8543 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8544 offsetof(struct sock, sk_priority));
8547 case offsetof(struct bpf_sock, family):
8548 *insn++ = BPF_LDX_MEM(
8549 BPF_FIELD_SIZEOF(struct sock_common, skc_family),
8550 si->dst_reg, si->src_reg,
8551 bpf_target_off(struct sock_common,
8553 sizeof_field(struct sock_common,
8558 case offsetof(struct bpf_sock, type):
8559 *insn++ = BPF_LDX_MEM(
8560 BPF_FIELD_SIZEOF(struct sock, sk_type),
8561 si->dst_reg, si->src_reg,
8562 bpf_target_off(struct sock, sk_type,
8563 sizeof_field(struct sock, sk_type),
8567 case offsetof(struct bpf_sock, protocol):
8568 *insn++ = BPF_LDX_MEM(
8569 BPF_FIELD_SIZEOF(struct sock, sk_protocol),
8570 si->dst_reg, si->src_reg,
8571 bpf_target_off(struct sock, sk_protocol,
8572 sizeof_field(struct sock, sk_protocol),
8576 case offsetof(struct bpf_sock, src_ip4):
8577 *insn++ = BPF_LDX_MEM(
8578 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
8579 bpf_target_off(struct sock_common, skc_rcv_saddr,
8580 sizeof_field(struct sock_common,
8585 case offsetof(struct bpf_sock, dst_ip4):
8586 *insn++ = BPF_LDX_MEM(
8587 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
8588 bpf_target_off(struct sock_common, skc_daddr,
8589 sizeof_field(struct sock_common,
8594 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
8595 #if IS_ENABLED(CONFIG_IPV6)
8597 off -= offsetof(struct bpf_sock, src_ip6[0]);
8598 *insn++ = BPF_LDX_MEM(
8599 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
8602 skc_v6_rcv_saddr.s6_addr32[0],
8603 sizeof_field(struct sock_common,
8604 skc_v6_rcv_saddr.s6_addr32[0]),
8605 target_size) + off);
8608 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8612 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
8613 #if IS_ENABLED(CONFIG_IPV6)
8615 off -= offsetof(struct bpf_sock, dst_ip6[0]);
8616 *insn++ = BPF_LDX_MEM(
8617 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
8618 bpf_target_off(struct sock_common,
8619 skc_v6_daddr.s6_addr32[0],
8620 sizeof_field(struct sock_common,
8621 skc_v6_daddr.s6_addr32[0]),
8622 target_size) + off);
8624 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8629 case offsetof(struct bpf_sock, src_port):
8630 *insn++ = BPF_LDX_MEM(
8631 BPF_FIELD_SIZEOF(struct sock_common, skc_num),
8632 si->dst_reg, si->src_reg,
8633 bpf_target_off(struct sock_common, skc_num,
8634 sizeof_field(struct sock_common,
8639 case offsetof(struct bpf_sock, dst_port):
8640 *insn++ = BPF_LDX_MEM(
8641 BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
8642 si->dst_reg, si->src_reg,
8643 bpf_target_off(struct sock_common, skc_dport,
8644 sizeof_field(struct sock_common,
8649 case offsetof(struct bpf_sock, state):
8650 *insn++ = BPF_LDX_MEM(
8651 BPF_FIELD_SIZEOF(struct sock_common, skc_state),
8652 si->dst_reg, si->src_reg,
8653 bpf_target_off(struct sock_common, skc_state,
8654 sizeof_field(struct sock_common,
8658 case offsetof(struct bpf_sock, rx_queue_mapping):
8660 *insn++ = BPF_LDX_MEM(
8661 BPF_FIELD_SIZEOF(struct sock, sk_rx_queue_mapping),
8662 si->dst_reg, si->src_reg,
8663 bpf_target_off(struct sock, sk_rx_queue_mapping,
8664 sizeof_field(struct sock,
8665 sk_rx_queue_mapping),
8667 *insn++ = BPF_JMP_IMM(BPF_JNE, si->dst_reg, NO_QUEUE_MAPPING,
8669 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1);
8671 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1);
8677 return insn - insn_buf;
8680 static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
8681 const struct bpf_insn *si,
8682 struct bpf_insn *insn_buf,
8683 struct bpf_prog *prog, u32 *target_size)
8685 struct bpf_insn *insn = insn_buf;
8688 case offsetof(struct __sk_buff, ifindex):
8689 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
8690 si->dst_reg, si->src_reg,
8691 offsetof(struct sk_buff, dev));
8692 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8693 bpf_target_off(struct net_device, ifindex, 4,
8697 return bpf_convert_ctx_access(type, si, insn_buf, prog,
8701 return insn - insn_buf;
8704 static u32 xdp_convert_ctx_access(enum bpf_access_type type,
8705 const struct bpf_insn *si,
8706 struct bpf_insn *insn_buf,
8707 struct bpf_prog *prog, u32 *target_size)
8709 struct bpf_insn *insn = insn_buf;
8712 case offsetof(struct xdp_md, data):
8713 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
8714 si->dst_reg, si->src_reg,
8715 offsetof(struct xdp_buff, data));
8717 case offsetof(struct xdp_md, data_meta):
8718 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
8719 si->dst_reg, si->src_reg,
8720 offsetof(struct xdp_buff, data_meta));
8722 case offsetof(struct xdp_md, data_end):
8723 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
8724 si->dst_reg, si->src_reg,
8725 offsetof(struct xdp_buff, data_end));
8727 case offsetof(struct xdp_md, ingress_ifindex):
8728 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
8729 si->dst_reg, si->src_reg,
8730 offsetof(struct xdp_buff, rxq));
8731 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
8732 si->dst_reg, si->dst_reg,
8733 offsetof(struct xdp_rxq_info, dev));
8734 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8735 offsetof(struct net_device, ifindex));
8737 case offsetof(struct xdp_md, rx_queue_index):
8738 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
8739 si->dst_reg, si->src_reg,
8740 offsetof(struct xdp_buff, rxq));
8741 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8742 offsetof(struct xdp_rxq_info,
8745 case offsetof(struct xdp_md, egress_ifindex):
8746 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, txq),
8747 si->dst_reg, si->src_reg,
8748 offsetof(struct xdp_buff, txq));
8749 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_txq_info, dev),
8750 si->dst_reg, si->dst_reg,
8751 offsetof(struct xdp_txq_info, dev));
8752 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8753 offsetof(struct net_device, ifindex));
8757 return insn - insn_buf;
8760 /* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of
8761 * context Structure, F is Field in context structure that contains a pointer
8762 * to Nested Structure of type NS that has the field NF.
8764 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller
8765 * to make sure that SIZE is not greater than the actual size of S.F.NF.
8767 * If offset OFF is provided, the load happens from that offset relative
8768 * to the offset of NS.NF.
8769 */
8770 #define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \
8772 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
8773 si->src_reg, offsetof(S, F)); \
8774 *insn++ = BPF_LDX_MEM( \
8775 SIZE, si->dst_reg, si->dst_reg, \
8776 bpf_target_off(NS, NF, sizeof_field(NS, NF), \
8781 #define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \
8782 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \
8783 BPF_FIELD_SIZEOF(NS, NF), 0)
8785 /* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantic similar to
8786 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operation.
8788 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
8789 * "register" since two registers available in convert_ctx_access are not
8790 * enough: we can override neither SRC, since it contains the value to
8791 * store, nor DST, since it contains the pointer to context that may be
8792 * used by later instructions. But we need a temporary place to save the
8793 * pointer to the nested structure whose field we want to store to.
8795 #define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) \
8797 int tmp_reg = BPF_REG_9; \
8798 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
8800 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
8802 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \
8804 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \
8805 si->dst_reg, offsetof(S, F)); \
8806 *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \
8807 bpf_target_off(NS, NF, sizeof_field(NS, NF), \
8810 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \
8814 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
8817 if (type == BPF_WRITE) { \
8818 SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \
8821 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \
8822 S, NS, F, NF, SIZE, OFF); \
8826 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \
8827 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \
8828 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
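/* For instance, SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
 * struct sockaddr, uaddr, sa_family), as used below, emits roughly:
 *
 *	dst_reg = ((struct bpf_sock_addr_kern *)src_reg)->uaddr;
 *	dst_reg = ((struct sockaddr *)dst_reg)->sa_family;
 */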
8830 static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
8831 const struct bpf_insn *si,
8832 struct bpf_insn *insn_buf,
8833 struct bpf_prog *prog, u32 *target_size)
8835 int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port);
8836 struct bpf_insn *insn = insn_buf;
8839 case offsetof(struct bpf_sock_addr, user_family):
8840 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
8841 struct sockaddr, uaddr, sa_family);
8844 case offsetof(struct bpf_sock_addr, user_ip4):
8845 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
8846 struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
8847 sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
8850 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
8852 off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
8853 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
8854 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
8855 sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
8859 case offsetof(struct bpf_sock_addr, user_port):
8860 /* To get the port we would need to know sa_family first and then
8861 * treat sockaddr as either sockaddr_in or sockaddr_in6.
8862 * We can simplify, though, since the port field has the same offset
8863 * and size in both structures.
8864 * Here we check this invariant at build time and then use just one
8865 * of the structures.
8866 */
8867 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
8868 offsetof(struct sockaddr_in6, sin6_port));
8869 BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) !=
8870 sizeof_field(struct sockaddr_in6, sin6_port));
8871 /* Account for sin6_port being smaller than user_port. */
8872 port_size = min(port_size, BPF_LDST_BYTES(si));
8873 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
8874 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
8875 sin6_port, bytes_to_bpf_size(port_size), 0, tmp_reg);
8878 case offsetof(struct bpf_sock_addr, family):
8879 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
8880 struct sock, sk, sk_family);
8883 case offsetof(struct bpf_sock_addr, type):
8884 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
8885 struct sock, sk, sk_type);
8888 case offsetof(struct bpf_sock_addr, protocol):
8889 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
8890 struct sock, sk, sk_protocol);
8893 case offsetof(struct bpf_sock_addr, msg_src_ip4):
8894 /* Treat t_ctx as struct in_addr for msg_src_ip4. */
8895 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
8896 struct bpf_sock_addr_kern, struct in_addr, t_ctx,
8897 s_addr, BPF_SIZE(si->code), 0, tmp_reg);
8900 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
8903 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
8904 /* Treat t_ctx as struct in6_addr for msg_src_ip6. */
8905 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
8906 struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
8907 s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
8909 case offsetof(struct bpf_sock_addr, sk):
8910 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
8911 si->dst_reg, si->src_reg,
8912 offsetof(struct bpf_sock_addr_kern, sk));
8916 return insn - insn_buf;
8919 static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
8920 const struct bpf_insn *si,
8921 struct bpf_insn *insn_buf,
8922 struct bpf_prog *prog,
8925 struct bpf_insn *insn = insn_buf;
8928 /* Helper macro for adding read access to tcp_sock or sock fields. */
8929 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
8931 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
8932 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
8933 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
8934 if (si->dst_reg == reg || si->src_reg == reg) \
8936 if (si->dst_reg == reg || si->src_reg == reg) \
8938 if (si->dst_reg == si->src_reg) { \
8939 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
8940 offsetof(struct bpf_sock_ops_kern, \
8942 fullsock_reg = reg; \
8945 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
8946 struct bpf_sock_ops_kern, \
8948 fullsock_reg, si->src_reg, \
8949 offsetof(struct bpf_sock_ops_kern, \
8951 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
8952 if (si->dst_reg == si->src_reg) \
8953 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
8954 offsetof(struct bpf_sock_ops_kern, \
8956 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
8957 struct bpf_sock_ops_kern, sk),\
8958 si->dst_reg, si->src_reg, \
8959 offsetof(struct bpf_sock_ops_kern, sk));\
8960 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
8962 si->dst_reg, si->dst_reg, \
8963 offsetof(OBJ, OBJ_FIELD)); \
8964 if (si->dst_reg == si->src_reg) { \
8965 *insn++ = BPF_JMP_A(1); \
8966 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
8967 offsetof(struct bpf_sock_ops_kern, \
8972 #define SOCK_OPS_GET_SK() \
8974 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
8975 if (si->dst_reg == reg || si->src_reg == reg) \
8977 if (si->dst_reg == reg || si->src_reg == reg) \
8979 if (si->dst_reg == si->src_reg) { \
8980 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
8981 offsetof(struct bpf_sock_ops_kern, \
8983 fullsock_reg = reg; \
8986 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
8987 struct bpf_sock_ops_kern, \
8989 fullsock_reg, si->src_reg, \
8990 offsetof(struct bpf_sock_ops_kern, \
8992 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
8993 if (si->dst_reg == si->src_reg) \
8994 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
8995 offsetof(struct bpf_sock_ops_kern, \
8997 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
8998 struct bpf_sock_ops_kern, sk),\
8999 si->dst_reg, si->src_reg, \
9000 offsetof(struct bpf_sock_ops_kern, sk));\
9001 if (si->dst_reg == si->src_reg) { \
9002 *insn++ = BPF_JMP_A(1); \
9003 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
9004 offsetof(struct bpf_sock_ops_kern, \
9009 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
9010 SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
9012 /* Helper macro for adding write access to tcp_sock or sock fields.
9013 * The macro is called with two registers, dst_reg which contains a pointer
9014 * to ctx (context) and src_reg which contains the value that should be
9015 * stored. However, we need an additional register since we cannot overwrite
9016 * dst_reg because it may be used later in the program.
9017 * Instead we "borrow" one of the other registers. We first save its value
9018 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
9019 * it at the end of the macro.
9020 */
9021 #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
9023 int reg = BPF_REG_9; \
9024 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
9025 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
9026 if (si->dst_reg == reg || si->src_reg == reg) \
9028 if (si->dst_reg == reg || si->src_reg == reg) \
9030 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
9031 offsetof(struct bpf_sock_ops_kern, \
9033 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
9034 struct bpf_sock_ops_kern, \
9037 offsetof(struct bpf_sock_ops_kern, \
9039 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
9040 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
9041 struct bpf_sock_ops_kern, sk),\
9043 offsetof(struct bpf_sock_ops_kern, sk));\
9044 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
9046 offsetof(OBJ, OBJ_FIELD)); \
9047 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
9048 offsetof(struct bpf_sock_ops_kern, \
9052 #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
9054 if (TYPE == BPF_WRITE) \
9055 SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
9057 SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
9058 } while (0)
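/* As a sketch, SOCK_OPS_SET_FIELD(sk_txhash, sk_txhash, struct sock),
 * used below, emits roughly (reg9 is the borrowed BPF_REG_9):
 *
 *	ctx->temp = reg9;		// park the borrowed register
 *	reg9 = ctx->is_fullsock;
 *	if (reg9) {
 *		reg9 = ctx->sk;
 *		reg9->sk_txhash = src_reg;
 *	}
 *	reg9 = ctx->temp;		// restore it
 */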
9060 if (insn > insn_buf)
9061 return insn - insn_buf;
9064 case offsetof(struct bpf_sock_ops, op):
9065 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
9067 si->dst_reg, si->src_reg,
9068 offsetof(struct bpf_sock_ops_kern, op));
9071 case offsetof(struct bpf_sock_ops, replylong[0]) ...
9072 offsetof(struct bpf_sock_ops, replylong[3]):
9073 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) !=
9074 sizeof_field(struct bpf_sock_ops_kern, reply));
9075 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) !=
9076 sizeof_field(struct bpf_sock_ops_kern, replylong));
9078 off -= offsetof(struct bpf_sock_ops, replylong[0]);
9079 off += offsetof(struct bpf_sock_ops_kern, replylong[0]);
9080 if (type == BPF_WRITE)
9081 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
9084 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
9088 case offsetof(struct bpf_sock_ops, family):
9089 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
9091 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9092 struct bpf_sock_ops_kern, sk),
9093 si->dst_reg, si->src_reg,
9094 offsetof(struct bpf_sock_ops_kern, sk));
9095 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
9096 offsetof(struct sock_common, skc_family));
9099 case offsetof(struct bpf_sock_ops, remote_ip4):
9100 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
9102 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9103 struct bpf_sock_ops_kern, sk),
9104 si->dst_reg, si->src_reg,
9105 offsetof(struct bpf_sock_ops_kern, sk));
9106 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9107 offsetof(struct sock_common, skc_daddr));
9110 case offsetof(struct bpf_sock_ops, local_ip4):
9111 BUILD_BUG_ON(sizeof_field(struct sock_common,
9112 skc_rcv_saddr) != 4);
9114 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9115 struct bpf_sock_ops_kern, sk),
9116 si->dst_reg, si->src_reg,
9117 offsetof(struct bpf_sock_ops_kern, sk));
9118 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9119 offsetof(struct sock_common,
9123 case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
9124 offsetof(struct bpf_sock_ops, remote_ip6[3]):
9125 #if IS_ENABLED(CONFIG_IPV6)
9126 BUILD_BUG_ON(sizeof_field(struct sock_common,
9127 skc_v6_daddr.s6_addr32[0]) != 4);
9130 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
9131 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9132 struct bpf_sock_ops_kern, sk),
9133 si->dst_reg, si->src_reg,
9134 offsetof(struct bpf_sock_ops_kern, sk));
9135 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9136 offsetof(struct sock_common,
9137 skc_v6_daddr.s6_addr32[0]) +
9140 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
9144 case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
9145 offsetof(struct bpf_sock_ops, local_ip6[3]):
9146 #if IS_ENABLED(CONFIG_IPV6)
9147 BUILD_BUG_ON(sizeof_field(struct sock_common,
9148 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
9151 off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
9152 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9153 struct bpf_sock_ops_kern, sk),
9154 si->dst_reg, si->src_reg,
9155 offsetof(struct bpf_sock_ops_kern, sk));
9156 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9157 offsetof(struct sock_common,
9158 skc_v6_rcv_saddr.s6_addr32[0]) +
9161 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
9165 case offsetof(struct bpf_sock_ops, remote_port):
9166 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
9168 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9169 struct bpf_sock_ops_kern, sk),
9170 si->dst_reg, si->src_reg,
9171 offsetof(struct bpf_sock_ops_kern, sk));
9172 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
9173 offsetof(struct sock_common, skc_dport));
9174 #ifndef __BIG_ENDIAN_BITFIELD
9175 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
9179 case offsetof(struct bpf_sock_ops, local_port):
9180 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
9182 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9183 struct bpf_sock_ops_kern, sk),
9184 si->dst_reg, si->src_reg,
9185 offsetof(struct bpf_sock_ops_kern, sk));
9186 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
9187 offsetof(struct sock_common, skc_num));
9190 case offsetof(struct bpf_sock_ops, is_fullsock):
9191 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9192 struct bpf_sock_ops_kern,
9194 si->dst_reg, si->src_reg,
9195 offsetof(struct bpf_sock_ops_kern,
9199 case offsetof(struct bpf_sock_ops, state):
9200 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1);
9202 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9203 struct bpf_sock_ops_kern, sk),
9204 si->dst_reg, si->src_reg,
9205 offsetof(struct bpf_sock_ops_kern, sk));
9206 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
9207 offsetof(struct sock_common, skc_state));
9210 case offsetof(struct bpf_sock_ops, rtt_min):
9211 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
9212 sizeof(struct minmax));
9213 BUILD_BUG_ON(sizeof(struct minmax) <
9214 sizeof(struct minmax_sample));
9216 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9217 struct bpf_sock_ops_kern, sk),
9218 si->dst_reg, si->src_reg,
9219 offsetof(struct bpf_sock_ops_kern, sk));
9220 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9221 offsetof(struct tcp_sock, rtt_min) +
9222 sizeof_field(struct minmax_sample, t));
9225 case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
9226 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
9230 case offsetof(struct bpf_sock_ops, sk_txhash):
9231 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
	case offsetof(struct bpf_sock_ops, snd_cwnd):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd);
		break;
	case offsetof(struct bpf_sock_ops, srtt_us):
		SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us);
		break;
	case offsetof(struct bpf_sock_ops, snd_ssthresh):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh);
		break;
	case offsetof(struct bpf_sock_ops, rcv_nxt):
		SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt);
		break;
	case offsetof(struct bpf_sock_ops, snd_nxt):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt);
		break;
	case offsetof(struct bpf_sock_ops, snd_una):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una);
		break;
	case offsetof(struct bpf_sock_ops, mss_cache):
		SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache);
		break;
	case offsetof(struct bpf_sock_ops, ecn_flags):
		SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags);
		break;
	case offsetof(struct bpf_sock_ops, rate_delivered):
		SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered);
		break;
	case offsetof(struct bpf_sock_ops, rate_interval_us):
		SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us);
		break;
	case offsetof(struct bpf_sock_ops, packets_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out);
		break;
	case offsetof(struct bpf_sock_ops, retrans_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out);
		break;
	case offsetof(struct bpf_sock_ops, total_retrans):
		SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans);
		break;
	case offsetof(struct bpf_sock_ops, segs_in):
		SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in);
		break;
	case offsetof(struct bpf_sock_ops, data_segs_in):
		SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in);
		break;
	case offsetof(struct bpf_sock_ops, segs_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out);
		break;
	case offsetof(struct bpf_sock_ops, data_segs_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out);
		break;
	case offsetof(struct bpf_sock_ops, lost_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out);
		break;
	case offsetof(struct bpf_sock_ops, sacked_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out);
		break;
	case offsetof(struct bpf_sock_ops, bytes_received):
		SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received);
		break;
	case offsetof(struct bpf_sock_ops, bytes_acked):
		SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
		break;
	case offsetof(struct bpf_sock_ops, sk):
		SOCK_OPS_GET_SK();
		break;
	case offsetof(struct bpf_sock_ops, skb_data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb_data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb_data_end));
		break;
	case offsetof(struct bpf_sock_ops, skb_data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct sk_buff, data));
		break;
	case offsetof(struct bpf_sock_ops, skb_len):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct sk_buff, len));
		break;
	case offsetof(struct bpf_sock_ops, skb_tcp_flags):
		off = offsetof(struct sk_buff, cb);
		off += offsetof(struct tcp_skb_cb, tcp_flags);
		*target_size = sizeof_field(struct tcp_skb_cb, tcp_flags);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb,
						       tcp_flags),
				      si->dst_reg, si->dst_reg, off);
		break;
	}
	return insn - insn_buf;
}
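
/* Illustrative sketch (not part of the original source): the rewrites above
 * are what let a BPF_PROG_TYPE_SOCK_OPS program read context fields directly.
 * A minimal program such as:
 *
 *	SEC("sockops")
 *	int sockops_prog(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_printk("srtt_us=%u\n", skops->srtt_us);
 *		return 1;
 *	}
 *
 * never dereferences struct bpf_sock_ops at run time; at load time the
 * verifier calls sock_ops_convert_ctx_access() to replace each field access
 * with loads through struct bpf_sock_ops_kern and the underlying tcp_sock.
 */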

static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, data_end):
		off = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct tcp_skb_cb, bpf.data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}
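
/* Illustrative note (not from the original source): unlike tc/XDP programs,
 * sk_skb programs take data_end from tcp_skb_cb::bpf.data_end stashed in
 * skb->cb by the sockmap receive path, which is why the data_end case above
 * cannot fall through to the generic bpf_convert_ctx_access() rewrite.
 */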

static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
#if IS_ENABLED(CONFIG_IPV6)
	int off;
#endif

	/* convert ctx uses the fact sg element is first in struct */
	BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);

	switch (si->off) {
	case offsetof(struct sk_msg_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data));
		break;
	case offsetof(struct sk_msg_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data_end));
		break;
	case offsetof(struct sk_msg_md, family):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct sk_msg_md, remote_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct sk_msg_md, local_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct sk_msg_md, remote_ip6[0]) ...
	     offsetof(struct sk_msg_md, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, local_ip6[0]) ...
	     offsetof(struct sk_msg_md, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, remote_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct sk_msg_md, local_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;

	case offsetof(struct sk_msg_md, size):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg_sg, size));
		break;

	case offsetof(struct sk_msg_md, sk):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		break;
	}

	return insn - insn_buf;
}
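
/* Illustrative sketch (not part of the original source): with the rewrites
 * above, a BPF_PROG_TYPE_SK_MSG program attached to a sockmap can bound-check
 * and inspect the message payload directly:
 *
 *	SEC("sk_msg")
 *	int msg_prog(struct sk_msg_md *msg)
 *	{
 *		void *data = msg->data;
 *		void *data_end = msg->data_end;
 *
 *		if (data + 1 > data_end)
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 *
 * Each field access is translated at load time into reads of struct sk_msg
 * and the socket's sock_common; no UAPI struct sk_msg_md exists in memory.
 */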

const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= cg_skb_func_proto,
	.is_valid_access	= cg_skb_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_in_verifier_ops = {
	.get_func_proto		= lwt_in_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_in_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_out_verifier_ops = {
	.get_func_proto		= lwt_out_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_out_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
	.get_func_proto		= lwt_seg6local_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_seg6local_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= bpf_sock_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};

const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
	.get_func_proto		= sock_addr_func_proto,
	.is_valid_access	= sock_addr_is_valid_access,
	.convert_ctx_access	= sock_addr_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_addr_prog_ops = {
};

const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};

const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};

const struct bpf_verifier_ops sk_msg_verifier_ops = {
	.get_func_proto		= sk_msg_func_proto,
	.is_valid_access	= sk_msg_is_valid_access,
	.convert_ctx_access	= sk_msg_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops sk_msg_prog_ops = {
};

const struct bpf_verifier_ops flow_dissector_verifier_ops = {
	.get_func_proto		= flow_dissector_func_proto,
	.is_valid_access	= flow_dissector_is_valid_access,
	.convert_ctx_access	= flow_dissector_convert_ctx_access,
};

const struct bpf_prog_ops flow_dissector_prog_ops = {
	.test_run		= bpf_prog_test_run_flow_dissector,
};

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
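
/* Illustrative sketch (not part of the original source): user space reaches
 * sk_detach_filter() through the SO_DETACH_FILTER socket option, e.g.:
 *
 *	int dummy = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 *
 * The option value is ignored; the call fails with EPERM when the filter
 * was pinned with SO_LOCK_FILTER (SOCK_FILTER_LOCKED above).
 */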

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}
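
/* Illustrative sketch (not part of the original source): the !len probe above
 * supports the usual two-step dump through the SO_GET_FILTER socket option:
 *
 *	socklen_t optlen = 0;
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &optlen);
 *	struct sock_filter *insns = calloc(optlen, sizeof(*insns));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &optlen);
 *
 * Note that the length counts classic BPF instructions (filter blocks), not
 * bytes, and only filters with a preserved orig_prog can be dumped this way.
 */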

#ifdef CONFIG_INET
static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
				    struct sock_reuseport *reuse,
				    struct sock *sk, struct sk_buff *skb,
				    u32 hash)
{
	reuse_kern->skb = skb;
	reuse_kern->sk = sk;
	reuse_kern->selected_sk = NULL;
	reuse_kern->data_end = skb->data + skb_headlen(skb);
	reuse_kern->hash = hash;
	reuse_kern->reuseport_id = reuse->reuseport_id;
	reuse_kern->bind_inany = reuse->bind_inany;
}

struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  u32 hash)
{
	struct sk_reuseport_kern reuse_kern;
	enum sk_action action;

	bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
	action = BPF_PROG_RUN(prog, &reuse_kern);

	if (action == SK_PASS)
		return reuse_kern.selected_sk;
	else
		return ERR_PTR(-ECONNREFUSED);
}
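
/* Illustrative sketch (not part of the original source): a minimal
 * BPF_PROG_TYPE_SK_REUSEPORT program that steers each packet to a socket
 * picked from a REUSEPORT_SOCKARRAY by hash:
 *
 *	SEC("sk_reuseport")
 *	int select_sock(struct sk_reuseport_md *md)
 *	{
 *		__u32 key = md->hash % NR_SOCKS;
 *
 *		if (bpf_sk_select_reuseport(md, &sockarray, &key, 0))
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 *
 * SK_PASS with a selected socket makes bpf_run_sk_reuseport() return it;
 * NR_SOCKS and sockarray are hypothetical names for this example.
 */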

BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
	   struct bpf_map *, map, void *, key, u32, flags)
{
	bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
	struct sock_reuseport *reuse;
	struct sock *selected_sk;

	selected_sk = map->ops->map_lookup_elem(map, key);
	if (!selected_sk)
		return -ENOENT;

	reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
	if (!reuse) {
		/* Lookup in sock_map can return TCP ESTABLISHED sockets. */
		if (sk_is_refcounted(selected_sk))
			sock_put(selected_sk);

		/* reuseport_array has only sk with non NULL sk_reuseport_cb.
		 * The only (!reuse) case here is - the sk has already been
		 * unhashed (e.g. by close()), so treat it as -ENOENT.
		 *
		 * Other maps (e.g. sock_map) do not provide this guarantee and
		 * the sk may never be in the reuseport group to begin with.
		 */
		return is_sockarray ? -ENOENT : -EINVAL;
	}

	if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
		struct sock *sk = reuse_kern->sk;

		if (sk->sk_protocol != selected_sk->sk_protocol)
			return -EPROTOTYPE;
		else if (sk->sk_family != selected_sk->sk_family)
			return -EAFNOSUPPORT;

		/* Catch all. Likely bound to a different sockaddr. */
		return -EBADFD;
	}

	reuse_kern->selected_sk = selected_sk;

	return 0;
}

static const struct bpf_func_proto sk_select_reuseport_proto = {
	.func		= sk_select_reuseport,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(sk_reuseport_load_bytes,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len)
{
	return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
	.func		= sk_reuseport_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(sk_reuseport_load_bytes_relative,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len, u32, start_header)
{
	return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
					       len, start_header);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
	.func		= sk_reuseport_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
sk_reuseport_func_proto(enum bpf_func_id func_id,
			const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sk_select_reuseport:
		return &sk_select_reuseport_proto;
	case BPF_FUNC_skb_load_bytes:
		return &sk_reuseport_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &sk_reuseport_load_bytes_relative_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static bool
sk_reuseport_is_valid_access(int off, int size,
			     enum bpf_access_type type,
			     const struct bpf_prog *prog,
			     struct bpf_insn_access_aux *info)
{
	const u32 size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
	    off % size || type != BPF_READ)
		return false;

	switch (off) {
	case offsetof(struct sk_reuseport_md, data):
		info->reg_type = PTR_TO_PACKET;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, hash):
		return size == size_default;

	/* Fields that allow narrowing */
	case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
		if (size < sizeof_field(struct sk_buff, protocol))
			return false;
		fallthrough;
	case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
	case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
	case bpf_ctx_range(struct sk_reuseport_md, len):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);

	default:
		return false;
	}
}

#define SK_REUSEPORT_LOAD_FIELD(F) ({					\
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
			      si->dst_reg, si->src_reg,			\
			      bpf_target_off(struct sk_reuseport_kern, F, \
					     sizeof_field(struct sk_reuseport_kern, F), \
					     target_size));		\
	})

#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD)				\
	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,		\
				    struct sk_buff,			\
				    skb,				\
				    SKB_FIELD)

#define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD)				\
	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,		\
				    struct sock,			\
				    sk,					\
				    SK_FIELD)

static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
					   const struct bpf_insn *si,
					   struct bpf_insn *insn_buf,
					   struct bpf_prog *prog,
					   u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct sk_reuseport_md, data):
		SK_REUSEPORT_LOAD_SKB_FIELD(data);
		break;

	case offsetof(struct sk_reuseport_md, len):
		SK_REUSEPORT_LOAD_SKB_FIELD(len);
		break;

	case offsetof(struct sk_reuseport_md, eth_protocol):
		SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
		break;

	case offsetof(struct sk_reuseport_md, ip_protocol):
		SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol);
		break;

	case offsetof(struct sk_reuseport_md, data_end):
		SK_REUSEPORT_LOAD_FIELD(data_end);
		break;

	case offsetof(struct sk_reuseport_md, hash):
		SK_REUSEPORT_LOAD_FIELD(hash);
		break;

	case offsetof(struct sk_reuseport_md, bind_inany):
		SK_REUSEPORT_LOAD_FIELD(bind_inany);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
	.get_func_proto		= sk_reuseport_func_proto,
	.is_valid_access	= sk_reuseport_is_valid_access,
	.convert_ctx_access	= sk_reuseport_convert_ctx_access,
};

const struct bpf_prog_ops sk_reuseport_prog_ops = {
};

DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled);
EXPORT_SYMBOL(bpf_sk_lookup_enabled);
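
/* Illustrative note (not from the original source): bpf_sk_lookup_enabled is
 * flipped when an SK_LOOKUP program is attached to or detached from a network
 * namespace, so the inet/inet6 listener-lookup fast paths pay only a
 * static-branch cost while no program is installed.
 */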

BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx,
	   struct sock *, sk, u64, flags)
{
	if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE |
			       BPF_SK_LOOKUP_F_NO_REUSEPORT)))
		return -EINVAL;
	if (unlikely(sk && sk_is_refcounted(sk)))
		return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */
	if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED))
		return -ESOCKTNOSUPPORT; /* reject connected sockets */

	/* Check if socket is suitable for packet L3/L4 protocol */
	if (sk && sk->sk_protocol != ctx->protocol)
		return -EPROTOTYPE;
	if (sk && sk->sk_family != ctx->family &&
	    (sk->sk_family == AF_INET || ipv6_only_sock(sk)))
		return -EAFNOSUPPORT;

	if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE))
		return -EEXIST;

	/* Select socket as lookup result */
	ctx->selected_sk = sk;
	ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT;
	return 0;
}

static const struct bpf_func_proto bpf_sk_lookup_assign_proto = {
	.func		= bpf_sk_lookup_assign,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_SOCKET_OR_NULL,
	.arg3_type	= ARG_ANYTHING,
};
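
/* Illustrative sketch (not part of the original source): an SK_LOOKUP program
 * typically pulls a listening socket out of a sockmap and hands it to the
 * stack via the helper implemented above:
 *
 *	SEC("sk_lookup")
 *	int redirect(struct bpf_sk_lookup *ctx)
 *	{
 *		struct bpf_sock *sk;
 *		__u32 key = 0;
 *		long err;
 *
 *		sk = bpf_map_lookup_elem(&redir_map, &key);
 *		if (!sk)
 *			return SK_PASS;
 *		err = bpf_sk_assign(ctx, sk, 0);
 *		bpf_sk_release(sk);
 *		return err ? SK_DROP : SK_PASS;
 *	}
 *
 * redir_map is a hypothetical BPF_MAP_TYPE_SOCKMAP for this example.
 */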

static const struct bpf_func_proto *
sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	case BPF_FUNC_sk_assign:
		return &bpf_sk_lookup_assign_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

static bool sk_lookup_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct bpf_sk_lookup))
		return false;
	if (off % size != 0)
		return false;
	if (type != BPF_READ)
		return false;

	switch (off) {
	case offsetof(struct bpf_sk_lookup, sk):
		info->reg_type = PTR_TO_SOCKET_OR_NULL;
		return size == sizeof(__u64);

	case bpf_ctx_range(struct bpf_sk_lookup, family):
	case bpf_ctx_range(struct bpf_sk_lookup, protocol):
	case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4):
	case bpf_ctx_range(struct bpf_sk_lookup, local_ip4):
	case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]):
	case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
	case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
	case bpf_ctx_range(struct bpf_sk_lookup, local_port):
		bpf_ctx_record_field_size(info, sizeof(__u32));
		return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));

	default:
		return false;
	}
}

static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
					const struct bpf_insn *si,
					struct bpf_insn *insn_buf,
					struct bpf_prog *prog,
					u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sk_lookup, sk):
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sk_lookup_kern, selected_sk));
		break;

	case offsetof(struct bpf_sk_lookup, family):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     family, 2, target_size));
		break;

	case offsetof(struct bpf_sk_lookup, protocol):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     protocol, 2, target_size));
		break;

	case offsetof(struct bpf_sk_lookup, remote_ip4):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     v4.saddr, 4, target_size));
		break;

	case offsetof(struct bpf_sk_lookup, local_ip4):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     v4.daddr, 4, target_size));
		break;

	case bpf_ctx_range_till(struct bpf_sk_lookup,
				remote_ip6[0], remote_ip6[3]): {
#if IS_ENABLED(CONFIG_IPV6)
		int off = si->off;

		off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]);
		off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sk_lookup_kern, v6.saddr));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;
	}
	case bpf_ctx_range_till(struct bpf_sk_lookup,
				local_ip6[0], local_ip6[3]): {
#if IS_ENABLED(CONFIG_IPV6)
		int off = si->off;

		off -= offsetof(struct bpf_sk_lookup, local_ip6[0]);
		off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sk_lookup_kern, v6.daddr));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;
	}
	case offsetof(struct bpf_sk_lookup, remote_port):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     sport, 2, target_size));
		break;

	case offsetof(struct bpf_sk_lookup, local_port):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct bpf_sk_lookup_kern,
						     dport, 2, target_size));
		break;
	}

	return insn - insn_buf;
}

const struct bpf_prog_ops sk_lookup_prog_ops = {
};

const struct bpf_verifier_ops sk_lookup_verifier_ops = {
	.get_func_proto		= sk_lookup_func_proto,
	.is_valid_access	= sk_lookup_is_valid_access,
	.convert_ctx_access	= sk_lookup_convert_ctx_access,
};

#endif /* CONFIG_INET */

DEFINE_BPF_DISPATCHER(xdp)

void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
{
	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
}
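
/* Illustrative note (not from the original source): the XDP dispatcher
 * replaces the retpoline-afflicted indirect call in bpf_prog_run_xdp() with
 * a direct-jump trampoline; bpf_prog_change_xdp() re-patches that trampoline
 * whenever a driver swaps its attached XDP program.
 */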

#ifdef CONFIG_DEBUG_INFO_BTF
BTF_ID_LIST_GLOBAL(btf_sock_ids)
#define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type)
BTF_SOCK_TYPE_xxx
#undef BTF_SOCK_TYPE
#else
u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
#endif

BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{
	/* tcp6_sock type is not generated in dwarf and hence btf,
	 * trigger an explicit type generation here.
	 */
	BTF_TYPE_EMIT(struct tcp6_sock);
	if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP &&
	    sk->sk_family == AF_INET6)
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
	.func			= bpf_skc_to_tcp6_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
};

BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk)
{
	if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
	.func			= bpf_skc_to_tcp_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_TCP],
};

BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk)
{
#ifdef CONFIG_INET
	if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT)
		return (unsigned long)sk;
#endif

#if IS_BUILTIN(CONFIG_IPV6)
	if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT)
		return (unsigned long)sk;
#endif

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
	.func			= bpf_skc_to_tcp_timewait_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
};

BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk)
{
#ifdef CONFIG_INET
	if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV)
		return (unsigned long)sk;
#endif

#if IS_BUILTIN(CONFIG_IPV6)
	if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV)
		return (unsigned long)sk;
#endif

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
	.func			= bpf_skc_to_tcp_request_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
};

BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk)
{
	/* udp6_sock type is not generated in dwarf and hence btf,
	 * trigger an explicit type generation here.
	 */
	BTF_TYPE_EMIT(struct udp6_sock);
	if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP &&
	    sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6)
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
	.func			= bpf_skc_to_udp6_sock,
	.gpl_only		= false,
	.ret_type		= RET_PTR_TO_BTF_ID_OR_NULL,
	.arg1_type		= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.ret_btf_id		= &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
};
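
/* Illustrative sketch (not part of the original source): tracing and socket
 * inspection programs use these casts to go from a generic socket pointer to
 * a BTF-typed one whose fields the verifier can check, e.g.:
 *
 *	struct tcp_sock *tp = bpf_skc_to_tcp_sock(sk);
 *
 *	if (tp)
 *		bpf_printk("snd_cwnd=%u\n", tp->snd_cwnd);
 *
 * A NULL return means the socket is not of the requested type or state.
 */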

static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id)
{
	const struct bpf_func_proto *func;

	switch (func_id) {
	case BPF_FUNC_skc_to_tcp6_sock:
		func = &bpf_skc_to_tcp6_sock_proto;
		break;
	case BPF_FUNC_skc_to_tcp_sock:
		func = &bpf_skc_to_tcp_sock_proto;
		break;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		func = &bpf_skc_to_tcp_timewait_sock_proto;
		break;
	case BPF_FUNC_skc_to_tcp_request_sock:
		func = &bpf_skc_to_tcp_request_sock_proto;
		break;
	case BPF_FUNC_skc_to_udp6_sock:
		func = &bpf_skc_to_udp6_sock_proto;
		break;
	default:
		return bpf_base_func_proto(func_id);
	}

	if (!perfmon_capable())
		return NULL;

	return func;
}