1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Linux Socket Filter - Kernel level socket filtering
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37 #include <linux/nospec.h>
38 #include <linux/bpf_mem_alloc.h>
39 #include <linux/memcontrol.h>
41 #include <asm/barrier.h>
42 #include <asm/unaligned.h>
45 #define BPF_R0 regs[BPF_REG_0]
46 #define BPF_R1 regs[BPF_REG_1]
47 #define BPF_R2 regs[BPF_REG_2]
48 #define BPF_R3 regs[BPF_REG_3]
49 #define BPF_R4 regs[BPF_REG_4]
50 #define BPF_R5 regs[BPF_REG_5]
51 #define BPF_R6 regs[BPF_REG_6]
52 #define BPF_R7 regs[BPF_REG_7]
53 #define BPF_R8 regs[BPF_REG_8]
54 #define BPF_R9 regs[BPF_REG_9]
55 #define BPF_R10 regs[BPF_REG_10]
58 #define DST regs[insn->dst_reg]
59 #define SRC regs[insn->src_reg]
60 #define FP regs[BPF_REG_FP]
61 #define AX regs[BPF_REG_AX]
62 #define ARG1 regs[BPF_REG_ARG1]
63 #define CTX regs[BPF_REG_CTX]
67 struct bpf_mem_alloc bpf_global_ma;
68 bool bpf_global_ma_set;
70 /* No hurry in this branch
72 * Exported for the bpf jit load helper.
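*
* Illustrative note (editor's example, not an additional code path): a
* classic BPF filter that reads the IPv4 protocol field uses
* k = SKF_NET_OFF + 9, which this helper resolves to
* skb_network_header(skb) + 9; SKF_LL_OFF works the same way relative
* to the MAC header.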
74 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
78 if (k >= SKF_NET_OFF) {
79 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
80 } else if (k >= SKF_LL_OFF) {
81 if (unlikely(!skb_mac_header_was_set(skb)))
83 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
85 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
91 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
93 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
94 struct bpf_prog_aux *aux;
97 size = round_up(size, PAGE_SIZE);
98 fp = __vmalloc(size, gfp_flags);
102 aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
107 fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
114 fp->pages = size / PAGE_SIZE;
117 fp->jit_requested = ebpf_jit_enabled();
118 fp->blinding_requested = bpf_jit_blinding_enabled(fp);
119 #ifdef CONFIG_CGROUP_BPF
120 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
123 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
124 mutex_init(&fp->aux->used_maps_mutex);
125 mutex_init(&fp->aux->dst_mutex);
130 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
132 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
133 struct bpf_prog *prog;
136 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
140 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
142 free_percpu(prog->active);
148 for_each_possible_cpu(cpu) {
149 struct bpf_prog_stats *pstats;
151 pstats = per_cpu_ptr(prog->stats, cpu);
152 u64_stats_init(&pstats->syncp);
156 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
158 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
160 if (!prog->aux->nr_linfo || !prog->jit_requested)
163 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
164 sizeof(*prog->aux->jited_linfo),
165 bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
166 if (!prog->aux->jited_linfo)
172 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
174 if (prog->aux->jited_linfo &&
175 (!prog->jited || !prog->aux->jited_linfo[0])) {
176 kvfree(prog->aux->jited_linfo);
177 prog->aux->jited_linfo = NULL;
180 kfree(prog->aux->kfunc_tab);
181 prog->aux->kfunc_tab = NULL;
184 /* The JIT engine is responsible for providing an array
185 * for insn_off to the jited_off mapping (insn_to_jit_off).
187 * The idx to this array is the insn_off. Hence, the insn_off
188 * here is relative to the prog itself instead of the main prog.
189 * This array has one entry for each xlated bpf insn.
191 * jited_off is the byte off to the end of the jited insn.
195 * The first bpf insn off of the prog. The insn off
196 * here is relative to the main prog.
197 * e.g. if prog is a subprog, insn_start > 0
199 * The prog's idx to prog->aux->linfo and jited_linfo
201 * jited_linfo[linfo_idx] = prog->bpf_func
205 * jited_linfo[i] = prog->bpf_func +
206 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
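*
* A worked example with hypothetical numbers (editor's illustration): for
* a subprog with insn_start = 10 and linfo[i].insn_off = 14, the entry is
* jited_linfo[i] = bpf_func + insn_to_jit_off[14 - 10 - 1], i.e. bpf_func
* plus the end offset of the jited insn just before insn 14, which is the
* start of the jited code for that line.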
208 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
209 const u32 *insn_to_jit_off)
211 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
212 const struct bpf_line_info *linfo;
215 if (!prog->aux->jited_linfo)
216 /* Userspace did not provide linfo */
219 linfo_idx = prog->aux->linfo_idx;
220 linfo = &prog->aux->linfo[linfo_idx];
221 insn_start = linfo[0].insn_off;
222 insn_end = insn_start + prog->len;
224 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
225 jited_linfo[0] = prog->bpf_func;
227 nr_linfo = prog->aux->nr_linfo - linfo_idx;
229 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
230 /* The verifier ensures that linfo[i].insn_off is
231 * strictly increasing
233 jited_linfo[i] = prog->bpf_func +
234 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
237 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
238 gfp_t gfp_extra_flags)
240 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
244 size = round_up(size, PAGE_SIZE);
245 pages = size / PAGE_SIZE;
246 if (pages <= fp_old->pages)
249 fp = __vmalloc(size, gfp_flags);
251 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
255 /* We keep fp->aux from fp_old around in the new
256 * reallocated structure.
259 fp_old->stats = NULL;
260 fp_old->active = NULL;
261 __bpf_prog_free(fp_old);
267 void __bpf_prog_free(struct bpf_prog *fp)
270 mutex_destroy(&fp->aux->used_maps_mutex);
271 mutex_destroy(&fp->aux->dst_mutex);
272 kfree(fp->aux->poke_tab);
275 free_percpu(fp->stats);
276 free_percpu(fp->active);
280 int bpf_prog_calc_tag(struct bpf_prog *fp)
282 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
283 u32 raw_size = bpf_prog_tag_scratch_size(fp);
284 u32 digest[SHA1_DIGEST_WORDS];
285 u32 ws[SHA1_WORKSPACE_WORDS];
286 u32 i, bsize, psize, blocks;
287 struct bpf_insn *dst;
293 raw = vmalloc(raw_size);
298 memset(ws, 0, sizeof(ws));
300 /* We need to take out the map fds for the digest calculation
301 * since they are unstable from the user space side.
304 for (i = 0, was_ld_map = false; i < fp->len; i++) {
305 dst[i] = fp->insnsi[i];
307 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
308 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
309 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
312 } else if (was_ld_map &&
314 dst[i].dst_reg == 0 &&
315 dst[i].src_reg == 0 &&
324 psize = bpf_prog_insn_size(fp);
325 memset(&raw[psize], 0, raw_size - psize);
328 bsize = round_up(psize, SHA1_BLOCK_SIZE);
329 blocks = bsize / SHA1_BLOCK_SIZE;
331 if (bsize - psize >= sizeof(__be64)) {
332 bits = (__be64 *)(todo + bsize - sizeof(__be64));
334 bits = (__be64 *)(todo + bsize + bits_offset);
337 *bits = cpu_to_be64((psize - 1) << 3);
340 sha1_transform(digest, todo, ws);
341 todo += SHA1_BLOCK_SIZE;
344 result = (__force __be32 *)digest;
345 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
346 result[i] = cpu_to_be32(digest[i]);
347 memcpy(fp->tag, result, sizeof(fp->tag));
353 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
354 s32 end_new, s32 curr, const bool probe_pass)
356 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
357 s32 delta = end_new - end_old;
360 if (curr < pos && curr + imm + 1 >= end_old)
362 else if (curr >= end_new && curr + imm + 1 < end_new)
364 if (imm < imm_min || imm > imm_max)
371 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
372 s32 end_new, s32 curr, const bool probe_pass)
374 const s32 off_min = S16_MIN, off_max = S16_MAX;
375 s32 delta = end_new - end_old;
378 if (curr < pos && curr + off + 1 >= end_old)
380 else if (curr >= end_new && curr + off + 1 < end_new)
382 if (off < off_min || off > off_max)
389 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
390 s32 end_new, const bool probe_pass)
392 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
393 struct bpf_insn *insn = prog->insnsi;
396 for (i = 0; i < insn_cnt; i++, insn++) {
399 /* In the probing pass we still operate on the original,
400 * unpatched image in order to check overflows before we
401 * do any other adjustments. Therefore skip the patchlet.
403 if (probe_pass && i == pos) {
405 insn = prog->insnsi + end_old;
407 if (bpf_pseudo_func(insn)) {
408 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
409 end_new, i, probe_pass);
415 if ((BPF_CLASS(code) != BPF_JMP &&
416 BPF_CLASS(code) != BPF_JMP32) ||
417 BPF_OP(code) == BPF_EXIT)
419 /* Adjust offset of jmps if we cross patch boundaries. */
420 if (BPF_OP(code) == BPF_CALL) {
421 if (insn->src_reg != BPF_PSEUDO_CALL)
423 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
424 end_new, i, probe_pass);
426 ret = bpf_adj_delta_to_off(insn, pos, end_old,
427 end_new, i, probe_pass);
436 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
438 struct bpf_line_info *linfo;
441 nr_linfo = prog->aux->nr_linfo;
442 if (!nr_linfo || !delta)
445 linfo = prog->aux->linfo;
447 for (i = 0; i < nr_linfo; i++)
448 if (off < linfo[i].insn_off)
451 /* Push all off < linfo[i].insn_off by delta */
452 for (; i < nr_linfo; i++)
453 linfo[i].insn_off += delta;
456 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
457 const struct bpf_insn *patch, u32 len)
459 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
460 const u32 cnt_max = S16_MAX;
461 struct bpf_prog *prog_adj;
464 /* Since our patchlet doesn't expand the image, we're done. */
465 if (insn_delta == 0) {
466 memcpy(prog->insnsi + off, patch, sizeof(*patch));
470 insn_adj_cnt = prog->len + insn_delta;
472 /* Reject anything that would potentially let the insn->off
473 * target overflow when we have excessive program expansions.
474 * We need to probe here before we do any reallocation where
475 * we afterwards may not fail anymore.
477 if (insn_adj_cnt > cnt_max &&
478 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
481 /* Several new instructions need to be inserted. Make room
482 * for them. Likely, there's no need for a new allocation as
483 * the last page could have large enough tailroom.
485 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
488 return ERR_PTR(-ENOMEM);
490 prog_adj->len = insn_adj_cnt;
492 /* Patching happens in 3 steps:
494 * 1) Move over tail of insnsi from next instruction onwards,
495 * so we can patch the single target insn with one or more
496 * new ones (patching is always from 1 to n insns, n > 0).
497 * 2) Inject new instructions at the target location.
498 * 3) Adjust branch offsets if necessary.
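*
* For example (editor's illustration): replacing the single insn at
* off = 10 with a 3-insn patchlet gives insn_delta = 2, so the tail is
* moved up by two slots and every jump whose span crosses offset 10 has
* its off/imm adjusted by that delta of 2 (added or subtracted depending
* on the jump direction) via bpf_adj_branches() below.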
500 insn_rest = insn_adj_cnt - off - len;
502 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
503 sizeof(*patch) * insn_rest);
504 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
506 /* We are guaranteed to not fail at this point, otherwise
507 * the ship has sailed and we could no longer revert to the
508 * original state. An overflow cannot happen at this point.
510 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
512 bpf_adj_linfo(prog_adj, off, insn_delta);
517 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
519 /* Branch offsets can't overflow when program is shrinking, no need
520 * to call bpf_adj_branches(..., true) here
522 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
523 sizeof(struct bpf_insn) * (prog->len - off - cnt));
526 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
529 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
533 for (i = 0; i < fp->aux->func_cnt; i++)
534 bpf_prog_kallsyms_del(fp->aux->func[i]);
537 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
539 bpf_prog_kallsyms_del_subprogs(fp);
540 bpf_prog_kallsyms_del(fp);
543 #ifdef CONFIG_BPF_JIT
544 /* All BPF JIT sysctl knobs here. */
545 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
546 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
547 int bpf_jit_harden __read_mostly;
548 long bpf_jit_limit __read_mostly;
549 long bpf_jit_limit_max __read_mostly;
552 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
554 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
556 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
557 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len;
561 bpf_prog_ksym_set_name(struct bpf_prog *prog)
563 char *sym = prog->aux->ksym.name;
564 const char *end = sym + KSYM_NAME_LEN;
565 const struct btf_type *type;
566 const char *func_name;
568 BUILD_BUG_ON(sizeof("bpf_prog_") +
569 sizeof(prog->tag) * 2 +
570 /* name has been null terminated.
571 * We would need +1 for the '_' preceding
572 * the name. However, the null character
573 * is double counted between the name and the
574 * sizeof("bpf_prog_") above, so we omit the +1 here.
577 sizeof(prog->aux->name) > KSYM_NAME_LEN);
579 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
580 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
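/* At this point sym holds "bpf_prog_" followed by the tag as 16 hex
* characters, e.g. bpf_prog_5a1b9c0d2e3f4a5b (hypothetical tag); the code
* below appends "_<func or prog name>" when one is available.
*/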
582 /* prog->aux->name will be ignored if full btf name is available */
583 if (prog->aux->func_info_cnt) {
584 type = btf_type_by_id(prog->aux->btf,
585 prog->aux->func_info[prog->aux->func_idx].type_id);
586 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
587 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
591 if (prog->aux->name[0])
592 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
597 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
599 return container_of(n, struct bpf_ksym, tnode)->start;
602 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
603 struct latch_tree_node *b)
605 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
608 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
610 unsigned long val = (unsigned long)key;
611 const struct bpf_ksym *ksym;
613 ksym = container_of(n, struct bpf_ksym, tnode);
615 if (val < ksym->start)
617 if (val >= ksym->end)
623 static const struct latch_tree_ops bpf_tree_ops = {
624 .less = bpf_tree_less,
625 .comp = bpf_tree_comp,
628 static DEFINE_SPINLOCK(bpf_lock);
629 static LIST_HEAD(bpf_kallsyms);
630 static struct latch_tree_root bpf_tree __cacheline_aligned;
632 void bpf_ksym_add(struct bpf_ksym *ksym)
634 spin_lock_bh(&bpf_lock);
635 WARN_ON_ONCE(!list_empty(&ksym->lnode));
636 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
637 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
638 spin_unlock_bh(&bpf_lock);
641 static void __bpf_ksym_del(struct bpf_ksym *ksym)
643 if (list_empty(&ksym->lnode))
646 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
647 list_del_rcu(&ksym->lnode);
650 void bpf_ksym_del(struct bpf_ksym *ksym)
652 spin_lock_bh(&bpf_lock);
653 __bpf_ksym_del(ksym);
654 spin_unlock_bh(&bpf_lock);
657 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
659 return fp->jited && !bpf_prog_was_classic(fp);
662 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
664 if (!bpf_prog_kallsyms_candidate(fp) ||
668 bpf_prog_ksym_set_addr(fp);
669 bpf_prog_ksym_set_name(fp);
670 fp->aux->ksym.prog = true;
672 bpf_ksym_add(&fp->aux->ksym);
675 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
677 if (!bpf_prog_kallsyms_candidate(fp))
680 bpf_ksym_del(&fp->aux->ksym);
683 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
685 struct latch_tree_node *n;
687 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
688 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
691 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
692 unsigned long *off, char *sym)
694 struct bpf_ksym *ksym;
698 ksym = bpf_ksym_find(addr);
700 unsigned long symbol_start = ksym->start;
701 unsigned long symbol_end = ksym->end;
703 strncpy(sym, ksym->name, KSYM_NAME_LEN);
707 *size = symbol_end - symbol_start;
709 *off = addr - symbol_start;
716 bool is_bpf_text_address(unsigned long addr)
721 ret = bpf_ksym_find(addr) != NULL;
727 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
729 struct bpf_ksym *ksym = bpf_ksym_find(addr);
731 return ksym && ksym->prog ?
732 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
736 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
738 const struct exception_table_entry *e = NULL;
739 struct bpf_prog *prog;
742 prog = bpf_prog_ksym_find(addr);
745 if (!prog->aux->num_exentries)
748 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
754 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
757 struct bpf_ksym *ksym;
761 if (!bpf_jit_kallsyms_enabled())
765 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
769 strncpy(sym, ksym->name, KSYM_NAME_LEN);
771 *value = ksym->start;
772 *type = BPF_SYM_ELF_TYPE;
782 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
783 struct bpf_jit_poke_descriptor *poke)
785 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
786 static const u32 poke_tab_max = 1024;
787 u32 slot = prog->aux->size_poke_tab;
790 if (size > poke_tab_max)
792 if (poke->tailcall_target || poke->tailcall_target_stable ||
793 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
796 switch (poke->reason) {
797 case BPF_POKE_REASON_TAIL_CALL:
798 if (!poke->tail_call.map)
805 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
809 memcpy(&tab[slot], poke, sizeof(*poke));
810 prog->aux->size_poke_tab = size;
811 prog->aux->poke_tab = tab;
817 * BPF program pack allocator.
819 * Most BPF programs are pretty small. Allocating a whole page for each
820 * program is sometimes a waste. Many small BPF programs also add pressure
821 * to the instruction TLB. To solve this issue, we introduce a BPF program pack
822 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
823 * to host BPF programs.
825 #define BPF_PROG_CHUNK_SHIFT 6
826 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
827 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
829 struct bpf_prog_pack {
830 struct list_head list;
832 unsigned long bitmap[];
835 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
837 memset(area, 0, size);
840 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
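/* Worked example (editor's illustration): with BPF_PROG_CHUNK_SIZE = 64,
* a 200-byte allocation needs BPF_PROG_SIZE_TO_NBITS(200) =
* round_up(200, 64) / 64 = 4 chunks, i.e. four consecutive bits in a
* pack's bitmap.
*/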
842 static DEFINE_MUTEX(pack_mutex);
843 static LIST_HEAD(pack_list);
845 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
846 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
849 #define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
851 #define BPF_PROG_PACK_SIZE PAGE_SIZE
854 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
856 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
858 struct bpf_prog_pack *pack;
860 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
864 pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
869 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
870 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
871 list_add_tail(&pack->list, &pack_list);
873 set_vm_flush_reset_perms(pack->ptr);
874 set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
878 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
880 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
881 struct bpf_prog_pack *pack;
885 mutex_lock(&pack_mutex);
886 if (size > BPF_PROG_PACK_SIZE) {
887 size = round_up(size, PAGE_SIZE);
888 ptr = module_alloc(size);
890 bpf_fill_ill_insns(ptr, size);
891 set_vm_flush_reset_perms(ptr);
892 set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
896 list_for_each_entry(pack, &pack_list, list) {
897 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
899 if (pos < BPF_PROG_CHUNK_COUNT)
900 goto found_free_area;
903 pack = alloc_new_pack(bpf_fill_ill_insns);
910 bitmap_set(pack->bitmap, pos, nbits);
911 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
914 mutex_unlock(&pack_mutex);
918 void bpf_prog_pack_free(struct bpf_binary_header *hdr)
920 struct bpf_prog_pack *pack = NULL, *tmp;
924 mutex_lock(&pack_mutex);
925 if (hdr->size > BPF_PROG_PACK_SIZE) {
930 list_for_each_entry(tmp, &pack_list, list) {
931 if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
937 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
940 nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
941 pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
943 WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
944 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
946 bitmap_clear(pack->bitmap, pos, nbits);
947 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
948 BPF_PROG_CHUNK_COUNT, 0) == 0) {
949 list_del(&pack->list);
950 module_memfree(pack->ptr);
954 mutex_unlock(&pack_mutex);
957 static atomic_long_t bpf_jit_current;
959 /* Can be overridden by an arch's JIT compiler if it has a custom,
960 * dedicated BPF backend memory area, or if neither of the two ranges below applies.
963 u64 __weak bpf_jit_alloc_exec_limit(void)
965 #if defined(MODULES_VADDR)
966 return MODULES_END - MODULES_VADDR;
968 return VMALLOC_END - VMALLOC_START;
972 static int __init bpf_jit_charge_init(void)
974 /* Only used as heuristic here to derive limit. */
975 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
976 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
977 PAGE_SIZE), LONG_MAX);
980 pure_initcall(bpf_jit_charge_init);
982 int bpf_jit_charge_modmem(u32 size)
984 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
985 if (!bpf_capable()) {
986 atomic_long_sub(size, &bpf_jit_current);
994 void bpf_jit_uncharge_modmem(u32 size)
996 atomic_long_sub(size, &bpf_jit_current);
999 void *__weak bpf_jit_alloc_exec(unsigned long size)
1001 return module_alloc(size);
1004 void __weak bpf_jit_free_exec(void *addr)
1006 module_memfree(addr);
1009 struct bpf_binary_header *
1010 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1011 unsigned int alignment,
1012 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1014 struct bpf_binary_header *hdr;
1015 u32 size, hole, start;
1017 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1018 alignment > BPF_IMAGE_ALIGNMENT);
1020 /* Most BPF filters are really small, but if some of them
1021 * fill a page, allow at least 128 extra bytes to insert a
1022 * random section of illegal instructions.
1024 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1026 if (bpf_jit_charge_modmem(size))
1028 hdr = bpf_jit_alloc_exec(size);
1030 bpf_jit_uncharge_modmem(size);
1034 /* Fill space with illegal/arch-dep instructions. */
1035 bpf_fill_ill_insns(hdr, size);
1038 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1039 PAGE_SIZE - sizeof(*hdr));
1040 start = get_random_u32_below(hole) & ~(alignment - 1);
1042 /* Leave a random number of instructions before BPF code. */
1043 *image_ptr = &hdr->image[start];
1048 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1050 u32 size = hdr->size;
1052 bpf_jit_free_exec(hdr);
1053 bpf_jit_uncharge_modmem(size);
1056 /* Allocate jit binary from bpf_prog_pack allocator.
1057 * Since the allocated memory is RO+X, the JIT engine cannot write directly
1058 * to the memory. To solve this problem, an RW buffer is also allocated
1059 * at the same time. The JIT engine should calculate offsets based on the
1060 * RO memory address, but write JITed program to the RW buffer. Once the
1061 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1062 * the JITed program to the RO memory.
1064 struct bpf_binary_header *
1065 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1066 unsigned int alignment,
1067 struct bpf_binary_header **rw_header,
1069 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1071 struct bpf_binary_header *ro_header;
1072 u32 size, hole, start;
1074 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1075 alignment > BPF_IMAGE_ALIGNMENT);
1077 /* add 16 bytes for a random section of illegal instructions */
1078 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1080 if (bpf_jit_charge_modmem(size))
1082 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1084 bpf_jit_uncharge_modmem(size);
1088 *rw_header = kvmalloc(size, GFP_KERNEL);
1090 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1091 bpf_prog_pack_free(ro_header);
1092 bpf_jit_uncharge_modmem(size);
1096 /* Fill space with illegal/arch-dep instructions. */
1097 bpf_fill_ill_insns(*rw_header, size);
1098 (*rw_header)->size = size;
1100 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1101 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1102 start = get_random_u32_below(hole) & ~(alignment - 1);
1104 *image_ptr = &ro_header->image[start];
1105 *rw_image = &(*rw_header)->image[start];
1110 /* Copy JITed text from rw_header to its final location, the ro_header. */
1111 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1112 struct bpf_binary_header *ro_header,
1113 struct bpf_binary_header *rw_header)
1117 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1122 bpf_prog_pack_free(ro_header);
1123 return PTR_ERR(ptr);
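/* A typical arch JIT therefore goes through, roughly (editor's sketch of
* the intended calling sequence, not an additional kernel API):
*
*	header = bpf_jit_binary_pack_alloc(proglen, &image, align,
*					   &rw_header, &rw_image,
*					   jit_fill_hole);
*	if (!header)
*		return orig_prog;
*	... emit instructions into rw_image, using image for offsets ...
*	if (bpf_jit_binary_pack_finalize(prog, header, rw_header))
*		goto out;	/- on failure, finalize already released the RO region -/
*	prog->bpf_func = (void *)image;
*
* where jit_fill_hole, orig_prog and the labels are placeholders local to
* the arch JIT.
*/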
1128 /* bpf_jit_binary_pack_free is called in two different scenarios:
1129 * 1) when the program is freed, after the JIT has completed;
1130 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1131 * For case 2), we need to free both the RO memory and the RW buffer.
1133 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1134 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1135 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1136 * bpf_arch_text_copy (when jit fails).
1138 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1139 struct bpf_binary_header *rw_header)
1141 u32 size = ro_header->size;
1143 bpf_prog_pack_free(ro_header);
1145 bpf_jit_uncharge_modmem(size);
1148 struct bpf_binary_header *
1149 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1151 unsigned long real_start = (unsigned long)fp->bpf_func;
1154 addr = real_start & BPF_PROG_CHUNK_MASK;
1155 return (void *)addr;
1158 static inline struct bpf_binary_header *
1159 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1161 unsigned long real_start = (unsigned long)fp->bpf_func;
1164 addr = real_start & PAGE_MASK;
1165 return (void *)addr;
1168 /* This symbol is only overridden by archs that have different
1169 * requirements than the usual eBPF JITs, f.e. when they only
1170 * implement cBPF JIT, do not set images read-only, etc.
1172 void __weak bpf_jit_free(struct bpf_prog *fp)
1175 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1177 bpf_jit_binary_free(hdr);
1178 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1181 bpf_prog_unlock_free(fp);
1184 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1185 const struct bpf_insn *insn, bool extra_pass,
1186 u64 *func_addr, bool *func_addr_fixed)
1188 s16 off = insn->off;
1189 s32 imm = insn->imm;
1193 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1194 if (!*func_addr_fixed) {
1195 /* Placeholder address until the last pass has collected
1196 * all addresses for JITed subprograms in which case we
1197 * can pick them up from prog->aux.
1201 else if (prog->aux->func &&
1202 off >= 0 && off < prog->aux->func_cnt)
1203 addr = (u8 *)prog->aux->func[off]->bpf_func;
1206 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
1207 bpf_jit_supports_far_kfunc_call()) {
1208 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
1212 /* Address of a BPF helper call. Since part of the core
1213 * kernel, it's always at a fixed location. __bpf_call_base
1214 * and the helper with imm relative to it are both in the core kernel.
1217 addr = (u8 *)__bpf_call_base + imm;
1220 *func_addr = (unsigned long)addr;
1224 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1225 const struct bpf_insn *aux,
1226 struct bpf_insn *to_buff,
1229 struct bpf_insn *to = to_buff;
1230 u32 imm_rnd = get_random_u32();
1233 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
1234 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1236 /* Constraints on AX register:
1238 * AX register is inaccessible from user space. It is mapped in
1239 * all JITs, and used here for constant blinding rewrites. It is
1240 * typically "stateless" meaning its contents are only valid within
1241 * the executed instruction, but not across several instructions.
1242 * There are a few exceptions, however, which are further detailed below.
1245 * Constant blinding is only used by JITs, not in the interpreter.
1246 * The interpreter uses AX on some occasions as a local temporary
1247 * register e.g. in DIV or MOD instructions.
1249 * In restricted circumstances, the verifier can also use the AX
1250 * register for rewrites as long as they do not interfere with the above cases.
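*
* As a concrete illustration (editor's example with made-up values):
* blinding BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 42) with imm_rnd == 0x1234
* emits
*	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234 ^ 42);
*	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x1234);
*	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_AX);
* so AX ends up holding 42, but the constant never appears verbatim in
* the instruction stream.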
1253 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1256 if (from->imm == 0 &&
1257 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
1258 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1259 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1263 switch (from->code) {
1264 case BPF_ALU | BPF_ADD | BPF_K:
1265 case BPF_ALU | BPF_SUB | BPF_K:
1266 case BPF_ALU | BPF_AND | BPF_K:
1267 case BPF_ALU | BPF_OR | BPF_K:
1268 case BPF_ALU | BPF_XOR | BPF_K:
1269 case BPF_ALU | BPF_MUL | BPF_K:
1270 case BPF_ALU | BPF_MOV | BPF_K:
1271 case BPF_ALU | BPF_DIV | BPF_K:
1272 case BPF_ALU | BPF_MOD | BPF_K:
1273 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1274 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1275 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1278 case BPF_ALU64 | BPF_ADD | BPF_K:
1279 case BPF_ALU64 | BPF_SUB | BPF_K:
1280 case BPF_ALU64 | BPF_AND | BPF_K:
1281 case BPF_ALU64 | BPF_OR | BPF_K:
1282 case BPF_ALU64 | BPF_XOR | BPF_K:
1283 case BPF_ALU64 | BPF_MUL | BPF_K:
1284 case BPF_ALU64 | BPF_MOV | BPF_K:
1285 case BPF_ALU64 | BPF_DIV | BPF_K:
1286 case BPF_ALU64 | BPF_MOD | BPF_K:
1287 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1288 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1289 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1292 case BPF_JMP | BPF_JEQ | BPF_K:
1293 case BPF_JMP | BPF_JNE | BPF_K:
1294 case BPF_JMP | BPF_JGT | BPF_K:
1295 case BPF_JMP | BPF_JLT | BPF_K:
1296 case BPF_JMP | BPF_JGE | BPF_K:
1297 case BPF_JMP | BPF_JLE | BPF_K:
1298 case BPF_JMP | BPF_JSGT | BPF_K:
1299 case BPF_JMP | BPF_JSLT | BPF_K:
1300 case BPF_JMP | BPF_JSGE | BPF_K:
1301 case BPF_JMP | BPF_JSLE | BPF_K:
1302 case BPF_JMP | BPF_JSET | BPF_K:
1303 /* Account for the extra offset in case of a backjump.
1307 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1308 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1309 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1312 case BPF_JMP32 | BPF_JEQ | BPF_K:
1313 case BPF_JMP32 | BPF_JNE | BPF_K:
1314 case BPF_JMP32 | BPF_JGT | BPF_K:
1315 case BPF_JMP32 | BPF_JLT | BPF_K:
1316 case BPF_JMP32 | BPF_JGE | BPF_K:
1317 case BPF_JMP32 | BPF_JLE | BPF_K:
1318 case BPF_JMP32 | BPF_JSGT | BPF_K:
1319 case BPF_JMP32 | BPF_JSLT | BPF_K:
1320 case BPF_JMP32 | BPF_JSGE | BPF_K:
1321 case BPF_JMP32 | BPF_JSLE | BPF_K:
1322 case BPF_JMP32 | BPF_JSET | BPF_K:
1323 /* Account for the extra offset in case of a backjump.
1327 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1328 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1329 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1333 case BPF_LD | BPF_IMM | BPF_DW:
1334 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1335 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1336 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1337 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1339 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1340 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1341 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1343 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1344 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1347 case BPF_ST | BPF_MEM | BPF_DW:
1348 case BPF_ST | BPF_MEM | BPF_W:
1349 case BPF_ST | BPF_MEM | BPF_H:
1350 case BPF_ST | BPF_MEM | BPF_B:
1351 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1352 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1353 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1357 return to - to_buff;
1360 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1361 gfp_t gfp_extra_flags)
1363 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1364 struct bpf_prog *fp;
1366 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1368 /* aux->prog still points to the fp_other one, so
1369 * when promoting the clone to the real program,
1370 * this still needs to be adapted.
1372 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1378 static void bpf_prog_clone_free(struct bpf_prog *fp)
1380 /* aux was stolen by the other clone, so we cannot free
1381 * it from this path! It will be freed eventually by the
1382 * other program on release.
1384 * At this point, we don't need a deferred release since
1385 * clone is guaranteed to not be locked.
1390 __bpf_prog_free(fp);
1393 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1395 /* We have to repoint aux->prog to self, as we don't
1396 * know whether fp here is the clone or the original.
1399 bpf_prog_clone_free(fp_other);
1402 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1404 struct bpf_insn insn_buff[16], aux[2];
1405 struct bpf_prog *clone, *tmp;
1406 int insn_delta, insn_cnt;
1407 struct bpf_insn *insn;
1410 if (!prog->blinding_requested || prog->blinded)
1413 clone = bpf_prog_clone_create(prog, GFP_USER);
1415 return ERR_PTR(-ENOMEM);
1417 insn_cnt = clone->len;
1418 insn = clone->insnsi;
1420 for (i = 0; i < insn_cnt; i++, insn++) {
1421 if (bpf_pseudo_func(insn)) {
1422 /* ld_imm64 with an address of bpf subprog is not
1423 * a user controlled constant. Don't randomize it,
1424 * since it will conflict with jit_subprogs() logic.
1431 /* We temporarily need to hold the original ld64 insn
1432 * so that we can still access the first part in the
1433 * second blinding run.
1435 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1437 memcpy(aux, insn, sizeof(aux));
1439 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1440 clone->aux->verifier_zext);
1444 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1446 /* Patching may have repointed aux->prog during
1447 * realloc from the original one, so we need to
1448 * fix it up here on error.
1450 bpf_jit_prog_release_other(prog, clone);
1455 insn_delta = rewritten - 1;
1457 /* Walk new program and skip insns we just inserted. */
1458 insn = clone->insnsi + i + insn_delta;
1459 insn_cnt += insn_delta;
1466 #endif /* CONFIG_BPF_JIT */
1468 /* Base function for offset calculation. Needs to go into .text section,
1469 * therefore keeping it non-static as well; will also be used by JITs
1470 * anyway later on, so do not let the compiler omit it. This also needs
1471 * to go into kallsyms for correlation from e.g. bpftool, so naming must not change.
1474 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1478 EXPORT_SYMBOL_GPL(__bpf_call_base);
1480 /* All UAPI available opcodes. */
1481 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1482 /* 32 bit ALU operations. */ \
1483 /* Register based. */ \
1484 INSN_3(ALU, ADD, X), \
1485 INSN_3(ALU, SUB, X), \
1486 INSN_3(ALU, AND, X), \
1487 INSN_3(ALU, OR, X), \
1488 INSN_3(ALU, LSH, X), \
1489 INSN_3(ALU, RSH, X), \
1490 INSN_3(ALU, XOR, X), \
1491 INSN_3(ALU, MUL, X), \
1492 INSN_3(ALU, MOV, X), \
1493 INSN_3(ALU, ARSH, X), \
1494 INSN_3(ALU, DIV, X), \
1495 INSN_3(ALU, MOD, X), \
1497 INSN_3(ALU, END, TO_BE), \
1498 INSN_3(ALU, END, TO_LE), \
1499 /* Immediate based. */ \
1500 INSN_3(ALU, ADD, K), \
1501 INSN_3(ALU, SUB, K), \
1502 INSN_3(ALU, AND, K), \
1503 INSN_3(ALU, OR, K), \
1504 INSN_3(ALU, LSH, K), \
1505 INSN_3(ALU, RSH, K), \
1506 INSN_3(ALU, XOR, K), \
1507 INSN_3(ALU, MUL, K), \
1508 INSN_3(ALU, MOV, K), \
1509 INSN_3(ALU, ARSH, K), \
1510 INSN_3(ALU, DIV, K), \
1511 INSN_3(ALU, MOD, K), \
1512 /* 64 bit ALU operations. */ \
1513 /* Register based. */ \
1514 INSN_3(ALU64, ADD, X), \
1515 INSN_3(ALU64, SUB, X), \
1516 INSN_3(ALU64, AND, X), \
1517 INSN_3(ALU64, OR, X), \
1518 INSN_3(ALU64, LSH, X), \
1519 INSN_3(ALU64, RSH, X), \
1520 INSN_3(ALU64, XOR, X), \
1521 INSN_3(ALU64, MUL, X), \
1522 INSN_3(ALU64, MOV, X), \
1523 INSN_3(ALU64, ARSH, X), \
1524 INSN_3(ALU64, DIV, X), \
1525 INSN_3(ALU64, MOD, X), \
1526 INSN_2(ALU64, NEG), \
1527 INSN_3(ALU64, END, TO_LE), \
1528 /* Immediate based. */ \
1529 INSN_3(ALU64, ADD, K), \
1530 INSN_3(ALU64, SUB, K), \
1531 INSN_3(ALU64, AND, K), \
1532 INSN_3(ALU64, OR, K), \
1533 INSN_3(ALU64, LSH, K), \
1534 INSN_3(ALU64, RSH, K), \
1535 INSN_3(ALU64, XOR, K), \
1536 INSN_3(ALU64, MUL, K), \
1537 INSN_3(ALU64, MOV, K), \
1538 INSN_3(ALU64, ARSH, K), \
1539 INSN_3(ALU64, DIV, K), \
1540 INSN_3(ALU64, MOD, K), \
1541 /* Call instruction. */ \
1542 INSN_2(JMP, CALL), \
1543 /* Exit instruction. */ \
1544 INSN_2(JMP, EXIT), \
1545 /* 32-bit Jump instructions. */ \
1546 /* Register based. */ \
1547 INSN_3(JMP32, JEQ, X), \
1548 INSN_3(JMP32, JNE, X), \
1549 INSN_3(JMP32, JGT, X), \
1550 INSN_3(JMP32, JLT, X), \
1551 INSN_3(JMP32, JGE, X), \
1552 INSN_3(JMP32, JLE, X), \
1553 INSN_3(JMP32, JSGT, X), \
1554 INSN_3(JMP32, JSLT, X), \
1555 INSN_3(JMP32, JSGE, X), \
1556 INSN_3(JMP32, JSLE, X), \
1557 INSN_3(JMP32, JSET, X), \
1558 /* Immediate based. */ \
1559 INSN_3(JMP32, JEQ, K), \
1560 INSN_3(JMP32, JNE, K), \
1561 INSN_3(JMP32, JGT, K), \
1562 INSN_3(JMP32, JLT, K), \
1563 INSN_3(JMP32, JGE, K), \
1564 INSN_3(JMP32, JLE, K), \
1565 INSN_3(JMP32, JSGT, K), \
1566 INSN_3(JMP32, JSLT, K), \
1567 INSN_3(JMP32, JSGE, K), \
1568 INSN_3(JMP32, JSLE, K), \
1569 INSN_3(JMP32, JSET, K), \
1570 /* Jump instructions. */ \
1571 /* Register based. */ \
1572 INSN_3(JMP, JEQ, X), \
1573 INSN_3(JMP, JNE, X), \
1574 INSN_3(JMP, JGT, X), \
1575 INSN_3(JMP, JLT, X), \
1576 INSN_3(JMP, JGE, X), \
1577 INSN_3(JMP, JLE, X), \
1578 INSN_3(JMP, JSGT, X), \
1579 INSN_3(JMP, JSLT, X), \
1580 INSN_3(JMP, JSGE, X), \
1581 INSN_3(JMP, JSLE, X), \
1582 INSN_3(JMP, JSET, X), \
1583 /* Immediate based. */ \
1584 INSN_3(JMP, JEQ, K), \
1585 INSN_3(JMP, JNE, K), \
1586 INSN_3(JMP, JGT, K), \
1587 INSN_3(JMP, JLT, K), \
1588 INSN_3(JMP, JGE, K), \
1589 INSN_3(JMP, JLE, K), \
1590 INSN_3(JMP, JSGT, K), \
1591 INSN_3(JMP, JSLT, K), \
1592 INSN_3(JMP, JSGE, K), \
1593 INSN_3(JMP, JSLE, K), \
1594 INSN_3(JMP, JSET, K), \
1596 /* Store instructions. */ \
1597 /* Register based. */ \
1598 INSN_3(STX, MEM, B), \
1599 INSN_3(STX, MEM, H), \
1600 INSN_3(STX, MEM, W), \
1601 INSN_3(STX, MEM, DW), \
1602 INSN_3(STX, ATOMIC, W), \
1603 INSN_3(STX, ATOMIC, DW), \
1604 /* Immediate based. */ \
1605 INSN_3(ST, MEM, B), \
1606 INSN_3(ST, MEM, H), \
1607 INSN_3(ST, MEM, W), \
1608 INSN_3(ST, MEM, DW), \
1609 /* Load instructions. */ \
1610 /* Register based. */ \
1611 INSN_3(LDX, MEM, B), \
1612 INSN_3(LDX, MEM, H), \
1613 INSN_3(LDX, MEM, W), \
1614 INSN_3(LDX, MEM, DW), \
1615 INSN_3(LDX, MEMSX, B), \
1616 INSN_3(LDX, MEMSX, H), \
1617 INSN_3(LDX, MEMSX, W), \
1618 /* Immediate based. */ \
1621 bool bpf_opcode_in_insntable(u8 code)
1623 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1624 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1625 static const bool public_insntable[256] = {
1626 [0 ... 255] = false,
1627 /* Now overwrite non-defaults ... */
1628 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1629 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1630 [BPF_LD | BPF_ABS | BPF_B] = true,
1631 [BPF_LD | BPF_ABS | BPF_H] = true,
1632 [BPF_LD | BPF_ABS | BPF_W] = true,
1633 [BPF_LD | BPF_IND | BPF_B] = true,
1634 [BPF_LD | BPF_IND | BPF_H] = true,
1635 [BPF_LD | BPF_IND | BPF_W] = true,
1637 #undef BPF_INSN_3_TBL
1638 #undef BPF_INSN_2_TBL
1639 return public_insntable[code];
1642 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
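/* Editor's note: weak fallback used when no stronger definition is linked
* in; pretend the probe read faulted and hand back zeroed memory so the
* interpreter's PROBE_MEM loads below stay well defined.
*/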
1643 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1645 memset(dst, 0, size);
1650 * ___bpf_prog_run - run eBPF program on a given context
1651 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1652 * @insn: is the array of eBPF instructions
1654 * Decode and execute eBPF instructions.
1656 * Return: whatever value is in %BPF_R0 at program exit
1658 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1660 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1661 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1662 static const void * const jumptable[256] __annotate_jump_table = {
1663 [0 ... 255] = &&default_label,
1664 /* Now overwrite non-defaults ... */
1665 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1666 /* Non-UAPI available opcodes. */
1667 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1668 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1669 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1670 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1671 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1672 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1673 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1674 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1675 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1676 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1678 #undef BPF_INSN_3_LBL
1679 #undef BPF_INSN_2_LBL
1680 u32 tail_call_cnt = 0;
1682 #define CONT ({ insn++; goto select_insn; })
1683 #define CONT_JMP ({ insn++; goto select_insn; })
1686 goto *jumptable[insn->code];
1688 /* Explicitly mask the register-based shift amounts with 63 or 31
1689 * to avoid undefined behavior. Normally this won't affect the
1690 * generated code, for example, in case of native 64 bit archs such
1691 * as x86-64 or arm64, the compiler is optimizing the AND away for
1692 * the interpreter. In case of JITs, each of the JIT backends compiles
1693 * the BPF shift operations to machine instructions which produce
1694 * implementation-defined results in such a case; the resulting
1695 * contents of the register may be arbitrary, but program behaviour
1696 * as a whole remains defined. In other words, in case of JIT backends,
1697 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
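*
* E.g. (editor's illustration) a program doing r1 <<= r2 with r2 == 70
* executes r1 << (70 & 63) == r1 << 6 in the interpreter below.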
1700 #define SHT(OPCODE, OP) \
1701 ALU64_##OPCODE##_X: \
1702 DST = DST OP (SRC & 63); \
1705 DST = (u32) DST OP ((u32) SRC & 31); \
1707 ALU64_##OPCODE##_K: \
1711 DST = (u32) DST OP (u32) IMM; \
1714 #define ALU(OPCODE, OP) \
1715 ALU64_##OPCODE##_X: \
1719 DST = (u32) DST OP (u32) SRC; \
1721 ALU64_##OPCODE##_K: \
1725 DST = (u32) DST OP (u32) IMM; \
1749 DST = (u32)(s8) SRC;
1752 DST = (u32)(s16) SRC;
1779 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1783 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1786 DST = (u64) (u32) (((s32) DST) >> IMM);
1789 (*(s64 *) &DST) >>= (SRC & 63);
1792 (*(s64 *) &DST) >>= IMM;
1795 div64_u64_rem(DST, SRC, &AX);
1800 DST = do_div(AX, (u32) SRC);
1803 div64_u64_rem(DST, IMM, &AX);
1808 DST = do_div(AX, (u32) IMM);
1811 DST = div64_u64(DST, SRC);
1815 do_div(AX, (u32) SRC);
1819 DST = div64_u64(DST, IMM);
1823 do_div(AX, (u32) IMM);
1829 DST = (__force u16) cpu_to_be16(DST);
1832 DST = (__force u32) cpu_to_be32(DST);
1835 DST = (__force u64) cpu_to_be64(DST);
1842 DST = (__force u16) cpu_to_le16(DST);
1845 DST = (__force u32) cpu_to_le32(DST);
1848 DST = (__force u64) cpu_to_le64(DST);
1855 DST = (__force u16) __swab16(DST);
1858 DST = (__force u32) __swab32(DST);
1861 DST = (__force u64) __swab64(DST);
1868 /* Function call scratches BPF_R1-BPF_R5 registers,
1869 * preserves BPF_R6-BPF_R9, and stores the return value into BPF_R0.
1872 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1877 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1880 insn + insn->off + 1);
1884 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1885 struct bpf_array *array = container_of(map, struct bpf_array, map);
1886 struct bpf_prog *prog;
1889 if (unlikely(index >= array->map.max_entries))
1892 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1897 prog = READ_ONCE(array->ptrs[index]);
1901 /* ARG1 at this point is guaranteed to point to CTX from
1902 * the verifier side due to the fact that the tail call is
1903 * handled like a helper, that is, bpf_tail_call_proto,
1904 * where arg1_type is ARG_PTR_TO_CTX.
1906 insn = prog->insnsi;
1917 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
1919 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1920 insn += insn->off; \
1924 JMP32_##OPCODE##_X: \
1925 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1926 insn += insn->off; \
1931 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1932 insn += insn->off; \
1936 JMP32_##OPCODE##_K: \
1937 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1938 insn += insn->off; \
1942 COND_JMP(u, JEQ, ==)
1943 COND_JMP(u, JNE, !=)
1946 COND_JMP(u, JGE, >=)
1947 COND_JMP(u, JLE, <=)
1948 COND_JMP(u, JSET, &)
1949 COND_JMP(s, JSGT, >)
1950 COND_JMP(s, JSLT, <)
1951 COND_JMP(s, JSGE, >=)
1952 COND_JMP(s, JSLE, <=)
1954 /* ST, STX and LDX */
1956 /* Speculation barrier for mitigating Speculative Store Bypass.
1957 * In case of arm64, we rely on the firmware mitigation as
1958 * controlled via the ssbd kernel parameter. Whenever the
1959 * mitigation is enabled, it works for all of the kernel code
1960 * with no need to provide any additional instructions here.
1961 * In case of x86, we use 'lfence' insn for mitigation. We
1962 * reuse preexisting logic from Spectre v1 mitigation that
1963 * happens to produce the required code on x86 for v4 as well.
1967 #define LDST(SIZEOP, SIZE) \
1969 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1972 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1975 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1977 LDX_PROBE_MEM_##SIZEOP: \
1978 bpf_probe_read_kernel(&DST, sizeof(SIZE), \
1979 (const void *)(long) (SRC + insn->off)); \
1980 DST = *((SIZE *)&DST); \
1989 #define LDSX(SIZEOP, SIZE) \
1990 LDX_MEMSX_##SIZEOP: \
1991 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1993 LDX_PROBE_MEMSX_##SIZEOP: \
1994 bpf_probe_read_kernel(&DST, sizeof(SIZE), \
1995 (const void *)(long) (SRC + insn->off)); \
1996 DST = *((SIZE *)&DST); \
2004 #define ATOMIC_ALU_OP(BOP, KOP) \
2006 if (BPF_SIZE(insn->code) == BPF_W) \
2007 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2008 (DST + insn->off)); \
2010 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2011 (DST + insn->off)); \
2013 case BOP | BPF_FETCH: \
2014 if (BPF_SIZE(insn->code) == BPF_W) \
2015 SRC = (u32) atomic_fetch_##KOP( \
2017 (atomic_t *)(unsigned long) (DST + insn->off)); \
2019 SRC = (u64) atomic64_fetch_##KOP( \
2021 (atomic64_t *)(unsigned long) (DST + insn->off)); \
2027 ATOMIC_ALU_OP(BPF_ADD, add)
2028 ATOMIC_ALU_OP(BPF_AND, and)
2029 ATOMIC_ALU_OP(BPF_OR, or)
2030 ATOMIC_ALU_OP(BPF_XOR, xor)
2031 #undef ATOMIC_ALU_OP
2034 if (BPF_SIZE(insn->code) == BPF_W)
2035 SRC = (u32) atomic_xchg(
2036 (atomic_t *)(unsigned long) (DST + insn->off),
2039 SRC = (u64) atomic64_xchg(
2040 (atomic64_t *)(unsigned long) (DST + insn->off),
2044 if (BPF_SIZE(insn->code) == BPF_W)
2045 BPF_R0 = (u32) atomic_cmpxchg(
2046 (atomic_t *)(unsigned long) (DST + insn->off),
2047 (u32) BPF_R0, (u32) SRC);
2049 BPF_R0 = (u64) atomic64_cmpxchg(
2050 (atomic64_t *)(unsigned long) (DST + insn->off),
2051 (u64) BPF_R0, (u64) SRC);
2060 /* If we ever reach this, we have a bug somewhere. Die hard here
2061 * instead of just returning 0; we could be somewhere in a subprog,
2062 * so execution could continue otherwise which we do /not/ want.
2064 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
2066 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2067 insn->code, insn->imm);
2072 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2073 #define DEFINE_BPF_PROG_RUN(stack_size) \
2074 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2076 u64 stack[stack_size / sizeof(u64)]; \
2077 u64 regs[MAX_BPF_EXT_REG] = {}; \
2079 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2080 ARG1 = (u64) (unsigned long) ctx; \
2081 return ___bpf_prog_run(regs, insn); \
2084 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2085 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2086 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2087 const struct bpf_insn *insn) \
2089 u64 stack[stack_size / sizeof(u64)]; \
2090 u64 regs[MAX_BPF_EXT_REG]; \
2092 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2098 return ___bpf_prog_run(regs, insn); \
2101 #define EVAL1(FN, X) FN(X)
2102 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2103 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2104 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2105 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2106 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2108 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2109 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2110 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2112 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2113 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2114 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2116 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2118 static unsigned int (*interpreters[])(const void *ctx,
2119 const struct bpf_insn *insn) = {
2120 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2121 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2122 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2124 #undef PROG_NAME_LIST
2125 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2126 static __maybe_unused
2127 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2128 const struct bpf_insn *insn) = {
2129 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2130 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2131 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2133 #undef PROG_NAME_LIST
2135 #ifdef CONFIG_BPF_SYSCALL
2136 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2138 stack_depth = max_t(u32, stack_depth, 1);
2139 insn->off = (s16) insn->imm;
2140 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2141 __bpf_call_base_args;
2142 insn->code = BPF_JMP | BPF_CALL_ARGS;
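/* Editor's illustration: a subprog with stack_depth = 40 rounds up to 64,
* so index (64 / 32) - 1 = 1 selects the interpreter variant with a
* 64-byte on-stack scratch area (__bpf_prog_run_args64).
*/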
2146 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2147 const struct bpf_insn *insn)
2149 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2150 * is not working properly, so warn about it!
2157 bool bpf_prog_map_compatible(struct bpf_map *map,
2158 const struct bpf_prog *fp)
2160 enum bpf_prog_type prog_type = resolve_prog_type(fp);
2163 if (fp->kprobe_override)
2166 /* XDP programs inserted into maps are not guaranteed to run on
2167 * a particular netdev (and can run outside driver context entirely
2168 * in the case of devmap and cpumap). Until device checks
2169 * are implemented, prohibit adding dev-bound programs to program maps.
2171 if (bpf_prog_is_dev_bound(fp->aux))
2174 spin_lock(&map->owner.lock);
2175 if (!map->owner.type) {
2176 /* There's no owner yet where we could check for compatibility.
2179 map->owner.type = prog_type;
2180 map->owner.jited = fp->jited;
2181 map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2184 ret = map->owner.type == prog_type &&
2185 map->owner.jited == fp->jited &&
2186 map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2188 spin_unlock(&map->owner.lock);
2193 static int bpf_check_tail_call(const struct bpf_prog *fp)
2195 struct bpf_prog_aux *aux = fp->aux;
2198 mutex_lock(&aux->used_maps_mutex);
2199 for (i = 0; i < aux->used_map_cnt; i++) {
2200 struct bpf_map *map = aux->used_maps[i];
2202 if (!map_type_contains_progs(map))
2205 if (!bpf_prog_map_compatible(map, fp)) {
2212 mutex_unlock(&aux->used_maps_mutex);
2216 static void bpf_prog_select_func(struct bpf_prog *fp)
2218 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2219 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2221 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2223 fp->bpf_func = __bpf_prog_ret0_warn;
2228 * bpf_prog_select_runtime - select exec runtime for BPF program
2229 * @fp: bpf_prog populated with BPF program
2230 * @err: pointer to error variable
2232 * Try to JIT eBPF program, if JIT is not available, use interpreter.
2233 * The BPF program will be executed via bpf_prog_run() function.
2235 * Return: the &fp argument along with &err set to 0 for success or
2236 * a negative errno code on failure
2238 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2240 /* In case of BPF-to-BPF calls, the verifier did all the prep
2241 * work with regard to JITing, etc.
2243 bool jit_needed = false;
2248 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2249 bpf_prog_has_kfunc_call(fp))
2252 bpf_prog_select_func(fp);
2254 /* eBPF JITs can rewrite the program in case constant
2255 * blinding is active. However, in case of error during
2256 * blinding, bpf_int_jit_compile() must always return a
2257 * valid program, which in this case would simply not
2258 * be JITed, but fall back to the interpreter.
2260 if (!bpf_prog_is_offloaded(fp->aux)) {
2261 *err = bpf_prog_alloc_jited_linfo(fp);
2265 fp = bpf_int_jit_compile(fp);
2266 bpf_prog_jit_attempt_done(fp);
2267 if (!fp->jited && jit_needed) {
2272 *err = bpf_prog_offload_compile(fp);
2278 bpf_prog_lock_ro(fp);
2280 /* The tail call compatibility check can only be done at
2281 * this late stage, as we need to determine whether we deal
2282 * with JITed or non-JITed program concatenations, and not
2283 * all eBPF JITs might immediately support all features.
2285 *err = bpf_check_tail_call(fp);
2289 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2291 static unsigned int __bpf_prog_ret1(const void *ctx,
2292 const struct bpf_insn *insn)
2297 static struct bpf_prog_dummy {
2298 struct bpf_prog prog;
2299 } dummy_bpf_prog = {
2301 .bpf_func = __bpf_prog_ret1,
2305 struct bpf_empty_prog_array bpf_empty_prog_array = {
2308 EXPORT_SYMBOL(bpf_empty_prog_array);
2310 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2313 return kzalloc(sizeof(struct bpf_prog_array) +
2314 sizeof(struct bpf_prog_array_item) *
2318 return &bpf_empty_prog_array.hdr;
2321 void bpf_prog_array_free(struct bpf_prog_array *progs)
2323 if (!progs || progs == &bpf_empty_prog_array.hdr)
2325 kfree_rcu(progs, rcu);
2328 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2330 struct bpf_prog_array *progs;
2332 /* If RCU Tasks Trace grace period implies RCU grace period, there is
2333 * no need to call kfree_rcu(), just call kfree() directly.
2335 progs = container_of(rcu, struct bpf_prog_array, rcu);
2336 if (rcu_trace_implies_rcu_gp())
2339 kfree_rcu(progs, rcu);
2342 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2344 if (!progs || progs == &bpf_empty_prog_array.hdr)
2346 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2349 int bpf_prog_array_length(struct bpf_prog_array *array)
2351 struct bpf_prog_array_item *item;
2354 for (item = array->items; item->prog; item++)
2355 if (item->prog != &dummy_bpf_prog.prog)
2360 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2362 struct bpf_prog_array_item *item;
2364 for (item = array->items; item->prog; item++)
2365 if (item->prog != &dummy_bpf_prog.prog)
2370 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2374 struct bpf_prog_array_item *item;
2377 for (item = array->items; item->prog; item++) {
2378 if (item->prog == &dummy_bpf_prog.prog)
2380 prog_ids[i] = item->prog->aux->id;
2381 if (++i == request_cnt) {
2387 return !!(item->prog);
2390 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2391 __u32 __user *prog_ids, u32 cnt)
2393 unsigned long err = 0;
2397 /* users of this function are doing:
2398 * cnt = bpf_prog_array_length();
2400 * bpf_prog_array_copy_to_user(..., cnt);
2401 * so the kcalloc below doesn't need an extra cnt > 0 check.
2403 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2406 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2407 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
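/* Illustrative sketch (editor addition; "user_ids" is a hypothetical
 * caller-side __user pointer): the usage pattern that the comment above
 * relies on:
 *
 *      cnt = bpf_prog_array_length(array);
 *      err = bpf_prog_array_copy_to_user(array, user_ids, cnt);
 */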
2416 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2417 struct bpf_prog *old_prog)
2419 struct bpf_prog_array_item *item;
2421 for (item = array->items; item->prog; item++)
2422 if (item->prog == old_prog) {
2423 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2429 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2430 * index into the program array with
2431 * a dummy no-op program.
2432 * @array: a bpf_prog_array
2433 * @index: the index of the program to replace
2435 * Skips over dummy programs, without counting them, when calculating
2436 * the position of the program to replace.
2440 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2441 * * -ENOENT - Index out of range
2443 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2445 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2449 * bpf_prog_array_update_at() - Updates the program at the given index
2450 * into the program array.
2451 * @array: a bpf_prog_array
2452 * @index: the index of the program to update
2453 * @prog: the program to insert into the array
2455 * Skips over dummy programs, without counting them, when calculating
2456 * the position of the program to update.
2460 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2461 * * -ENOENT - Index out of range
2463 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2464 struct bpf_prog *prog)
2466 struct bpf_prog_array_item *item;
2468 if (unlikely(index < 0))
2471 for (item = array->items; item->prog; item++) {
2472 if (item->prog == &dummy_bpf_prog.prog)
2475 WRITE_ONCE(item->prog, prog);
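/* Illustrative sketch (editor addition): both helpers address programs by
 * their logical position, i.e. dummy entries left behind by
 * bpf_prog_array_delete_safe() are skipped when counting:
 *
 *      err = bpf_prog_array_update_at(array, 0, new_prog);
 *      ...
 *      err = bpf_prog_array_delete_safe_at(array, 0);
 *
 * where new_prog is a hypothetical replacement program.
 */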
2483 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2484 struct bpf_prog *exclude_prog,
2485 struct bpf_prog *include_prog,
2487 struct bpf_prog_array **new_array)
2489 int new_prog_cnt, carry_prog_cnt = 0;
2490 struct bpf_prog_array_item *existing, *new;
2491 struct bpf_prog_array *array;
2492 bool found_exclude = false;
2494 /* Figure out how many existing progs we need to carry over to the new array. */
2498 existing = old_array->items;
2499 for (; existing->prog; existing++) {
2500 if (existing->prog == exclude_prog) {
2501 found_exclude = true;
2504 if (existing->prog != &dummy_bpf_prog.prog)
2506 if (existing->prog == include_prog)
2511 if (exclude_prog && !found_exclude)
2514 /* How many progs (not NULL) will be in the new array? */
2515 new_prog_cnt = carry_prog_cnt;
2519 /* Do we have any prog (not NULL) in the new array? */
2520 if (!new_prog_cnt) {
2525 /* +1 as the end of prog_array is marked with NULL */
2526 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2531 /* Fill in the new prog array */
2532 if (carry_prog_cnt) {
2533 existing = old_array->items;
2534 for (; existing->prog; existing++) {
2535 if (existing->prog == exclude_prog ||
2536 existing->prog == &dummy_bpf_prog.prog)
2539 new->prog = existing->prog;
2540 new->bpf_cookie = existing->bpf_cookie;
2545 new->prog = include_prog;
2546 new->bpf_cookie = bpf_cookie;
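/* Illustrative sketch (editor addition): callers typically build the new
 * array under a lock and then publish it, roughly:
 *
 *      ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
 *      if (ret < 0)
 *              return ret;
 *      rcu_assign_pointer(owner->prog_array, new_array);
 *      bpf_prog_array_free(old_array);
 *
 * where "owner" stands in for the caller's RCU-protected slot (hypothetical
 * name).
 */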
2554 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2555 u32 *prog_ids, u32 request_cnt,
2561 cnt = bpf_prog_array_length(array);
2565 /* return early if user requested only program count or nothing to copy */
2566 if (!request_cnt || !cnt)
2569 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2570 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2574 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2575 struct bpf_map **used_maps, u32 len)
2577 struct bpf_map *map;
2580 for (i = 0; i < len; i++) {
2582 if (map->ops->map_poke_untrack)
2583 map->ops->map_poke_untrack(map, aux);
2588 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2590 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2591 kfree(aux->used_maps);
2594 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2595 struct btf_mod_pair *used_btfs, u32 len)
2597 #ifdef CONFIG_BPF_SYSCALL
2598 struct btf_mod_pair *btf_mod;
2601 for (i = 0; i < len; i++) {
2602 btf_mod = &used_btfs[i];
2603 if (btf_mod->module)
2604 module_put(btf_mod->module);
2605 btf_put(btf_mod->btf);
2610 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2612 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2613 kfree(aux->used_btfs);
2616 static void bpf_prog_free_deferred(struct work_struct *work)
2618 struct bpf_prog_aux *aux;
2621 aux = container_of(work, struct bpf_prog_aux, work);
2622 #ifdef CONFIG_BPF_SYSCALL
2623 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2625 #ifdef CONFIG_CGROUP_BPF
2626 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2627 bpf_cgroup_atype_put(aux->cgroup_atype);
2629 bpf_free_used_maps(aux);
2630 bpf_free_used_btfs(aux);
2631 if (bpf_prog_is_dev_bound(aux))
2632 bpf_prog_dev_bound_destroy(aux->prog);
2633 #ifdef CONFIG_PERF_EVENTS
2634 if (aux->prog->has_callchain_buf)
2635 put_callchain_buffers();
2637 if (aux->dst_trampoline)
2638 bpf_trampoline_put(aux->dst_trampoline);
2639 for (i = 0; i < aux->func_cnt; i++) {
2640 /* We can just unlink the subprog poke descriptor table as
2641 * it was originally linked to the main program and is also
2642 * released along with it.
2644 aux->func[i]->aux->poke_tab = NULL;
2645 bpf_jit_free(aux->func[i]);
2647 if (aux->func_cnt) {
2649 bpf_prog_unlock_free(aux->prog);
2651 bpf_jit_free(aux->prog);
2655 void bpf_prog_free(struct bpf_prog *fp)
2657 struct bpf_prog_aux *aux = fp->aux;
2660 bpf_prog_put(aux->dst_prog);
2661 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2662 schedule_work(&aux->work);
2664 EXPORT_SYMBOL_GPL(bpf_prog_free);
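/* Illustrative sketch (editor addition): since the heavy teardown runs from
 * bpf_prog_free_deferred() on a workqueue, bpf_prog_free() itself only queues
 * work and may be called from contexts that cannot sleep, e.g. from an RCU
 * callback dropping the last reference (names hypothetical):
 *
 *      static void prog_put_rcu(struct rcu_head *rcu)
 *      {
 *              struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 *
 *              bpf_prog_free(aux->prog);
 *      }
 */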
2666 /* RNG for unprivileged user space with separate state from prandom_u32(). */
2667 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2669 void bpf_user_rnd_init_once(void)
2671 prandom_init_once(&bpf_user_rnd_state);
2674 BPF_CALL_0(bpf_user_rnd_u32)
2676 /* Should someone ever have the rather unwise idea to use some
2677 * of the registers passed into this function, then note that
2678 * this function is called from native eBPF and classic-to-eBPF
2679 * transformations. Register assignments from both sides are
2680 * different, e.g. classic always sets fn(ctx, A, X) here.
2682 struct rnd_state *state;
2685 state = &get_cpu_var(bpf_user_rnd_state);
2686 res = prandom_u32_state(state);
2687 put_cpu_var(bpf_user_rnd_state);
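/* Illustrative sketch (editor addition): helper-proto lookup paths seed the
 * per-CPU state once before handing out the helper, roughly:
 *
 *      case BPF_FUNC_get_prandom_u32:
 *              bpf_user_rnd_init_once();
 *              return &bpf_get_prandom_u32_proto;
 */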
2692 BPF_CALL_0(bpf_get_raw_cpu_id)
2694 return raw_smp_processor_id();
2697 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2698 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2699 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2700 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2701 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2702 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2703 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2704 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2705 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2706 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2707 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2709 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2710 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2711 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2712 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2713 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2714 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2715 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2717 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2718 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2719 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2720 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2721 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2722 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2723 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2724 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2725 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2726 const struct bpf_func_proto bpf_set_retval_proto __weak;
2727 const struct bpf_func_proto bpf_get_retval_proto __weak;
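/* Editor note: these empty __weak protos only keep references such as
 * &bpf_map_lookup_elem_proto linkable when CONFIG_BPF_SYSCALL is disabled;
 * the real definitions (e.g. in kernel/bpf/helpers.c) override them otherwise.
 */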
2729 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2734 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2740 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2741 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2745 EXPORT_SYMBOL_GPL(bpf_event_output);
2747 /* Always built-in helper functions. */
2748 const struct bpf_func_proto bpf_tail_call_proto = {
2751 .ret_type = RET_VOID,
2752 .arg1_type = ARG_PTR_TO_CTX,
2753 .arg2_type = ARG_CONST_MAP_PTR,
2754 .arg3_type = ARG_ANYTHING,
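/* Illustrative sketch (editor addition, BPF program side): a program invokes
 * the helper as
 *
 *      bpf_tail_call(ctx, &prog_array_map, index);
 *
 * where prog_array_map and index are the program's own map and slot
 * (hypothetical names); on success, execution continues in the target program
 * and never returns to the caller, matching the RET_VOID / ARG_* layout above.
 */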
2757 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2758 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2759 * eBPF and implicitly also cBPF can get JITed!
2761 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2766 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2767 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2769 void __weak bpf_jit_compile(struct bpf_prog *prog)
2773 bool __weak bpf_helper_changes_pkt_data(void *func)
2778 /* Return TRUE if the JIT backend wants the verifier to enable sub-register usage
2779 * analysis code and wants explicit zero extension inserted by the verifier.
2780 * Otherwise, return FALSE.
2782 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2783 * you don't override this. JITs that don't want these extra insns can detect
2784 * them using insn_is_zext.
2786 bool __weak bpf_jit_needs_zext(void)
2791 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2792 bool __weak bpf_jit_supports_subprog_tailcalls(void)
2797 bool __weak bpf_jit_supports_kfunc_call(void)
2802 bool __weak bpf_jit_supports_far_kfunc_call(void)
2807 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2808 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2810 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2816 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2817 void *addr1, void *addr2)
2822 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2824 return ERR_PTR(-ENOTSUPP);
2827 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
2832 #ifdef CONFIG_BPF_SYSCALL
2833 static int __init bpf_global_ma_init(void)
2837 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
2838 bpf_global_ma_set = !ret;
2841 late_initcall(bpf_global_ma_init);
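/* Editor note (illustrative): the allocator set up here backs kernel-side
 * object allocations such as the bpf_obj_new() kfunc path, which roughly does:
 *
 *      if (!bpf_global_ma_set)
 *              return NULL;
 *      p = bpf_mem_alloc(&bpf_global_ma, size);
 */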
2844 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2845 EXPORT_SYMBOL(bpf_stats_enabled_key);
2847 /* All definitions of tracepoints related to BPF. */
2848 #define CREATE_TRACE_POINTS
2849 #include <linux/bpf_trace.h>
2851 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2852 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);