1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Linux Socket Filter - Kernel level socket filtering
4  *
5  * Based on the design of the Berkeley Packet Filter. The new
6  * internal format has been designed by PLUMgrid:
7  *
8  *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9  *
10  * Authors:
11  *
12  *      Jay Schulist <jschlst@samba.org>
13  *      Alexei Starovoitov <ast@plumgrid.com>
14  *      Daniel Borkmann <dborkman@redhat.com>
15  *
16  * Andi Kleen - Fix a few bad bugs and races.
17  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18  */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37 #include <linux/nospec.h>
38 #include <linux/bpf_mem_alloc.h>
39 #include <linux/memcontrol.h>
40
41 #include <asm/barrier.h>
42 #include <asm/unaligned.h>
43
44 /* Registers */
45 #define BPF_R0  regs[BPF_REG_0]
46 #define BPF_R1  regs[BPF_REG_1]
47 #define BPF_R2  regs[BPF_REG_2]
48 #define BPF_R3  regs[BPF_REG_3]
49 #define BPF_R4  regs[BPF_REG_4]
50 #define BPF_R5  regs[BPF_REG_5]
51 #define BPF_R6  regs[BPF_REG_6]
52 #define BPF_R7  regs[BPF_REG_7]
53 #define BPF_R8  regs[BPF_REG_8]
54 #define BPF_R9  regs[BPF_REG_9]
55 #define BPF_R10 regs[BPF_REG_10]
56
57 /* Named registers */
58 #define DST     regs[insn->dst_reg]
59 #define SRC     regs[insn->src_reg]
60 #define FP      regs[BPF_REG_FP]
61 #define AX      regs[BPF_REG_AX]
62 #define ARG1    regs[BPF_REG_ARG1]
63 #define CTX     regs[BPF_REG_CTX]
64 #define OFF     insn->off
65 #define IMM     insn->imm
66
67 struct bpf_mem_alloc bpf_global_ma;
68 bool bpf_global_ma_set;
69
70 /* No hurry in this branch
71  *
72  * Exported for the bpf jit load helper.
73  */
74 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
75 {
76         u8 *ptr = NULL;
77
78         if (k >= SKF_NET_OFF) {
79                 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
80         } else if (k >= SKF_LL_OFF) {
81                 if (unlikely(!skb_mac_header_was_set(skb)))
82                         return NULL;
83                 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
84         }
85         if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
86                 return ptr;
87
88         return NULL;
89 }
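/* Illustrative example: a classic BPF load with k = SKF_NET_OFF + 12
 * takes the first branch above and returns a pointer 12 bytes past the
 * network header, while k = SKF_LL_OFF + 6 takes the second branch and
 * points 6 bytes into the link-layer (MAC) header, provided the MAC
 * header has been set and the access stays within the linear skb data.
 */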
90
91 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
92 {
93         gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
94         struct bpf_prog_aux *aux;
95         struct bpf_prog *fp;
96
97         size = round_up(size, PAGE_SIZE);
98         fp = __vmalloc(size, gfp_flags);
99         if (fp == NULL)
100                 return NULL;
101
102         aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
103         if (aux == NULL) {
104                 vfree(fp);
105                 return NULL;
106         }
107         fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
108         if (!fp->active) {
109                 vfree(fp);
110                 kfree(aux);
111                 return NULL;
112         }
113
114         fp->pages = size / PAGE_SIZE;
115         fp->aux = aux;
116         fp->aux->prog = fp;
117         fp->jit_requested = ebpf_jit_enabled();
118         fp->blinding_requested = bpf_jit_blinding_enabled(fp);
119 #ifdef CONFIG_CGROUP_BPF
120         aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
121 #endif
122
123         INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
124         mutex_init(&fp->aux->used_maps_mutex);
125         mutex_init(&fp->aux->dst_mutex);
126
127         return fp;
128 }
129
130 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
131 {
132         gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
133         struct bpf_prog *prog;
134         int cpu;
135
136         prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
137         if (!prog)
138                 return NULL;
139
140         prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
141         if (!prog->stats) {
142                 free_percpu(prog->active);
143                 kfree(prog->aux);
144                 vfree(prog);
145                 return NULL;
146         }
147
148         for_each_possible_cpu(cpu) {
149                 struct bpf_prog_stats *pstats;
150
151                 pstats = per_cpu_ptr(prog->stats, cpu);
152                 u64_stats_init(&pstats->syncp);
153         }
154         return prog;
155 }
156 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
157
158 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
159 {
160         if (!prog->aux->nr_linfo || !prog->jit_requested)
161                 return 0;
162
163         prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
164                                           sizeof(*prog->aux->jited_linfo),
165                                           bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
166         if (!prog->aux->jited_linfo)
167                 return -ENOMEM;
168
169         return 0;
170 }
171
172 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
173 {
174         if (prog->aux->jited_linfo &&
175             (!prog->jited || !prog->aux->jited_linfo[0])) {
176                 kvfree(prog->aux->jited_linfo);
177                 prog->aux->jited_linfo = NULL;
178         }
179
180         kfree(prog->aux->kfunc_tab);
181         prog->aux->kfunc_tab = NULL;
182 }
183
184 /* The jit engine is responsible for providing an array
185  * for insn_off to the jited_off mapping (insn_to_jit_off).
186  *
187  * The idx to this array is the insn_off.  Hence, the insn_off
188  * here is relative to the prog itself instead of the main prog.
189  * This array has one entry for each xlated bpf insn.
190  *
191  * jited_off is the byte off to the end of the jited insn.
192  *
193  * Hence, with
194  * insn_start:
195  *      The first bpf insn off of the prog.  The insn off
196  *      here is relative to the main prog.
197  *      e.g. if prog is a subprog, insn_start > 0
198  * linfo_idx:
199  *      The prog's idx to prog->aux->linfo and jited_linfo
200  *
201  * jited_linfo[linfo_idx] = prog->bpf_func
202  *
203  * For i > linfo_idx,
204  *
205  * jited_linfo[i] = prog->bpf_func +
206  *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
207  */
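/* Illustrative example (hypothetical numbers): for a subprog with
 * insn_start = 10 and a line info entry at linfo[1].insn_off = 12,
 * the function below computes
 *
 *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *
 * i.e. bpf_func plus the byte offset to the end of the jited image of
 * the xlated insn just before off 12, which is exactly where the jited
 * code for the insn at off 12 begins.
 */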
208 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
209                                const u32 *insn_to_jit_off)
210 {
211         u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
212         const struct bpf_line_info *linfo;
213         void **jited_linfo;
214
215         if (!prog->aux->jited_linfo)
216                 /* Userspace did not provide linfo */
217                 return;
218
219         linfo_idx = prog->aux->linfo_idx;
220         linfo = &prog->aux->linfo[linfo_idx];
221         insn_start = linfo[0].insn_off;
222         insn_end = insn_start + prog->len;
223
224         jited_linfo = &prog->aux->jited_linfo[linfo_idx];
225         jited_linfo[0] = prog->bpf_func;
226
227         nr_linfo = prog->aux->nr_linfo - linfo_idx;
228
229         for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
230                 /* The verifier ensures that linfo[i].insn_off is
231                  * strictly increasing
232                  */
233                 jited_linfo[i] = prog->bpf_func +
234                         insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
235 }
236
237 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
238                                   gfp_t gfp_extra_flags)
239 {
240         gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
241         struct bpf_prog *fp;
242         u32 pages;
243
244         size = round_up(size, PAGE_SIZE);
245         pages = size / PAGE_SIZE;
246         if (pages <= fp_old->pages)
247                 return fp_old;
248
249         fp = __vmalloc(size, gfp_flags);
250         if (fp) {
251                 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
252                 fp->pages = pages;
253                 fp->aux->prog = fp;
254
255                 /* We keep fp->aux from fp_old around in the new
256                  * reallocated structure.
257                  */
258                 fp_old->aux = NULL;
259                 fp_old->stats = NULL;
260                 fp_old->active = NULL;
261                 __bpf_prog_free(fp_old);
262         }
263
264         return fp;
265 }
266
267 void __bpf_prog_free(struct bpf_prog *fp)
268 {
269         if (fp->aux) {
270                 mutex_destroy(&fp->aux->used_maps_mutex);
271                 mutex_destroy(&fp->aux->dst_mutex);
272                 kfree(fp->aux->poke_tab);
273                 kfree(fp->aux);
274         }
275         free_percpu(fp->stats);
276         free_percpu(fp->active);
277         vfree(fp);
278 }
279
280 int bpf_prog_calc_tag(struct bpf_prog *fp)
281 {
282         const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
283         u32 raw_size = bpf_prog_tag_scratch_size(fp);
284         u32 digest[SHA1_DIGEST_WORDS];
285         u32 ws[SHA1_WORKSPACE_WORDS];
286         u32 i, bsize, psize, blocks;
287         struct bpf_insn *dst;
288         bool was_ld_map;
289         u8 *raw, *todo;
290         __be32 *result;
291         __be64 *bits;
292
293         raw = vmalloc(raw_size);
294         if (!raw)
295                 return -ENOMEM;
296
297         sha1_init(digest);
298         memset(ws, 0, sizeof(ws));
299
300         /* We need to take out the map fds for the digest calculation
301          * since they are unstable from the user space side.
302          */
303         dst = (void *)raw;
304         for (i = 0, was_ld_map = false; i < fp->len; i++) {
305                 dst[i] = fp->insnsi[i];
306                 if (!was_ld_map &&
307                     dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
308                     (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
309                      dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
310                         was_ld_map = true;
311                         dst[i].imm = 0;
312                 } else if (was_ld_map &&
313                            dst[i].code == 0 &&
314                            dst[i].dst_reg == 0 &&
315                            dst[i].src_reg == 0 &&
316                            dst[i].off == 0) {
317                         was_ld_map = false;
318                         dst[i].imm = 0;
319                 } else {
320                         was_ld_map = false;
321                 }
322         }
323
324         psize = bpf_prog_insn_size(fp);
325         memset(&raw[psize], 0, raw_size - psize);
326         raw[psize++] = 0x80;
327
328         bsize  = round_up(psize, SHA1_BLOCK_SIZE);
329         blocks = bsize / SHA1_BLOCK_SIZE;
330         todo   = raw;
331         if (bsize - psize >= sizeof(__be64)) {
332                 bits = (__be64 *)(todo + bsize - sizeof(__be64));
333         } else {
334                 bits = (__be64 *)(todo + bsize + bits_offset);
335                 blocks++;
336         }
337         *bits = cpu_to_be64((psize - 1) << 3);
338
339         while (blocks--) {
340                 sha1_transform(digest, todo, ws);
341                 todo += SHA1_BLOCK_SIZE;
342         }
343
344         result = (__force __be32 *)digest;
345         for (i = 0; i < SHA1_DIGEST_WORDS; i++)
346                 result[i] = cpu_to_be32(digest[i]);
347         memcpy(fp->tag, result, sizeof(fp->tag));
348
349         vfree(raw);
350         return 0;
351 }
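/* Worked example of the padding above (hypothetical 7-insn program):
 * psize starts at 7 * sizeof(struct bpf_insn) = 56, the 0x80 terminator
 * bumps it to 57, and bsize = round_up(57, SHA1_BLOCK_SIZE) = 64. Since
 * 64 - 57 < sizeof(__be64), the length field spills into a second block
 * (blocks = 2) at byte offset 64 + 56, and *bits = 56 << 3 = 448, the
 * message length in bits before the terminator byte.
 */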
352
353 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
354                                 s32 end_new, s32 curr, const bool probe_pass)
355 {
356         const s64 imm_min = S32_MIN, imm_max = S32_MAX;
357         s32 delta = end_new - end_old;
358         s64 imm = insn->imm;
359
360         if (curr < pos && curr + imm + 1 >= end_old)
361                 imm += delta;
362         else if (curr >= end_new && curr + imm + 1 < end_new)
363                 imm -= delta;
364         if (imm < imm_min || imm > imm_max)
365                 return -ERANGE;
366         if (!probe_pass)
367                 insn->imm = imm;
368         return 0;
369 }
370
371 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
372                                 s32 end_new, s32 curr, const bool probe_pass)
373 {
374         s64 off_min, off_max, off;
375         s32 delta = end_new - end_old;
376
377         if (insn->code == (BPF_JMP32 | BPF_JA)) {
378                 off = insn->imm;
379                 off_min = S32_MIN;
380                 off_max = S32_MAX;
381         } else {
382                 off = insn->off;
383                 off_min = S16_MIN;
384                 off_max = S16_MAX;
385         }
386
387         if (curr < pos && curr + off + 1 >= end_old)
388                 off += delta;
389         else if (curr >= end_new && curr + off + 1 < end_new)
390                 off -= delta;
391         if (off < off_min || off > off_max)
392                 return -ERANGE;
393         if (!probe_pass) {
394                 if (insn->code == (BPF_JMP32 | BPF_JA))
395                         insn->imm = off;
396                 else
397                         insn->off = off;
398         }
399         return 0;
400 }
401
402 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
403                             s32 end_new, const bool probe_pass)
404 {
405         u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
406         struct bpf_insn *insn = prog->insnsi;
407         int ret = 0;
408
409         for (i = 0; i < insn_cnt; i++, insn++) {
410                 u8 code;
411
412                 /* In the probing pass we still operate on the original,
413                  * unpatched image in order to check overflows before we
414                  * do any other adjustments. Therefore skip the patchlet.
415                  */
416                 if (probe_pass && i == pos) {
417                         i = end_new;
418                         insn = prog->insnsi + end_old;
419                 }
420                 if (bpf_pseudo_func(insn)) {
421                         ret = bpf_adj_delta_to_imm(insn, pos, end_old,
422                                                    end_new, i, probe_pass);
423                         if (ret)
424                                 return ret;
425                         continue;
426                 }
427                 code = insn->code;
428                 if ((BPF_CLASS(code) != BPF_JMP &&
429                      BPF_CLASS(code) != BPF_JMP32) ||
430                     BPF_OP(code) == BPF_EXIT)
431                         continue;
432                 /* Adjust offset of jmps if we cross patch boundaries. */
433                 if (BPF_OP(code) == BPF_CALL) {
434                         if (insn->src_reg != BPF_PSEUDO_CALL)
435                                 continue;
436                         ret = bpf_adj_delta_to_imm(insn, pos, end_old,
437                                                    end_new, i, probe_pass);
438                 } else {
439                         ret = bpf_adj_delta_to_off(insn, pos, end_old,
440                                                    end_new, i, probe_pass);
441                 }
442                 if (ret)
443                         break;
444         }
445
446         return ret;
447 }
448
449 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
450 {
451         struct bpf_line_info *linfo;
452         u32 i, nr_linfo;
453
454         nr_linfo = prog->aux->nr_linfo;
455         if (!nr_linfo || !delta)
456                 return;
457
458         linfo = prog->aux->linfo;
459
460         for (i = 0; i < nr_linfo; i++)
461                 if (off < linfo[i].insn_off)
462                         break;
463
464         /* Push all off < linfo[i].insn_off by delta */
465         for (; i < nr_linfo; i++)
466                 linfo[i].insn_off += delta;
467 }
468
469 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
470                                        const struct bpf_insn *patch, u32 len)
471 {
472         u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
473         const u32 cnt_max = S16_MAX;
474         struct bpf_prog *prog_adj;
475         int err;
476
477         /* Since our patchlet doesn't expand the image, we're done. */
478         if (insn_delta == 0) {
479                 memcpy(prog->insnsi + off, patch, sizeof(*patch));
480                 return prog;
481         }
482
483         insn_adj_cnt = prog->len + insn_delta;
484
485         /* Reject anything that would potentially let the insn->off
486          * target overflow when we have excessive program expansions.
487          * We need to probe here before we do any reallocation, since
488          * afterwards we may no longer be able to fail.
489          */
490         if (insn_adj_cnt > cnt_max &&
491             (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
492                 return ERR_PTR(err);
493
494         /* Several new instructions need to be inserted. Make room
495          * for them. Likely, there's no need for a new allocation as
496          * the last page could have large enough tailroom.
497          */
498         prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
499                                     GFP_USER);
500         if (!prog_adj)
501                 return ERR_PTR(-ENOMEM);
502
503         prog_adj->len = insn_adj_cnt;
504
505         /* Patching happens in 3 steps:
506          *
507          * 1) Move over tail of insnsi from next instruction onwards,
508          *    so we can patch the single target insn with one or more
509          *    new ones (patching is always from 1 to n insns, n > 0).
510          * 2) Inject new instructions at the target location.
511          * 3) Adjust branch offsets if necessary.
512          */
513         insn_rest = insn_adj_cnt - off - len;
514
515         memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
516                 sizeof(*patch) * insn_rest);
517         memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
518
519         /* We are guaranteed not to fail at this point; otherwise
520          * the ship has sailed and we cannot revert to the original
521          * state. An overflow cannot happen at this point.
522          */
523         BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
524
525         bpf_adj_linfo(prog_adj, off, insn_delta);
526
527         return prog_adj;
528 }
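/* Illustrative walk-through (hypothetical numbers): patching the insn at
 * off = 5 of a 10-insn program with a len = 3 patchlet gives
 * insn_delta = 2 and insn_adj_cnt = 12. Step 1 moves the 4 trailing
 * insns from index 6 up to index 8, step 2 copies the 3 patch insns to
 * indexes 5..7, and step 3 lets bpf_adj_branches() grow by 2 any jump
 * or pseudo call whose target crosses the patched region.
 */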
529
530 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
531 {
532         /* Branch offsets can't overflow when the program is shrinking, no need
533          * to call bpf_adj_branches(..., true) here
534          */
535         memmove(prog->insnsi + off, prog->insnsi + off + cnt,
536                 sizeof(struct bpf_insn) * (prog->len - off - cnt));
537         prog->len -= cnt;
538
539         return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
540 }
541
542 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
543 {
544         int i;
545
546         for (i = 0; i < fp->aux->func_cnt; i++)
547                 bpf_prog_kallsyms_del(fp->aux->func[i]);
548 }
549
550 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
551 {
552         bpf_prog_kallsyms_del_subprogs(fp);
553         bpf_prog_kallsyms_del(fp);
554 }
555
556 #ifdef CONFIG_BPF_JIT
557 /* All BPF JIT sysctl knobs here. */
558 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
559 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
560 int bpf_jit_harden   __read_mostly;
561 long bpf_jit_limit   __read_mostly;
562 long bpf_jit_limit_max __read_mostly;
563
564 static void
565 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
566 {
567         WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
568
569         prog->aux->ksym.start = (unsigned long) prog->bpf_func;
570         prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
571 }
572
573 static void
574 bpf_prog_ksym_set_name(struct bpf_prog *prog)
575 {
576         char *sym = prog->aux->ksym.name;
577         const char *end = sym + KSYM_NAME_LEN;
578         const struct btf_type *type;
579         const char *func_name;
580
581         BUILD_BUG_ON(sizeof("bpf_prog_") +
582                      sizeof(prog->tag) * 2 +
583                      /* The name is null terminated.
584                       * We would need +1 for the '_' preceding
585                       * the name.  However, the null character
586                       * is double counted between the name and the
587                       * sizeof("bpf_prog_") above, so we omit
588                       * the +1 here.
589                       */
590                      sizeof(prog->aux->name) > KSYM_NAME_LEN);
591
592         sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
593         sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
594
595         /* prog->aux->name will be ignored if full btf name is available */
596         if (prog->aux->func_info_cnt) {
597                 type = btf_type_by_id(prog->aux->btf,
598                                       prog->aux->func_info[prog->aux->func_idx].type_id);
599                 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
600                 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
601                 return;
602         }
603
604         if (prog->aux->name[0])
605                 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
606         else
607                 *sym = 0;
608 }
609
610 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
611 {
612         return container_of(n, struct bpf_ksym, tnode)->start;
613 }
614
615 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
616                                           struct latch_tree_node *b)
617 {
618         return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
619 }
620
621 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
622 {
623         unsigned long val = (unsigned long)key;
624         const struct bpf_ksym *ksym;
625
626         ksym = container_of(n, struct bpf_ksym, tnode);
627
628         if (val < ksym->start)
629                 return -1;
630         /* Ensure that we detect return addresses as part of the program when
631          * the final instruction is a call and the program is part of a stack
632          * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
633          */
634         if (val > ksym->end)
635                 return  1;
636
637         return 0;
638 }
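/* Illustrative consequence of the '>' comparison above: if the final
 * jited instruction of a program is a call, the return address for that
 * call equals ksym->end, and treating val == ksym->end as a match keeps
 * such return addresses resolving to the program during stack traces.
 */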
639
640 static const struct latch_tree_ops bpf_tree_ops = {
641         .less   = bpf_tree_less,
642         .comp   = bpf_tree_comp,
643 };
644
645 static DEFINE_SPINLOCK(bpf_lock);
646 static LIST_HEAD(bpf_kallsyms);
647 static struct latch_tree_root bpf_tree __cacheline_aligned;
648
649 void bpf_ksym_add(struct bpf_ksym *ksym)
650 {
651         spin_lock_bh(&bpf_lock);
652         WARN_ON_ONCE(!list_empty(&ksym->lnode));
653         list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
654         latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
655         spin_unlock_bh(&bpf_lock);
656 }
657
658 static void __bpf_ksym_del(struct bpf_ksym *ksym)
659 {
660         if (list_empty(&ksym->lnode))
661                 return;
662
663         latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
664         list_del_rcu(&ksym->lnode);
665 }
666
667 void bpf_ksym_del(struct bpf_ksym *ksym)
668 {
669         spin_lock_bh(&bpf_lock);
670         __bpf_ksym_del(ksym);
671         spin_unlock_bh(&bpf_lock);
672 }
673
674 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
675 {
676         return fp->jited && !bpf_prog_was_classic(fp);
677 }
678
679 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
680 {
681         if (!bpf_prog_kallsyms_candidate(fp) ||
682             !bpf_capable())
683                 return;
684
685         bpf_prog_ksym_set_addr(fp);
686         bpf_prog_ksym_set_name(fp);
687         fp->aux->ksym.prog = true;
688
689         bpf_ksym_add(&fp->aux->ksym);
690 }
691
692 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
693 {
694         if (!bpf_prog_kallsyms_candidate(fp))
695                 return;
696
697         bpf_ksym_del(&fp->aux->ksym);
698 }
699
700 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
701 {
702         struct latch_tree_node *n;
703
704         n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
705         return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
706 }
707
708 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
709                                  unsigned long *off, char *sym)
710 {
711         struct bpf_ksym *ksym;
712         char *ret = NULL;
713
714         rcu_read_lock();
715         ksym = bpf_ksym_find(addr);
716         if (ksym) {
717                 unsigned long symbol_start = ksym->start;
718                 unsigned long symbol_end = ksym->end;
719
720                 strncpy(sym, ksym->name, KSYM_NAME_LEN);
721
722                 ret = sym;
723                 if (size)
724                         *size = symbol_end - symbol_start;
725                 if (off)
726                         *off  = addr - symbol_start;
727         }
728         rcu_read_unlock();
729
730         return ret;
731 }
732
733 bool is_bpf_text_address(unsigned long addr)
734 {
735         bool ret;
736
737         rcu_read_lock();
738         ret = bpf_ksym_find(addr) != NULL;
739         rcu_read_unlock();
740
741         return ret;
742 }
743
744 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
745 {
746         struct bpf_ksym *ksym = bpf_ksym_find(addr);
747
748         return ksym && ksym->prog ?
749                container_of(ksym, struct bpf_prog_aux, ksym)->prog :
750                NULL;
751 }
752
753 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
754 {
755         const struct exception_table_entry *e = NULL;
756         struct bpf_prog *prog;
757
758         rcu_read_lock();
759         prog = bpf_prog_ksym_find(addr);
760         if (!prog)
761                 goto out;
762         if (!prog->aux->num_exentries)
763                 goto out;
764
765         e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
766 out:
767         rcu_read_unlock();
768         return e;
769 }
770
771 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
772                     char *sym)
773 {
774         struct bpf_ksym *ksym;
775         unsigned int it = 0;
776         int ret = -ERANGE;
777
778         if (!bpf_jit_kallsyms_enabled())
779                 return ret;
780
781         rcu_read_lock();
782         list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
783                 if (it++ != symnum)
784                         continue;
785
786                 strncpy(sym, ksym->name, KSYM_NAME_LEN);
787
788                 *value = ksym->start;
789                 *type  = BPF_SYM_ELF_TYPE;
790
791                 ret = 0;
792                 break;
793         }
794         rcu_read_unlock();
795
796         return ret;
797 }
798
799 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
800                                 struct bpf_jit_poke_descriptor *poke)
801 {
802         struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
803         static const u32 poke_tab_max = 1024;
804         u32 slot = prog->aux->size_poke_tab;
805         u32 size = slot + 1;
806
807         if (size > poke_tab_max)
808                 return -ENOSPC;
809         if (poke->tailcall_target || poke->tailcall_target_stable ||
810             poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
811                 return -EINVAL;
812
813         switch (poke->reason) {
814         case BPF_POKE_REASON_TAIL_CALL:
815                 if (!poke->tail_call.map)
816                         return -EINVAL;
817                 break;
818         default:
819                 return -EINVAL;
820         }
821
822         tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
823         if (!tab)
824                 return -ENOMEM;
825
826         memcpy(&tab[slot], poke, sizeof(*poke));
827         prog->aux->size_poke_tab = size;
828         prog->aux->poke_tab = tab;
829
830         return slot;
831 }
832
833 /*
834  * BPF program pack allocator.
835  *
836  * Most BPF programs are pretty small. Allocating a whole page for each
837  * program is sometimes a waste. Many small BPF programs also add pressure
838  * to the instruction TLB. To solve this issue, we introduce a BPF program pack
839  * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
840  * to host BPF programs.
841  */
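/* Illustrative sizing (assuming x86 with a 2MB PMD and one NUMA node):
 * BPF_PROG_PACK_SIZE is 2MB and BPF_PROG_CHUNK_SIZE is 64 bytes, so one
 * pack hosts BPF_PROG_CHUNK_COUNT = 32768 chunks, and a 200-byte image
 * occupies BPF_PROG_SIZE_TO_NBITS(200) = 4 consecutive bits of the
 * pack's bitmap.
 */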
842 #define BPF_PROG_CHUNK_SHIFT    6
843 #define BPF_PROG_CHUNK_SIZE     (1 << BPF_PROG_CHUNK_SHIFT)
844 #define BPF_PROG_CHUNK_MASK     (~(BPF_PROG_CHUNK_SIZE - 1))
845
846 struct bpf_prog_pack {
847         struct list_head list;
848         void *ptr;
849         unsigned long bitmap[];
850 };
851
852 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
853 {
854         memset(area, 0, size);
855 }
856
857 #define BPF_PROG_SIZE_TO_NBITS(size)    (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
858
859 static DEFINE_MUTEX(pack_mutex);
860 static LIST_HEAD(pack_list);
861
862 /* PMD_SIZE is not available in some special configs, e.g. ARCH=arm with
863  * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
864  */
865 #ifdef PMD_SIZE
866 #define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
867 #else
868 #define BPF_PROG_PACK_SIZE PAGE_SIZE
869 #endif
870
871 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
872
873 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
874 {
875         struct bpf_prog_pack *pack;
876
877         pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
878                        GFP_KERNEL);
879         if (!pack)
880                 return NULL;
881         pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
882         if (!pack->ptr) {
883                 kfree(pack);
884                 return NULL;
885         }
886         bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
887         bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
888         list_add_tail(&pack->list, &pack_list);
889
890         set_vm_flush_reset_perms(pack->ptr);
891         set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
892         return pack;
893 }
894
895 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
896 {
897         unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
898         struct bpf_prog_pack *pack;
899         unsigned long pos;
900         void *ptr = NULL;
901
902         mutex_lock(&pack_mutex);
903         if (size > BPF_PROG_PACK_SIZE) {
904                 size = round_up(size, PAGE_SIZE);
905                 ptr = bpf_jit_alloc_exec(size);
906                 if (ptr) {
907                         bpf_fill_ill_insns(ptr, size);
908                         set_vm_flush_reset_perms(ptr);
909                         set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
910                 }
911                 goto out;
912         }
913         list_for_each_entry(pack, &pack_list, list) {
914                 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
915                                                  nbits, 0);
916                 if (pos < BPF_PROG_CHUNK_COUNT)
917                         goto found_free_area;
918         }
919
920         pack = alloc_new_pack(bpf_fill_ill_insns);
921         if (!pack)
922                 goto out;
923
924         pos = 0;
925
926 found_free_area:
927         bitmap_set(pack->bitmap, pos, nbits);
928         ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
929
930 out:
931         mutex_unlock(&pack_mutex);
932         return ptr;
933 }
934
935 void bpf_prog_pack_free(struct bpf_binary_header *hdr)
936 {
937         struct bpf_prog_pack *pack = NULL, *tmp;
938         unsigned int nbits;
939         unsigned long pos;
940
941         mutex_lock(&pack_mutex);
942         if (hdr->size > BPF_PROG_PACK_SIZE) {
943                 bpf_jit_free_exec(hdr);
944                 goto out;
945         }
946
947         list_for_each_entry(tmp, &pack_list, list) {
948                 if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
949                         pack = tmp;
950                         break;
951                 }
952         }
953
954         if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
955                 goto out;
956
957         nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
958         pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
959
960         WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
961                   "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
962
963         bitmap_clear(pack->bitmap, pos, nbits);
964         if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
965                                        BPF_PROG_CHUNK_COUNT, 0) == 0) {
966                 list_del(&pack->list);
967                 bpf_jit_free_exec(pack->ptr);
968                 kfree(pack);
969         }
970 out:
971         mutex_unlock(&pack_mutex);
972 }
973
974 static atomic_long_t bpf_jit_current;
975
976 /* Can be overridden by an arch's JIT compiler if it has a custom,
977  * dedicated BPF backend memory area, or if neither of the two
978  * below apply.
979  */
980 u64 __weak bpf_jit_alloc_exec_limit(void)
981 {
982 #if defined(MODULES_VADDR)
983         return MODULES_END - MODULES_VADDR;
984 #else
985         return VMALLOC_END - VMALLOC_START;
986 #endif
987 }
988
989 static int __init bpf_jit_charge_init(void)
990 {
991         /* Only used as heuristic here to derive limit. */
992         bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
993         bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
994                                             PAGE_SIZE), LONG_MAX);
995         return 0;
996 }
997 pure_initcall(bpf_jit_charge_init);
998
999 int bpf_jit_charge_modmem(u32 size)
1000 {
1001         if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
1002                 if (!bpf_capable()) {
1003                         atomic_long_sub(size, &bpf_jit_current);
1004                         return -EPERM;
1005                 }
1006         }
1007
1008         return 0;
1009 }
1010
1011 void bpf_jit_uncharge_modmem(u32 size)
1012 {
1013         atomic_long_sub(size, &bpf_jit_current);
1014 }
1015
1016 void *__weak bpf_jit_alloc_exec(unsigned long size)
1017 {
1018         return module_alloc(size);
1019 }
1020
1021 void __weak bpf_jit_free_exec(void *addr)
1022 {
1023         module_memfree(addr);
1024 }
1025
1026 struct bpf_binary_header *
1027 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1028                      unsigned int alignment,
1029                      bpf_jit_fill_hole_t bpf_fill_ill_insns)
1030 {
1031         struct bpf_binary_header *hdr;
1032         u32 size, hole, start;
1033
1034         WARN_ON_ONCE(!is_power_of_2(alignment) ||
1035                      alignment > BPF_IMAGE_ALIGNMENT);
1036
1037         /* Most BPF filters are really small, but if some of them
1038          * fill a page, allow at least 128 extra bytes to insert a
1039          * random section of illegal instructions.
1040          */
1041         size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1042
1043         if (bpf_jit_charge_modmem(size))
1044                 return NULL;
1045         hdr = bpf_jit_alloc_exec(size);
1046         if (!hdr) {
1047                 bpf_jit_uncharge_modmem(size);
1048                 return NULL;
1049         }
1050
1051         /* Fill space with illegal/arch-dep instructions. */
1052         bpf_fill_ill_insns(hdr, size);
1053
1054         hdr->size = size;
1055         hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1056                      PAGE_SIZE - sizeof(*hdr));
1057         start = get_random_u32_below(hole) & ~(alignment - 1);
1058
1059         /* Leave a random number of instructions before BPF code. */
1060         *image_ptr = &hdr->image[start];
1061
1062         return hdr;
1063 }
1064
1065 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1066 {
1067         u32 size = hdr->size;
1068
1069         bpf_jit_free_exec(hdr);
1070         bpf_jit_uncharge_modmem(size);
1071 }
1072
1073 /* Allocate jit binary from bpf_prog_pack allocator.
1074  * Since the allocated memory is RO+X, the JIT engine cannot write directly
1075  * to the memory. To solve this problem, an RW buffer is also allocated
1076  * at the same time. The JIT engine should calculate offsets based on the
1077  * RO memory address, but write the JITed program to the RW buffer. Once the
1078  * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1079  * the JITed program to the RO memory.
1080  */
1081 struct bpf_binary_header *
1082 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1083                           unsigned int alignment,
1084                           struct bpf_binary_header **rw_header,
1085                           u8 **rw_image,
1086                           bpf_jit_fill_hole_t bpf_fill_ill_insns)
1087 {
1088         struct bpf_binary_header *ro_header;
1089         u32 size, hole, start;
1090
1091         WARN_ON_ONCE(!is_power_of_2(alignment) ||
1092                      alignment > BPF_IMAGE_ALIGNMENT);
1093
1094         /* add 16 bytes for a random section of illegal instructions */
1095         size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1096
1097         if (bpf_jit_charge_modmem(size))
1098                 return NULL;
1099         ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1100         if (!ro_header) {
1101                 bpf_jit_uncharge_modmem(size);
1102                 return NULL;
1103         }
1104
1105         *rw_header = kvmalloc(size, GFP_KERNEL);
1106         if (!*rw_header) {
1107                 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1108                 bpf_prog_pack_free(ro_header);
1109                 bpf_jit_uncharge_modmem(size);
1110                 return NULL;
1111         }
1112
1113         /* Fill space with illegal/arch-dep instructions. */
1114         bpf_fill_ill_insns(*rw_header, size);
1115         (*rw_header)->size = size;
1116
1117         hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1118                      BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1119         start = get_random_u32_below(hole) & ~(alignment - 1);
1120
1121         *image_ptr = &ro_header->image[start];
1122         *rw_image = &(*rw_header)->image[start];
1123
1124         return ro_header;
1125 }
1126
1127 /* Copy JITed text from rw_header to its final location, the ro_header. */
1128 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1129                                  struct bpf_binary_header *ro_header,
1130                                  struct bpf_binary_header *rw_header)
1131 {
1132         void *ptr;
1133
1134         ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1135
1136         kvfree(rw_header);
1137
1138         if (IS_ERR(ptr)) {
1139                 bpf_prog_pack_free(ro_header);
1140                 return PTR_ERR(ptr);
1141         }
1142         return 0;
1143 }
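/* Simplified usage sketch of the pack API from an arch JIT's point of
 * view (error handling trimmed, not lifted from any particular arch):
 *
 *	header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					   &rw_header, &rw_image,
 *					   jit_fill_hole);
 *	... emit instructions into rw_image, computing addresses
 *	    against the final RO location in image ...
 *	if (bpf_jit_binary_pack_finalize(prog, header, rw_header))
 *		goto out;	(finalize already freed both buffers)
 *	prog->bpf_func = (void *)image;
 *
 * If the JIT itself fails before finalize, bpf_jit_binary_pack_free()
 * below releases both the RO chunk and the RW buffer, once
 * ro_header->size has been made valid via bpf_arch_text_copy().
 */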
1144
1145 /* bpf_jit_binary_pack_free is called in two different scenarios:
1146  *   1) when the program is freed after the JIT has completed;
1147  *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1148  * For case 2), we need to free both the RO memory and the RW buffer.
1149  *
1150  * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1151  * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1152  * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1153  * bpf_arch_text_copy (when jit fails).
1154  */
1155 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1156                               struct bpf_binary_header *rw_header)
1157 {
1158         u32 size = ro_header->size;
1159
1160         bpf_prog_pack_free(ro_header);
1161         kvfree(rw_header);
1162         bpf_jit_uncharge_modmem(size);
1163 }
1164
1165 struct bpf_binary_header *
1166 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1167 {
1168         unsigned long real_start = (unsigned long)fp->bpf_func;
1169         unsigned long addr;
1170
1171         addr = real_start & BPF_PROG_CHUNK_MASK;
1172         return (void *)addr;
1173 }
1174
1175 static inline struct bpf_binary_header *
1176 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1177 {
1178         unsigned long real_start = (unsigned long)fp->bpf_func;
1179         unsigned long addr;
1180
1181         addr = real_start & PAGE_MASK;
1182         return (void *)addr;
1183 }
1184
1185 /* This symbol is only overridden by archs that have different
1186  * requirements than the usual eBPF JITs, f.e. when they only
1187  * implement cBPF JIT, do not set images read-only, etc.
1188  */
1189 void __weak bpf_jit_free(struct bpf_prog *fp)
1190 {
1191         if (fp->jited) {
1192                 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1193
1194                 bpf_jit_binary_free(hdr);
1195                 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1196         }
1197
1198         bpf_prog_unlock_free(fp);
1199 }
1200
1201 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1202                           const struct bpf_insn *insn, bool extra_pass,
1203                           u64 *func_addr, bool *func_addr_fixed)
1204 {
1205         s16 off = insn->off;
1206         s32 imm = insn->imm;
1207         u8 *addr;
1208         int err;
1209
1210         *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1211         if (!*func_addr_fixed) {
1212                 /* Place-holder address till the last pass has collected
1213                  * all addresses for JITed subprograms in which case we
1214                  * can pick them up from prog->aux.
1215                  */
1216                 if (!extra_pass)
1217                         addr = NULL;
1218                 else if (prog->aux->func &&
1219                          off >= 0 && off < prog->aux->func_cnt)
1220                         addr = (u8 *)prog->aux->func[off]->bpf_func;
1221                 else
1222                         return -EINVAL;
1223         } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
1224                    bpf_jit_supports_far_kfunc_call()) {
1225                 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
1226                 if (err)
1227                         return err;
1228         } else {
1229                 /* Address of a BPF helper call. Since part of the core
1230                  * kernel, it's always at a fixed location. __bpf_call_base
1231                  * and the helper with imm relative to it are both in core
1232                  * kernel.
1233                  */
1234                 addr = (u8 *)__bpf_call_base + imm;
1235         }
1236
1237         *func_addr = (unsigned long)addr;
1238         return 0;
1239 }
1240
1241 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1242                               const struct bpf_insn *aux,
1243                               struct bpf_insn *to_buff,
1244                               bool emit_zext)
1245 {
1246         struct bpf_insn *to = to_buff;
1247         u32 imm_rnd = get_random_u32();
1248         s16 off;
1249
1250         BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
1251         BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1252
1253         /* Constraints on AX register:
1254          *
1255          * AX register is inaccessible from user space. It is mapped in
1256          * all JITs, and used here for constant blinding rewrites. It is
1257          * typically "stateless" meaning its contents are only valid within
1258          * the executed instruction, but not across several instructions.
1259          * There are a few exceptions however which are further detailed
1260          * below.
1261          *
1262          * Constant blinding is only used by JITs, not in the interpreter.
1263          * The interpreter uses AX in some occasions as a local temporary
1264          * register e.g. in DIV or MOD instructions.
1265          *
1266          * In restricted circumstances, the verifier can also use the AX
1267          * register for rewrites as long as they do not interfere with
1268          * the above cases!
1269          */
1270         if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1271                 goto out;
1272
1273         if (from->imm == 0 &&
1274             (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
1275              from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1276                 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1277                 goto out;
1278         }
1279
1280         switch (from->code) {
1281         case BPF_ALU | BPF_ADD | BPF_K:
1282         case BPF_ALU | BPF_SUB | BPF_K:
1283         case BPF_ALU | BPF_AND | BPF_K:
1284         case BPF_ALU | BPF_OR  | BPF_K:
1285         case BPF_ALU | BPF_XOR | BPF_K:
1286         case BPF_ALU | BPF_MUL | BPF_K:
1287         case BPF_ALU | BPF_MOV | BPF_K:
1288         case BPF_ALU | BPF_DIV | BPF_K:
1289         case BPF_ALU | BPF_MOD | BPF_K:
1290                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1291                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1292                 *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1293                 break;
1294
1295         case BPF_ALU64 | BPF_ADD | BPF_K:
1296         case BPF_ALU64 | BPF_SUB | BPF_K:
1297         case BPF_ALU64 | BPF_AND | BPF_K:
1298         case BPF_ALU64 | BPF_OR  | BPF_K:
1299         case BPF_ALU64 | BPF_XOR | BPF_K:
1300         case BPF_ALU64 | BPF_MUL | BPF_K:
1301         case BPF_ALU64 | BPF_MOV | BPF_K:
1302         case BPF_ALU64 | BPF_DIV | BPF_K:
1303         case BPF_ALU64 | BPF_MOD | BPF_K:
1304                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1305                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1306                 *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1307                 break;
1308
1309         case BPF_JMP | BPF_JEQ  | BPF_K:
1310         case BPF_JMP | BPF_JNE  | BPF_K:
1311         case BPF_JMP | BPF_JGT  | BPF_K:
1312         case BPF_JMP | BPF_JLT  | BPF_K:
1313         case BPF_JMP | BPF_JGE  | BPF_K:
1314         case BPF_JMP | BPF_JLE  | BPF_K:
1315         case BPF_JMP | BPF_JSGT | BPF_K:
1316         case BPF_JMP | BPF_JSLT | BPF_K:
1317         case BPF_JMP | BPF_JSGE | BPF_K:
1318         case BPF_JMP | BPF_JSLE | BPF_K:
1319         case BPF_JMP | BPF_JSET | BPF_K:
1320                 /* Account for the extra offset in case of a backjump. */
1321                 off = from->off;
1322                 if (off < 0)
1323                         off -= 2;
1324                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1325                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1326                 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1327                 break;
1328
1329         case BPF_JMP32 | BPF_JEQ  | BPF_K:
1330         case BPF_JMP32 | BPF_JNE  | BPF_K:
1331         case BPF_JMP32 | BPF_JGT  | BPF_K:
1332         case BPF_JMP32 | BPF_JLT  | BPF_K:
1333         case BPF_JMP32 | BPF_JGE  | BPF_K:
1334         case BPF_JMP32 | BPF_JLE  | BPF_K:
1335         case BPF_JMP32 | BPF_JSGT | BPF_K:
1336         case BPF_JMP32 | BPF_JSLT | BPF_K:
1337         case BPF_JMP32 | BPF_JSGE | BPF_K:
1338         case BPF_JMP32 | BPF_JSLE | BPF_K:
1339         case BPF_JMP32 | BPF_JSET | BPF_K:
1340                 /* Account for the extra offset in case of a backjump. */
1341                 off = from->off;
1342                 if (off < 0)
1343                         off -= 2;
1344                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1345                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1346                 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1347                                       off);
1348                 break;
1349
1350         case BPF_LD | BPF_IMM | BPF_DW:
1351                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1352                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1353                 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1354                 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1355                 break;
1356         case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1357                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1358                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1359                 if (emit_zext)
1360                         *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1361                 *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1362                 break;
1363
1364         case BPF_ST | BPF_MEM | BPF_DW:
1365         case BPF_ST | BPF_MEM | BPF_W:
1366         case BPF_ST | BPF_MEM | BPF_H:
1367         case BPF_ST | BPF_MEM | BPF_B:
1368                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1369                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1370                 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1371                 break;
1372         }
1373 out:
1374         return to - to_buff;
1375 }
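/* Illustrative rewrite (hypothetical imm_rnd = 0x1234): a single
 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 0x42) is blinded into roughly
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234 ^ 0x42)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x1234)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_AX)
 *
 * so the user-supplied constant never appears verbatim in the image,
 * yet AX holds the original 0x42 by the time the ADD executes.
 */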
1376
1377 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1378                                               gfp_t gfp_extra_flags)
1379 {
1380         gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1381         struct bpf_prog *fp;
1382
1383         fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1384         if (fp != NULL) {
1385                 /* aux->prog still points to the fp_other one, so
1386                  * when promoting the clone to the real program,
1387                  * this still needs to be adapted.
1388                  */
1389                 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1390         }
1391
1392         return fp;
1393 }
1394
1395 static void bpf_prog_clone_free(struct bpf_prog *fp)
1396 {
1397         /* aux was stolen by the other clone, so we cannot free
1398          * it from this path! It will be freed eventually by the
1399          * other program on release.
1400          *
1401          * At this point, we don't need a deferred release since
1402          * clone is guaranteed to not be locked.
1403          */
1404         fp->aux = NULL;
1405         fp->stats = NULL;
1406         fp->active = NULL;
1407         __bpf_prog_free(fp);
1408 }
1409
1410 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1411 {
1412         /* We have to repoint aux->prog to self, as we don't
1413          * know whether fp here is the clone or the original.
1414          */
1415         fp->aux->prog = fp;
1416         bpf_prog_clone_free(fp_other);
1417 }
1418
1419 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1420 {
1421         struct bpf_insn insn_buff[16], aux[2];
1422         struct bpf_prog *clone, *tmp;
1423         int insn_delta, insn_cnt;
1424         struct bpf_insn *insn;
1425         int i, rewritten;
1426
1427         if (!prog->blinding_requested || prog->blinded)
1428                 return prog;
1429
1430         clone = bpf_prog_clone_create(prog, GFP_USER);
1431         if (!clone)
1432                 return ERR_PTR(-ENOMEM);
1433
1434         insn_cnt = clone->len;
1435         insn = clone->insnsi;
1436
1437         for (i = 0; i < insn_cnt; i++, insn++) {
1438                 if (bpf_pseudo_func(insn)) {
1439                         /* ld_imm64 with an address of bpf subprog is not
1440                          * a user controlled constant. Don't randomize it,
1441                          * since it will conflict with jit_subprogs() logic.
1442                          */
1443                         insn++;
1444                         i++;
1445                         continue;
1446                 }
1447
1448                 /* We temporarily need to hold the original ld64 insn
1449                  * so that we can still access the first part in the
1450                  * second blinding run.
1451                  */
1452                 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1453                     insn[1].code == 0)
1454                         memcpy(aux, insn, sizeof(aux));
1455
1456                 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1457                                                 clone->aux->verifier_zext);
1458                 if (!rewritten)
1459                         continue;
1460
1461                 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1462                 if (IS_ERR(tmp)) {
1463                         /* Patching may have repointed aux->prog during
1464                          * realloc from the original one, so we need to
1465                          * fix it up here on error.
1466                          */
1467                         bpf_jit_prog_release_other(prog, clone);
1468                         return tmp;
1469                 }
1470
1471                 clone = tmp;
1472                 insn_delta = rewritten - 1;
1473
1474                 /* Walk new program and skip insns we just inserted. */
1475                 insn = clone->insnsi + i + insn_delta;
1476                 insn_cnt += insn_delta;
1477                 i        += insn_delta;
1478         }
1479
1480         clone->blinded = 1;
1481         return clone;
1482 }
1483 #endif /* CONFIG_BPF_JIT */
1484
1485 /* Base function for offset calculation. Needs to go into .text section,
1486  * therefore keeping it non-static as well; will also be used by JITs
1487  * anyway later on, so do not let the compiler omit it. This also needs
1488  * to go into kallsyms for correlation from e.g. bpftool, so naming
1489  * must not change.
1490  */
1491 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1492 {
1493         return 0;
1494 }
1495 EXPORT_SYMBOL_GPL(__bpf_call_base);
1496
1497 /* All UAPI available opcodes. */
1498 #define BPF_INSN_MAP(INSN_2, INSN_3)            \
1499         /* 32 bit ALU operations. */            \
1500         /*   Register based. */                 \
1501         INSN_3(ALU, ADD,  X),                   \
1502         INSN_3(ALU, SUB,  X),                   \
1503         INSN_3(ALU, AND,  X),                   \
1504         INSN_3(ALU, OR,   X),                   \
1505         INSN_3(ALU, LSH,  X),                   \
1506         INSN_3(ALU, RSH,  X),                   \
1507         INSN_3(ALU, XOR,  X),                   \
1508         INSN_3(ALU, MUL,  X),                   \
1509         INSN_3(ALU, MOV,  X),                   \
1510         INSN_3(ALU, ARSH, X),                   \
1511         INSN_3(ALU, DIV,  X),                   \
1512         INSN_3(ALU, MOD,  X),                   \
1513         INSN_2(ALU, NEG),                       \
1514         INSN_3(ALU, END, TO_BE),                \
1515         INSN_3(ALU, END, TO_LE),                \
1516         /*   Immediate based. */                \
1517         INSN_3(ALU, ADD,  K),                   \
1518         INSN_3(ALU, SUB,  K),                   \
1519         INSN_3(ALU, AND,  K),                   \
1520         INSN_3(ALU, OR,   K),                   \
1521         INSN_3(ALU, LSH,  K),                   \
1522         INSN_3(ALU, RSH,  K),                   \
1523         INSN_3(ALU, XOR,  K),                   \
1524         INSN_3(ALU, MUL,  K),                   \
1525         INSN_3(ALU, MOV,  K),                   \
1526         INSN_3(ALU, ARSH, K),                   \
1527         INSN_3(ALU, DIV,  K),                   \
1528         INSN_3(ALU, MOD,  K),                   \
1529         /* 64 bit ALU operations. */            \
1530         /*   Register based. */                 \
1531         INSN_3(ALU64, ADD,  X),                 \
1532         INSN_3(ALU64, SUB,  X),                 \
1533         INSN_3(ALU64, AND,  X),                 \
1534         INSN_3(ALU64, OR,   X),                 \
1535         INSN_3(ALU64, LSH,  X),                 \
1536         INSN_3(ALU64, RSH,  X),                 \
1537         INSN_3(ALU64, XOR,  X),                 \
1538         INSN_3(ALU64, MUL,  X),                 \
1539         INSN_3(ALU64, MOV,  X),                 \
1540         INSN_3(ALU64, ARSH, X),                 \
1541         INSN_3(ALU64, DIV,  X),                 \
1542         INSN_3(ALU64, MOD,  X),                 \
1543         INSN_2(ALU64, NEG),                     \
1544         INSN_3(ALU64, END, TO_LE),              \
1545         /*   Immediate based. */                \
1546         INSN_3(ALU64, ADD,  K),                 \
1547         INSN_3(ALU64, SUB,  K),                 \
1548         INSN_3(ALU64, AND,  K),                 \
1549         INSN_3(ALU64, OR,   K),                 \
1550         INSN_3(ALU64, LSH,  K),                 \
1551         INSN_3(ALU64, RSH,  K),                 \
1552         INSN_3(ALU64, XOR,  K),                 \
1553         INSN_3(ALU64, MUL,  K),                 \
1554         INSN_3(ALU64, MOV,  K),                 \
1555         INSN_3(ALU64, ARSH, K),                 \
1556         INSN_3(ALU64, DIV,  K),                 \
1557         INSN_3(ALU64, MOD,  K),                 \
1558         /* Call instruction. */                 \
1559         INSN_2(JMP, CALL),                      \
1560         /* Exit instruction. */                 \
1561         INSN_2(JMP, EXIT),                      \
1562         /* 32-bit Jump instructions. */         \
1563         /*   Register based. */                 \
1564         INSN_3(JMP32, JEQ,  X),                 \
1565         INSN_3(JMP32, JNE,  X),                 \
1566         INSN_3(JMP32, JGT,  X),                 \
1567         INSN_3(JMP32, JLT,  X),                 \
1568         INSN_3(JMP32, JGE,  X),                 \
1569         INSN_3(JMP32, JLE,  X),                 \
1570         INSN_3(JMP32, JSGT, X),                 \
1571         INSN_3(JMP32, JSLT, X),                 \
1572         INSN_3(JMP32, JSGE, X),                 \
1573         INSN_3(JMP32, JSLE, X),                 \
1574         INSN_3(JMP32, JSET, X),                 \
1575         /*   Immediate based. */                \
1576         INSN_3(JMP32, JEQ,  K),                 \
1577         INSN_3(JMP32, JNE,  K),                 \
1578         INSN_3(JMP32, JGT,  K),                 \
1579         INSN_3(JMP32, JLT,  K),                 \
1580         INSN_3(JMP32, JGE,  K),                 \
1581         INSN_3(JMP32, JLE,  K),                 \
1582         INSN_3(JMP32, JSGT, K),                 \
1583         INSN_3(JMP32, JSLT, K),                 \
1584         INSN_3(JMP32, JSGE, K),                 \
1585         INSN_3(JMP32, JSLE, K),                 \
1586         INSN_3(JMP32, JSET, K),                 \
1587         /* Jump instructions. */                \
1588         /*   Register based. */                 \
1589         INSN_3(JMP, JEQ,  X),                   \
1590         INSN_3(JMP, JNE,  X),                   \
1591         INSN_3(JMP, JGT,  X),                   \
1592         INSN_3(JMP, JLT,  X),                   \
1593         INSN_3(JMP, JGE,  X),                   \
1594         INSN_3(JMP, JLE,  X),                   \
1595         INSN_3(JMP, JSGT, X),                   \
1596         INSN_3(JMP, JSLT, X),                   \
1597         INSN_3(JMP, JSGE, X),                   \
1598         INSN_3(JMP, JSLE, X),                   \
1599         INSN_3(JMP, JSET, X),                   \
1600         /*   Immediate based. */                \
1601         INSN_3(JMP, JEQ,  K),                   \
1602         INSN_3(JMP, JNE,  K),                   \
1603         INSN_3(JMP, JGT,  K),                   \
1604         INSN_3(JMP, JLT,  K),                   \
1605         INSN_3(JMP, JGE,  K),                   \
1606         INSN_3(JMP, JLE,  K),                   \
1607         INSN_3(JMP, JSGT, K),                   \
1608         INSN_3(JMP, JSLT, K),                   \
1609         INSN_3(JMP, JSGE, K),                   \
1610         INSN_3(JMP, JSLE, K),                   \
1611         INSN_3(JMP, JSET, K),                   \
1612         INSN_2(JMP, JA),                        \
1613         INSN_2(JMP32, JA),                      \
1614         /* Store instructions. */               \
1615         /*   Register based. */                 \
1616         INSN_3(STX, MEM,  B),                   \
1617         INSN_3(STX, MEM,  H),                   \
1618         INSN_3(STX, MEM,  W),                   \
1619         INSN_3(STX, MEM,  DW),                  \
1620         INSN_3(STX, ATOMIC, W),                 \
1621         INSN_3(STX, ATOMIC, DW),                \
1622         /*   Immediate based. */                \
1623         INSN_3(ST, MEM, B),                     \
1624         INSN_3(ST, MEM, H),                     \
1625         INSN_3(ST, MEM, W),                     \
1626         INSN_3(ST, MEM, DW),                    \
1627         /* Load instructions. */                \
1628         /*   Register based. */                 \
1629         INSN_3(LDX, MEM, B),                    \
1630         INSN_3(LDX, MEM, H),                    \
1631         INSN_3(LDX, MEM, W),                    \
1632         INSN_3(LDX, MEM, DW),                   \
1633         INSN_3(LDX, MEMSX, B),                  \
1634         INSN_3(LDX, MEMSX, H),                  \
1635         INSN_3(LDX, MEMSX, W),                  \
1636         /*   Immediate based. */                \
1637         INSN_3(LD, IMM, DW)
1638
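/* BPF_INSN_MAP() is an X-macro: the user supplies INSN_2()/INSN_3() and gets
 * one entry per listed opcode. For example, INSN_3(ALU64, ADD, X) expands via
 * BPF_INSN_3_TBL below to
 *   [BPF_ALU64 | BPF_ADD | BPF_X] = true,
 * and via BPF_INSN_3_LBL in the interpreter to the computed-goto label
 *   [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
 */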
1639 bool bpf_opcode_in_insntable(u8 code)
1640 {
1641 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1642 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1643         static const bool public_insntable[256] = {
1644                 [0 ... 255] = false,
1645                 /* Now overwrite non-defaults ... */
1646                 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1647                 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1648                 [BPF_LD | BPF_ABS | BPF_B] = true,
1649                 [BPF_LD | BPF_ABS | BPF_H] = true,
1650                 [BPF_LD | BPF_ABS | BPF_W] = true,
1651                 [BPF_LD | BPF_IND | BPF_B] = true,
1652                 [BPF_LD | BPF_IND | BPF_H] = true,
1653                 [BPF_LD | BPF_IND | BPF_W] = true,
1654         };
1655 #undef BPF_INSN_3_TBL
1656 #undef BPF_INSN_2_TBL
1657         return public_insntable[code];
1658 }
1659
1660 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1661 /**
1662  *      ___bpf_prog_run - run eBPF program on a given context
1663  *      @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1664  *      @insn: is the array of eBPF instructions
1665  *
1666  * Decode and execute eBPF instructions.
1667  *
1668  * Return: whatever value is in %BPF_R0 at program exit
1669  */
1670 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1671 {
1672 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1673 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1674         static const void * const jumptable[256] __annotate_jump_table = {
1675                 [0 ... 255] = &&default_label,
1676                 /* Now overwrite non-defaults ... */
1677                 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1678                 /* Non-UAPI available opcodes. */
1679                 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1680                 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1681                 [BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1682                 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1683                 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1684                 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1685                 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1686                 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1687                 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1688                 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1689         };
1690 #undef BPF_INSN_3_LBL
1691 #undef BPF_INSN_2_LBL
1692         u32 tail_call_cnt = 0;
1693
1694 #define CONT     ({ insn++; goto select_insn; })
1695 #define CONT_JMP ({ insn++; goto select_insn; })
1696
1697 select_insn:
1698         goto *jumptable[insn->code];
1699
1700         /* Explicitly mask the register-based shift amounts with 63 or 31
1701          * to avoid undefined behavior. Normally this won't affect the
1702          * generated code; for example, on native 64-bit archs such
1703          * as x86-64 or arm64, the compiler optimizes the AND away for
1704          * the interpreter. In case of JITs, each of the JIT backends compiles
1705          * the BPF shift operations to machine instructions which produce
1706          * implementation-defined results in such a case; the resulting
1707          * contents of the register may be arbitrary, but program behaviour
1708          * as a whole remains defined. In other words, in case of JIT backends,
1709          * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1710          */
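        /* For example, a 64-bit register shift BPF_ALU64 | BPF_LSH | BPF_X
         * with SRC == 70 is executed as DST <<= (70 & 63), i.e. DST <<= 6,
         * while the 32-bit variants mask with 31 instead.
         */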
1711         /* ALU (shifts) */
1712 #define SHT(OPCODE, OP)                                 \
1713         ALU64_##OPCODE##_X:                             \
1714                 DST = DST OP (SRC & 63);                \
1715                 CONT;                                   \
1716         ALU_##OPCODE##_X:                               \
1717                 DST = (u32) DST OP ((u32) SRC & 31);    \
1718                 CONT;                                   \
1719         ALU64_##OPCODE##_K:                             \
1720                 DST = DST OP IMM;                       \
1721                 CONT;                                   \
1722         ALU_##OPCODE##_K:                               \
1723                 DST = (u32) DST OP (u32) IMM;           \
1724                 CONT;
1725         /* ALU (rest) */
1726 #define ALU(OPCODE, OP)                                 \
1727         ALU64_##OPCODE##_X:                             \
1728                 DST = DST OP SRC;                       \
1729                 CONT;                                   \
1730         ALU_##OPCODE##_X:                               \
1731                 DST = (u32) DST OP (u32) SRC;           \
1732                 CONT;                                   \
1733         ALU64_##OPCODE##_K:                             \
1734                 DST = DST OP IMM;                       \
1735                 CONT;                                   \
1736         ALU_##OPCODE##_K:                               \
1737                 DST = (u32) DST OP (u32) IMM;           \
1738                 CONT;
1739         ALU(ADD,  +)
1740         ALU(SUB,  -)
1741         ALU(AND,  &)
1742         ALU(OR,   |)
1743         ALU(XOR,  ^)
1744         ALU(MUL,  *)
1745         SHT(LSH, <<)
1746         SHT(RSH, >>)
1747 #undef SHT
1748 #undef ALU
1749         ALU_NEG:
1750                 DST = (u32) -DST;
1751                 CONT;
1752         ALU64_NEG:
1753                 DST = -DST;
1754                 CONT;
1755         ALU_MOV_X:
1756                 switch (OFF) {
1757                 case 0:
1758                         DST = (u32) SRC;
1759                         break;
1760                 case 8:
1761                         DST = (u32)(s8) SRC;
1762                         break;
1763                 case 16:
1764                         DST = (u32)(s16) SRC;
1765                         break;
1766                 }
1767                 CONT;
1768         ALU_MOV_K:
1769                 DST = (u32) IMM;
1770                 CONT;
1771         ALU64_MOV_X:
1772                 switch (OFF) {
1773                 case 0:
1774                         DST = SRC;
1775                         break;
1776                 case 8:
1777                         DST = (s8) SRC;
1778                         break;
1779                 case 16:
1780                         DST = (s16) SRC;
1781                         break;
1782                 case 32:
1783                         DST = (s32) SRC;
1784                         break;
1785                 }
1786                 CONT;
1787         ALU64_MOV_K:
1788                 DST = IMM;
1789                 CONT;
1790         LD_IMM_DW:
1791                 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1792                 insn++;
1793                 CONT;
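        /* LD_IMM_DW consumes two instruction slots: insn[0].imm carries the
         * low 32 bits and insn[1].imm the high 32 bits, hence the extra
         * insn++ above. E.g. loading 0x1122334455667788 encodes
         * imm = 0x55667788 in the first slot and 0x11223344 in the second.
         */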
1794         ALU_ARSH_X:
1795                 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1796                 CONT;
1797         ALU_ARSH_K:
1798                 DST = (u64) (u32) (((s32) DST) >> IMM);
1799                 CONT;
1800         ALU64_ARSH_X:
1801                 (*(s64 *) &DST) >>= (SRC & 63);
1802                 CONT;
1803         ALU64_ARSH_K:
1804                 (*(s64 *) &DST) >>= IMM;
1805                 CONT;
1806         ALU64_MOD_X:
1807                 switch (OFF) {
1808                 case 0:
1809                         div64_u64_rem(DST, SRC, &AX);
1810                         DST = AX;
1811                         break;
1812                 case 1:
1813                         AX = div64_s64(DST, SRC);
1814                         DST = DST - AX * SRC;
1815                         break;
1816                 }
1817                 CONT;
1818         ALU_MOD_X:
1819                 switch (OFF) {
1820                 case 0:
1821                         AX = (u32) DST;
1822                         DST = do_div(AX, (u32) SRC);
1823                         break;
1824                 case 1:
1825                         AX = abs((s32)DST);
1826                         AX = do_div(AX, abs((s32)SRC));
1827                         if ((s32)DST < 0)
1828                                 DST = (u32)-AX;
1829                         else
1830                                 DST = (u32)AX;
1831                         break;
1832                 }
1833                 CONT;
1834         ALU64_MOD_K:
1835                 switch (OFF) {
1836                 case 0:
1837                         div64_u64_rem(DST, IMM, &AX);
1838                         DST = AX;
1839                         break;
1840                 case 1:
1841                         AX = div64_s64(DST, IMM);
1842                         DST = DST - AX * IMM;
1843                         break;
1844                 }
1845                 CONT;
1846         ALU_MOD_K:
1847                 switch (OFF) {
1848                 case 0:
1849                         AX = (u32) DST;
1850                         DST = do_div(AX, (u32) IMM);
1851                         break;
1852                 case 1:
1853                         AX = abs((s32)DST);
1854                         AX = do_div(AX, abs((s32)IMM));
1855                         if ((s32)DST < 0)
1856                                 DST = (u32)-AX;
1857                         else
1858                                 DST = (u32)AX;
1859                         break;
1860                 }
1861                 CONT;
1862         ALU64_DIV_X:
1863                 switch (OFF) {
1864                 case 0:
1865                         DST = div64_u64(DST, SRC);
1866                         break;
1867                 case 1:
1868                         DST = div64_s64(DST, SRC);
1869                         break;
1870                 }
1871                 CONT;
1872         ALU_DIV_X:
1873                 switch (OFF) {
1874                 case 0:
1875                         AX = (u32) DST;
1876                         do_div(AX, (u32) SRC);
1877                         DST = (u32) AX;
1878                         break;
1879                 case 1:
1880                         AX = abs((s32)DST);
1881                         do_div(AX, abs((s32)SRC));
1882                         if (((s32)DST < 0) == ((s32)SRC < 0))
1883                                 DST = (u32)AX;
1884                         else
1885                                 DST = (u32)-AX;
1886                         break;
1887                 }
1888                 CONT;
1889         ALU64_DIV_K:
1890                 switch (OFF) {
1891                 case 0:
1892                         DST = div64_u64(DST, IMM);
1893                         break;
1894                 case 1:
1895                         DST = div64_s64(DST, IMM);
1896                         break;
1897                 }
1898                 CONT;
1899         ALU_DIV_K:
1900                 switch (OFF) {
1901                 case 0:
1902                         AX = (u32) DST;
1903                         do_div(AX, (u32) IMM);
1904                         DST = (u32) AX;
1905                         break;
1906                 case 1:
1907                         AX = abs((s32)DST);
1908                         do_div(AX, abs((s32)IMM));
1909                         if (((s32)DST < 0) == ((s32)IMM < 0))
1910                                 DST = (u32)AX;
1911                         else
1912                                 DST = (u32)-AX;
1913                         break;
1914                 }
1915                 CONT;
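        /* For the DIV/MOD handlers above, insn->off selects the flavour:
         * off == 0 is the unsigned operation, off == 1 the signed one. E.g.
         * ALU_DIV_X with off == 1, DST = -7 and SRC = 2 divides the
         * magnitudes (7 / 2 = 3) and restores the sign, yielding -3
         * (truncation toward zero).
         */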
1916         ALU_END_TO_BE:
1917                 switch (IMM) {
1918                 case 16:
1919                         DST = (__force u16) cpu_to_be16(DST);
1920                         break;
1921                 case 32:
1922                         DST = (__force u32) cpu_to_be32(DST);
1923                         break;
1924                 case 64:
1925                         DST = (__force u64) cpu_to_be64(DST);
1926                         break;
1927                 }
1928                 CONT;
1929         ALU_END_TO_LE:
1930                 switch (IMM) {
1931                 case 16:
1932                         DST = (__force u16) cpu_to_le16(DST);
1933                         break;
1934                 case 32:
1935                         DST = (__force u32) cpu_to_le32(DST);
1936                         break;
1937                 case 64:
1938                         DST = (__force u64) cpu_to_le64(DST);
1939                         break;
1940                 }
1941                 CONT;
1942         ALU64_END_TO_LE:
1943                 switch (IMM) {
1944                 case 16:
1945                         DST = (__force u16) __swab16(DST);
1946                         break;
1947                 case 32:
1948                         DST = (__force u32) __swab32(DST);
1949                         break;
1950                 case 64:
1951                         DST = (__force u64) __swab64(DST);
1952                         break;
1953                 }
1954                 CONT;
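        /* The BPF_END handlers above use IMM as the operand width. E.g. on a
         * little-endian host, ALU_END_TO_BE with IMM == 16 turns 0x1234 into
         * 0x3412 and truncates to 16 bits, while on big-endian it is a plain
         * truncation. ALU64_END_TO_LE is the unconditional byte swap.
         */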
1955
1956         /* CALL */
1957         JMP_CALL:
1958                 /* Function call scratches BPF_R1-BPF_R5 registers,
1959                  * preserves BPF_R6-BPF_R9, and stores return value
1960                  * into BPF_R0.
1961                  */
1962                 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1963                                                        BPF_R4, BPF_R5);
1964                 CONT;
1965
1966         JMP_CALL_ARGS:
1967                 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1968                                                             BPF_R3, BPF_R4,
1969                                                             BPF_R5,
1970                                                             insn + insn->off + 1);
1971                 CONT;
1972
1973         JMP_TAIL_CALL: {
1974                 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1975                 struct bpf_array *array = container_of(map, struct bpf_array, map);
1976                 struct bpf_prog *prog;
1977                 u32 index = BPF_R3;
1978
1979                 if (unlikely(index >= array->map.max_entries))
1980                         goto out;
1981
1982                 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1983                         goto out;
1984
1985                 tail_call_cnt++;
1986
1987                 prog = READ_ONCE(array->ptrs[index]);
1988                 if (!prog)
1989                         goto out;
1990
1991                 /* ARG1 at this point is guaranteed to point to CTX from
1992                  * the verifier side due to the fact that the tail call is
1993                  * handled like a helper, that is, bpf_tail_call_proto,
1994                  * where arg1_type is ARG_PTR_TO_CTX.
1995                  */
1996                 insn = prog->insnsi;
1997                 goto select_insn;
1998 out:
1999                 CONT;
2000         }
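        /* Tail calls behave like a jump, not a call: on success the remainder
         * of the current program is abandoned and execution restarts at the
         * target's first instruction, bounded by MAX_TAIL_CALL_CNT chained
         * calls. On any failure (index out of range, empty slot, limit hit)
         * we fall through to the next instruction. A rough BPF-side usage is
         * bpf_tail_call(ctx, &prog_array, index); followed by fallback code.
         */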
2001         JMP_JA:
2002                 insn += insn->off;
2003                 CONT;
2004         JMP32_JA:
2005                 insn += insn->imm;
2006                 CONT;
2007         JMP_EXIT:
2008                 return BPF_R0;
2009         /* JMP */
2010 #define COND_JMP(SIGN, OPCODE, CMP_OP)                          \
2011         JMP_##OPCODE##_X:                                       \
2012                 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {     \
2013                         insn += insn->off;                      \
2014                         CONT_JMP;                               \
2015                 }                                               \
2016                 CONT;                                           \
2017         JMP32_##OPCODE##_X:                                     \
2018                 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {     \
2019                         insn += insn->off;                      \
2020                         CONT_JMP;                               \
2021                 }                                               \
2022                 CONT;                                           \
2023         JMP_##OPCODE##_K:                                       \
2024                 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {     \
2025                         insn += insn->off;                      \
2026                         CONT_JMP;                               \
2027                 }                                               \
2028                 CONT;                                           \
2029         JMP32_##OPCODE##_K:                                     \
2030                 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {     \
2031                         insn += insn->off;                      \
2032                         CONT_JMP;                               \
2033                 }                                               \
2034                 CONT;
2035         COND_JMP(u, JEQ, ==)
2036         COND_JMP(u, JNE, !=)
2037         COND_JMP(u, JGT, >)
2038         COND_JMP(u, JLT, <)
2039         COND_JMP(u, JGE, >=)
2040         COND_JMP(u, JLE, <=)
2041         COND_JMP(u, JSET, &)
2042         COND_JMP(s, JSGT, >)
2043         COND_JMP(s, JSLT, <)
2044         COND_JMP(s, JSGE, >=)
2045         COND_JMP(s, JSLE, <=)
2046 #undef COND_JMP
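        /* Each COND_JMP() instance above emits four handlers; e.g.
         * COND_JMP(s, JSGT, >) yields JMP_JSGT_X/K comparing as s64 and
         * JMP32_JSGT_X/K comparing only the low 32 bits as s32, with a taken
         * branch adding insn->off to the current instruction.
         */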
2047         /* ST, STX and LDX */
2048         ST_NOSPEC:
2049                 /* Speculation barrier for mitigating Speculative Store Bypass.
2050                  * In case of arm64, we rely on the firmware mitigation as
2051                  * controlled via the ssbd kernel parameter. Whenever the
2052                  * mitigation is enabled, it works for all of the kernel code
2053                  * with no need to provide any additional instructions here.
2054                  * In case of x86, we use 'lfence' insn for mitigation. We
2055                  * reuse preexisting logic from Spectre v1 mitigation that
2056                  * happens to produce the required code on x86 for v4 as well.
2057                  */
2058                 barrier_nospec();
2059                 CONT;
2060 #define LDST(SIZEOP, SIZE)                                              \
2061         STX_MEM_##SIZEOP:                                               \
2062                 *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
2063                 CONT;                                                   \
2064         ST_MEM_##SIZEOP:                                                \
2065                 *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
2066                 CONT;                                                   \
2067         LDX_MEM_##SIZEOP:                                               \
2068                 DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
2069                 CONT;                                                   \
2070         LDX_PROBE_MEM_##SIZEOP:                                         \
2071                 bpf_probe_read_kernel_common(&DST, sizeof(SIZE),        \
2072                               (const void *)(long) (SRC + insn->off));  \
2073                 DST = *((SIZE *)&DST);                                  \
2074                 CONT;
2075
2076         LDST(B,   u8)
2077         LDST(H,  u16)
2078         LDST(W,  u32)
2079         LDST(DW, u64)
2080 #undef LDST
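        /* E.g. LDST(W, u32) expands to the STX_MEM_W / ST_MEM_W stores, the
         * plain LDX_MEM_W load and LDX_PROBE_MEM_W, where the latter goes
         * through bpf_probe_read_kernel_common() so a faulting kernel address
         * does not oops; the destination is zeroed instead.
         */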
2081
2082 #define LDSX(SIZEOP, SIZE)                                              \
2083         LDX_MEMSX_##SIZEOP:                                             \
2084                 DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
2085                 CONT;                                                   \
2086         LDX_PROBE_MEMSX_##SIZEOP:                                       \
2087                 bpf_probe_read_kernel_common(&DST, sizeof(SIZE),                \
2088                                       (const void *)(long) (SRC + insn->off));  \
2089                 DST = *((SIZE *)&DST);                                  \
2090                 CONT;
2091
2092         LDSX(B,   s8)
2093         LDSX(H,  s16)
2094         LDSX(W,  s32)
2095 #undef LDSX
2096
2097 #define ATOMIC_ALU_OP(BOP, KOP)                                         \
2098                 case BOP:                                               \
2099                         if (BPF_SIZE(insn->code) == BPF_W)              \
2100                                 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2101                                              (DST + insn->off));        \
2102                         else                                            \
2103                                 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2104                                                (DST + insn->off));      \
2105                         break;                                          \
2106                 case BOP | BPF_FETCH:                                   \
2107                         if (BPF_SIZE(insn->code) == BPF_W)              \
2108                                 SRC = (u32) atomic_fetch_##KOP(         \
2109                                         (u32) SRC,                      \
2110                                         (atomic_t *)(unsigned long) (DST + insn->off)); \
2111                         else                                            \
2112                                 SRC = (u64) atomic64_fetch_##KOP(       \
2113                                         (u64) SRC,                      \
2114                                         (atomic64_t *)(unsigned long) (DST + insn->off)); \
2115                         break;
2116
2117         STX_ATOMIC_DW:
2118         STX_ATOMIC_W:
2119                 switch (IMM) {
2120                 ATOMIC_ALU_OP(BPF_ADD, add)
2121                 ATOMIC_ALU_OP(BPF_AND, and)
2122                 ATOMIC_ALU_OP(BPF_OR, or)
2123                 ATOMIC_ALU_OP(BPF_XOR, xor)
2124 #undef ATOMIC_ALU_OP
2125
2126                 case BPF_XCHG:
2127                         if (BPF_SIZE(insn->code) == BPF_W)
2128                                 SRC = (u32) atomic_xchg(
2129                                         (atomic_t *)(unsigned long) (DST + insn->off),
2130                                         (u32) SRC);
2131                         else
2132                                 SRC = (u64) atomic64_xchg(
2133                                         (atomic64_t *)(unsigned long) (DST + insn->off),
2134                                         (u64) SRC);
2135                         break;
2136                 case BPF_CMPXCHG:
2137                         if (BPF_SIZE(insn->code) == BPF_W)
2138                                 BPF_R0 = (u32) atomic_cmpxchg(
2139                                         (atomic_t *)(unsigned long) (DST + insn->off),
2140                                         (u32) BPF_R0, (u32) SRC);
2141                         else
2142                                 BPF_R0 = (u64) atomic64_cmpxchg(
2143                                         (atomic64_t *)(unsigned long) (DST + insn->off),
2144                                         (u64) BPF_R0, (u64) SRC);
2145                         break;
2146
2147                 default:
2148                         goto default_label;
2149                 }
2150                 CONT;
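        /* For the atomic handlers above, the atomic opcode lives in insn->imm.
         * E.g. BPF_ADD | BPF_FETCH on a BPF_W access does
         * SRC = atomic_fetch_add((u32) SRC, dst_addr), i.e. the old memory
         * value is returned in the source register, while BPF_CMPXCHG
         * compares against BPF_R0 and leaves the old value there.
         */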
2151
2152         default_label:
2153                 /* If we ever reach this, we have a bug somewhere. Die hard here
2154                  * instead of just returning 0; we could be somewhere in a subprog,
2155                  * so execution could continue otherwise which we do /not/ want.
2156                  *
2157                  * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
2158                  */
2159                 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2160                         insn->code, insn->imm);
2161                 BUG_ON(1);
2162                 return 0;
2163 }
2164
2165 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2166 #define DEFINE_BPF_PROG_RUN(stack_size) \
2167 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2168 { \
2169         u64 stack[stack_size / sizeof(u64)]; \
2170         u64 regs[MAX_BPF_EXT_REG] = {}; \
2171 \
2172         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2173         ARG1 = (u64) (unsigned long) ctx; \
2174         return ___bpf_prog_run(regs, insn); \
2175 }
2176
2177 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2178 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2179 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2180                                       const struct bpf_insn *insn) \
2181 { \
2182         u64 stack[stack_size / sizeof(u64)]; \
2183         u64 regs[MAX_BPF_EXT_REG]; \
2184 \
2185         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2186         BPF_R1 = r1; \
2187         BPF_R2 = r2; \
2188         BPF_R3 = r3; \
2189         BPF_R4 = r4; \
2190         BPF_R5 = r5; \
2191         return ___bpf_prog_run(regs, insn); \
2192 }
2193
2194 #define EVAL1(FN, X) FN(X)
2195 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2196 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2197 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2198 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2199 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2200
2201 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2202 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2203 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2204
2205 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2206 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2207 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2208
2209 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2210
2211 static unsigned int (*interpreters[])(const void *ctx,
2212                                       const struct bpf_insn *insn) = {
2213 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2214 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2215 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2216 };
2217 #undef PROG_NAME_LIST
2218 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2219 static __maybe_unused
2220 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2221                            const struct bpf_insn *insn) = {
2222 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2223 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2224 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2225 };
2226 #undef PROG_NAME_LIST
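/* The interpreters above are specialized on stack size in 32-byte steps from
 * 32 up to 512 bytes. bpf_prog_select_func() below picks the entry via
 * round_up(stack_depth, 32) / 32 - 1; e.g. a verified stack depth of 100
 * rounds up to 128 and selects __bpf_prog_run128().
 */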
2227
2228 #ifdef CONFIG_BPF_SYSCALL
2229 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2230 {
2231         stack_depth = max_t(u32, stack_depth, 1);
2232         insn->off = (s16) insn->imm;
2233         insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2234                 __bpf_call_base_args;
2235         insn->code = BPF_JMP | BPF_CALL_ARGS;
2236 }
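/* bpf_patch_call_args() rewrites a bpf-to-bpf call for the interpreter: the
 * subprog offset is preserved in insn->off, while insn->imm becomes the
 * distance of the chosen __bpf_prog_run_argsNN() from __bpf_call_base_args,
 * so that JMP_CALL_ARGS above can dispatch straight into the callee's
 * instructions at insn + insn->off + 1.
 */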
2237 #endif
2238 #else
2239 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2240                                          const struct bpf_insn *insn)
2241 {
2242         /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2243          * is not working properly, so warn about it!
2244          */
2245         WARN_ON_ONCE(1);
2246         return 0;
2247 }
2248 #endif
2249
2250 bool bpf_prog_map_compatible(struct bpf_map *map,
2251                              const struct bpf_prog *fp)
2252 {
2253         enum bpf_prog_type prog_type = resolve_prog_type(fp);
2254         bool ret;
2255
2256         if (fp->kprobe_override)
2257                 return false;
2258
2259         /* XDP programs inserted into maps are not guaranteed to run on
2260          * a particular netdev (and can run outside driver context entirely
2261          * in the case of devmap and cpumap). Until device checks
2262          * are implemented, prohibit adding dev-bound programs to program maps.
2263          */
2264         if (bpf_prog_is_dev_bound(fp->aux))
2265                 return false;
2266
2267         spin_lock(&map->owner.lock);
2268         if (!map->owner.type) {
2269                 /* There's no owner yet where we could check for
2270                  * compatibility.
2271                  */
2272                 map->owner.type  = prog_type;
2273                 map->owner.jited = fp->jited;
2274                 map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2275                 ret = true;
2276         } else {
2277                 ret = map->owner.type  == prog_type &&
2278                       map->owner.jited == fp->jited &&
2279                       map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2280         }
2281         spin_unlock(&map->owner.lock);
2282
2283         return ret;
2284 }
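/* The first program added to a program map fixes the owner's type, jited
 * flag and xdp_has_frags; every later program must match. So, for example,
 * a JITed and an interpreted program cannot be mixed in one tail call map.
 */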
2285
2286 static int bpf_check_tail_call(const struct bpf_prog *fp)
2287 {
2288         struct bpf_prog_aux *aux = fp->aux;
2289         int i, ret = 0;
2290
2291         mutex_lock(&aux->used_maps_mutex);
2292         for (i = 0; i < aux->used_map_cnt; i++) {
2293                 struct bpf_map *map = aux->used_maps[i];
2294
2295                 if (!map_type_contains_progs(map))
2296                         continue;
2297
2298                 if (!bpf_prog_map_compatible(map, fp)) {
2299                         ret = -EINVAL;
2300                         goto out;
2301                 }
2302         }
2303
2304 out:
2305         mutex_unlock(&aux->used_maps_mutex);
2306         return ret;
2307 }
2308
2309 static void bpf_prog_select_func(struct bpf_prog *fp)
2310 {
2311 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2312         u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2313
2314         fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2315 #else
2316         fp->bpf_func = __bpf_prog_ret0_warn;
2317 #endif
2318 }
2319
2320 /**
2321  *      bpf_prog_select_runtime - select exec runtime for BPF program
2322  *      @fp: bpf_prog populated with BPF program
2323  *      @err: pointer to error variable
2324  *
2325  * Try to JIT eBPF program, if JIT is not available, use interpreter.
2326  * The BPF program will be executed via bpf_prog_run() function.
2327  *
2328  * Return: the &fp argument along with &err set to 0 for success or
2329  * a negative errno code on failure
2330  */
2331 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2332 {
2333         /* In case of BPF to BPF calls, the verifier did all the prep
2334          * work with regard to JITing, etc.
2335          */
2336         bool jit_needed = false;
2337
2338         if (fp->bpf_func)
2339                 goto finalize;
2340
2341         if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2342             bpf_prog_has_kfunc_call(fp))
2343                 jit_needed = true;
2344
2345         bpf_prog_select_func(fp);
2346
2347         /* eBPF JITs can rewrite the program in case constant
2348          * blinding is active. However, in case of error during
2349          * blinding, bpf_int_jit_compile() must always return a
2350          * valid program, which in this case would simply not
2351          * be JITed, but fall back to the interpreter.
2352          */
2353         if (!bpf_prog_is_offloaded(fp->aux)) {
2354                 *err = bpf_prog_alloc_jited_linfo(fp);
2355                 if (*err)
2356                         return fp;
2357
2358                 fp = bpf_int_jit_compile(fp);
2359                 bpf_prog_jit_attempt_done(fp);
2360                 if (!fp->jited && jit_needed) {
2361                         *err = -ENOTSUPP;
2362                         return fp;
2363                 }
2364         } else {
2365                 *err = bpf_prog_offload_compile(fp);
2366                 if (*err)
2367                         return fp;
2368         }
2369
2370 finalize:
2371         bpf_prog_lock_ro(fp);
2372
2373         /* The tail call compatibility check can only be done at
2374          * this late stage, as we need to determine whether we deal
2375          * with JITed or non-JITed program concatenations, and not
2376          * all eBPF JITs might immediately support all features.
2377          */
2378         *err = bpf_check_tail_call(fp);
2379
2380         return fp;
2381 }
2382 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2383
2384 static unsigned int __bpf_prog_ret1(const void *ctx,
2385                                     const struct bpf_insn *insn)
2386 {
2387         return 1;
2388 }
2389
2390 static struct bpf_prog_dummy {
2391         struct bpf_prog prog;
2392 } dummy_bpf_prog = {
2393         .prog = {
2394                 .bpf_func = __bpf_prog_ret1,
2395         },
2396 };
2397
2398 struct bpf_empty_prog_array bpf_empty_prog_array = {
2399         .null_prog = NULL,
2400 };
2401 EXPORT_SYMBOL(bpf_empty_prog_array);
2402
2403 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2404 {
2405         if (prog_cnt)
2406                 return kzalloc(sizeof(struct bpf_prog_array) +
2407                                sizeof(struct bpf_prog_array_item) *
2408                                (prog_cnt + 1),
2409                                flags);
2410
2411         return &bpf_empty_prog_array.hdr;
2412 }
2413
2414 void bpf_prog_array_free(struct bpf_prog_array *progs)
2415 {
2416         if (!progs || progs == &bpf_empty_prog_array.hdr)
2417                 return;
2418         kfree_rcu(progs, rcu);
2419 }
2420
2421 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2422 {
2423         struct bpf_prog_array *progs;
2424
2425         /* If RCU Tasks Trace grace period implies RCU grace period, there is
2426          * no need to call kfree_rcu(), just call kfree() directly.
2427          */
2428         progs = container_of(rcu, struct bpf_prog_array, rcu);
2429         if (rcu_trace_implies_rcu_gp())
2430                 kfree(progs);
2431         else
2432                 kfree_rcu(progs, rcu);
2433 }
2434
2435 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2436 {
2437         if (!progs || progs == &bpf_empty_prog_array.hdr)
2438                 return;
2439         call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2440 }
2441
2442 int bpf_prog_array_length(struct bpf_prog_array *array)
2443 {
2444         struct bpf_prog_array_item *item;
2445         u32 cnt = 0;
2446
2447         for (item = array->items; item->prog; item++)
2448                 if (item->prog != &dummy_bpf_prog.prog)
2449                         cnt++;
2450         return cnt;
2451 }
2452
2453 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2454 {
2455         struct bpf_prog_array_item *item;
2456
2457         for (item = array->items; item->prog; item++)
2458                 if (item->prog != &dummy_bpf_prog.prog)
2459                         return false;
2460         return true;
2461 }
2462
2463 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2464                                      u32 *prog_ids,
2465                                      u32 request_cnt)
2466 {
2467         struct bpf_prog_array_item *item;
2468         int i = 0;
2469
2470         for (item = array->items; item->prog; item++) {
2471                 if (item->prog == &dummy_bpf_prog.prog)
2472                         continue;
2473                 prog_ids[i] = item->prog->aux->id;
2474                 if (++i == request_cnt) {
2475                         item++;
2476                         break;
2477                 }
2478         }
2479
2480         return !!(item->prog);
2481 }
2482
2483 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2484                                 __u32 __user *prog_ids, u32 cnt)
2485 {
2486         unsigned long err = 0;
2487         bool nospc;
2488         u32 *ids;
2489
2490         /* users of this function are doing:
2491          * cnt = bpf_prog_array_length();
2492          * if (cnt > 0)
2493          *     bpf_prog_array_copy_to_user(..., cnt);
2494          * so below kcalloc doesn't need extra cnt > 0 check.
2495          */
2496         ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2497         if (!ids)
2498                 return -ENOMEM;
2499         nospc = bpf_prog_array_copy_core(array, ids, cnt);
2500         err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2501         kfree(ids);
2502         if (err)
2503                 return -EFAULT;
2504         if (nospc)
2505                 return -ENOSPC;
2506         return 0;
2507 }
2508
2509 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2510                                 struct bpf_prog *old_prog)
2511 {
2512         struct bpf_prog_array_item *item;
2513
2514         for (item = array->items; item->prog; item++)
2515                 if (item->prog == old_prog) {
2516                         WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2517                         break;
2518                 }
2519 }
2520
2521 /**
2522  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2523  *                                   index into the program array with
2524  *                                   a dummy no-op program.
2525  * @array: a bpf_prog_array
2526  * @index: the index of the program to replace
2527  *
2528  * Skips over dummy programs (they are not counted) when calculating
2529  * the position of the program to replace.
2530  *
2531  * Return:
2532  * * 0          - Success
2533  * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2534  * * -ENOENT    - Index out of range
2535  */
2536 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2537 {
2538         return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2539 }
2540
2541 /**
2542  * bpf_prog_array_update_at() - Updates the program at the given index
2543  *                              into the program array.
2544  * @array: a bpf_prog_array
2545  * @index: the index of the program to update
2546  * @prog: the program to insert into the array
2547  *
2548  * Skips over dummy programs (they are not counted) when calculating
2549  * the position of the program to update.
2550  *
2551  * Return:
2552  * * 0          - Success
2553  * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2554  * * -ENOENT    - Index out of range
2555  */
2556 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2557                              struct bpf_prog *prog)
2558 {
2559         struct bpf_prog_array_item *item;
2560
2561         if (unlikely(index < 0))
2562                 return -EINVAL;
2563
2564         for (item = array->items; item->prog; item++) {
2565                 if (item->prog == &dummy_bpf_prog.prog)
2566                         continue;
2567                 if (!index) {
2568                         WRITE_ONCE(item->prog, prog);
2569                         return 0;
2570                 }
2571                 index--;
2572         }
2573         return -ENOENT;
2574 }
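/* Example: with items {dummy, A, B, NULL}, index 0 refers to A and index 1
 * to B, since dummy entries are skipped when counting.
 */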
2575
2576 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2577                         struct bpf_prog *exclude_prog,
2578                         struct bpf_prog *include_prog,
2579                         u64 bpf_cookie,
2580                         struct bpf_prog_array **new_array)
2581 {
2582         int new_prog_cnt, carry_prog_cnt = 0;
2583         struct bpf_prog_array_item *existing, *new;
2584         struct bpf_prog_array *array;
2585         bool found_exclude = false;
2586
2587         /* Figure out how many existing progs we need to carry over to
2588          * the new array.
2589          */
2590         if (old_array) {
2591                 existing = old_array->items;
2592                 for (; existing->prog; existing++) {
2593                         if (existing->prog == exclude_prog) {
2594                                 found_exclude = true;
2595                                 continue;
2596                         }
2597                         if (existing->prog != &dummy_bpf_prog.prog)
2598                                 carry_prog_cnt++;
2599                         if (existing->prog == include_prog)
2600                                 return -EEXIST;
2601                 }
2602         }
2603
2604         if (exclude_prog && !found_exclude)
2605                 return -ENOENT;
2606
2607         /* How many progs (not NULL) will be in the new array? */
2608         new_prog_cnt = carry_prog_cnt;
2609         if (include_prog)
2610                 new_prog_cnt += 1;
2611
2612         /* Do we have any prog (not NULL) in the new array? */
2613         if (!new_prog_cnt) {
2614                 *new_array = NULL;
2615                 return 0;
2616         }
2617
2618         /* +1 as the end of prog_array is marked with NULL */
2619         array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2620         if (!array)
2621                 return -ENOMEM;
2622         new = array->items;
2623
2624         /* Fill in the new prog array */
2625         if (carry_prog_cnt) {
2626                 existing = old_array->items;
2627                 for (; existing->prog; existing++) {
2628                         if (existing->prog == exclude_prog ||
2629                             existing->prog == &dummy_bpf_prog.prog)
2630                                 continue;
2631
2632                         new->prog = existing->prog;
2633                         new->bpf_cookie = existing->bpf_cookie;
2634                         new++;
2635                 }
2636         }
2637         if (include_prog) {
2638                 new->prog = include_prog;
2639                 new->bpf_cookie = bpf_cookie;
2640                 new++;
2641         }
2642         new->prog = NULL;
2643         *new_array = array;
2644         return 0;
2645 }
2646
2647 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2648                              u32 *prog_ids, u32 request_cnt,
2649                              u32 *prog_cnt)
2650 {
2651         u32 cnt = 0;
2652
2653         if (array)
2654                 cnt = bpf_prog_array_length(array);
2655
2656         *prog_cnt = cnt;
2657
2658         /* return early if user requested only program count or nothing to copy */
2659         if (!request_cnt || !cnt)
2660                 return 0;
2661
2662         /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2663         return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2664                                                                      : 0;
2665 }
2666
2667 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2668                           struct bpf_map **used_maps, u32 len)
2669 {
2670         struct bpf_map *map;
2671         u32 i;
2672
2673         for (i = 0; i < len; i++) {
2674                 map = used_maps[i];
2675                 if (map->ops->map_poke_untrack)
2676                         map->ops->map_poke_untrack(map, aux);
2677                 bpf_map_put(map);
2678         }
2679 }
2680
2681 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2682 {
2683         __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2684         kfree(aux->used_maps);
2685 }
2686
2687 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2688                           struct btf_mod_pair *used_btfs, u32 len)
2689 {
2690 #ifdef CONFIG_BPF_SYSCALL
2691         struct btf_mod_pair *btf_mod;
2692         u32 i;
2693
2694         for (i = 0; i < len; i++) {
2695                 btf_mod = &used_btfs[i];
2696                 if (btf_mod->module)
2697                         module_put(btf_mod->module);
2698                 btf_put(btf_mod->btf);
2699         }
2700 #endif
2701 }
2702
2703 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2704 {
2705         __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2706         kfree(aux->used_btfs);
2707 }
2708
2709 static void bpf_prog_free_deferred(struct work_struct *work)
2710 {
2711         struct bpf_prog_aux *aux;
2712         int i;
2713
2714         aux = container_of(work, struct bpf_prog_aux, work);
2715 #ifdef CONFIG_BPF_SYSCALL
2716         bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2717 #endif
2718 #ifdef CONFIG_CGROUP_BPF
2719         if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2720                 bpf_cgroup_atype_put(aux->cgroup_atype);
2721 #endif
2722         bpf_free_used_maps(aux);
2723         bpf_free_used_btfs(aux);
2724         if (bpf_prog_is_dev_bound(aux))
2725                 bpf_prog_dev_bound_destroy(aux->prog);
2726 #ifdef CONFIG_PERF_EVENTS
2727         if (aux->prog->has_callchain_buf)
2728                 put_callchain_buffers();
2729 #endif
2730         if (aux->dst_trampoline)
2731                 bpf_trampoline_put(aux->dst_trampoline);
2732         for (i = 0; i < aux->func_cnt; i++) {
2733                 /* We can just unlink the subprog poke descriptor table as
2734                  * it was originally linked to the main program and is also
2735                  * released along with it.
2736                  */
2737                 aux->func[i]->aux->poke_tab = NULL;
2738                 bpf_jit_free(aux->func[i]);
2739         }
2740         if (aux->func_cnt) {
2741                 kfree(aux->func);
2742                 bpf_prog_unlock_free(aux->prog);
2743         } else {
2744                 bpf_jit_free(aux->prog);
2745         }
2746 }
2747
2748 void bpf_prog_free(struct bpf_prog *fp)
2749 {
2750         struct bpf_prog_aux *aux = fp->aux;
2751
2752         if (aux->dst_prog)
2753                 bpf_prog_put(aux->dst_prog);
2754         INIT_WORK(&aux->work, bpf_prog_free_deferred);
2755         schedule_work(&aux->work);
2756 }
2757 EXPORT_SYMBOL_GPL(bpf_prog_free);
2758
2759 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2760 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2761
2762 void bpf_user_rnd_init_once(void)
2763 {
2764         prandom_init_once(&bpf_user_rnd_state);
2765 }
2766
2767 BPF_CALL_0(bpf_user_rnd_u32)
2768 {
2769         /* Should someone ever have the rather unwise idea to use some
2770          * of the registers passed into this function, then note that
2771          * this function is called from native eBPF and classic-to-eBPF
2772          * transformations. Register assignments from both sides are
2773          * different, f.e. classic always sets fn(ctx, A, X) here.
2774          */
2775         struct rnd_state *state;
2776         u32 res;
2777
2778         state = &get_cpu_var(bpf_user_rnd_state);
2779         res = prandom_u32_state(state);
2780         put_cpu_var(bpf_user_rnd_state);
2781
2782         return res;
2783 }
2784
2785 BPF_CALL_0(bpf_get_raw_cpu_id)
2786 {
2787         return raw_smp_processor_id();
2788 }
2789
2790 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2791 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2792 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2793 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2794 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2795 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2796 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2797 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2798 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2799 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2800 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2801
2802 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2803 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2804 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2805 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2806 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2807 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2808 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2809
2810 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2811 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2812 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2813 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2814 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2815 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2816 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2817 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2818 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2819 const struct bpf_func_proto bpf_set_retval_proto __weak;
2820 const struct bpf_func_proto bpf_get_retval_proto __weak;
2821
2822 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2823 {
2824         return NULL;
2825 }
2826
2827 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2828 {
2829         return NULL;
2830 }
2831
2832 u64 __weak
2833 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2834                  void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2835 {
2836         return -ENOTSUPP;
2837 }
2838 EXPORT_SYMBOL_GPL(bpf_event_output);
2839
2840 /* Always built-in helper functions. */
2841 const struct bpf_func_proto bpf_tail_call_proto = {
2842         .func           = NULL,
2843         .gpl_only       = false,
2844         .ret_type       = RET_VOID,
2845         .arg1_type      = ARG_PTR_TO_CTX,
2846         .arg2_type      = ARG_CONST_MAP_PTR,
2847         .arg3_type      = ARG_ANYTHING,
2848 };
2849
2850 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2851  * It is encouraged to implement bpf_int_jit_compile() instead, so that
2852  * eBPF and implicitly also cBPF can get JITed!
2853  */
2854 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2855 {
2856         return prog;
2857 }
2858
2859 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2860  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2861  */
2862 void __weak bpf_jit_compile(struct bpf_prog *prog)
2863 {
2864 }
2865
2866 bool __weak bpf_helper_changes_pkt_data(void *func)
2867 {
2868         return false;
2869 }
2870
2871 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2872  * analysis code and wants explicit zero extension inserted by verifier.
2873  * Otherwise, return FALSE.
2874  *
2875  * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2876  * you don't override this. JITs that don't want these extra insns can detect
2877  * them using insn_is_zext.
2878  */
2879 bool __weak bpf_jit_needs_zext(void)
2880 {
2881         return false;
2882 }
2883
2884 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2885 bool __weak bpf_jit_supports_subprog_tailcalls(void)
2886 {
2887         return false;
2888 }
2889
2890 bool __weak bpf_jit_supports_kfunc_call(void)
2891 {
2892         return false;
2893 }
2894
2895 bool __weak bpf_jit_supports_far_kfunc_call(void)
2896 {
2897         return false;
2898 }
2899
2900 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2901  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2902  */
2903 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2904                          int len)
2905 {
2906         return -EFAULT;
2907 }
2908
2909 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2910                               void *addr1, void *addr2)
2911 {
2912         return -ENOTSUPP;
2913 }
2914
2915 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2916 {
2917         return ERR_PTR(-ENOTSUPP);
2918 }
2919
2920 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
2921 {
2922         return -ENOTSUPP;
2923 }
2924
2925 #ifdef CONFIG_BPF_SYSCALL
2926 static int __init bpf_global_ma_init(void)
2927 {
2928         int ret;
2929
2930         ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
2931         bpf_global_ma_set = !ret;
2932         return ret;
2933 }
2934 late_initcall(bpf_global_ma_init);
2935 #endif
2936
2937 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2938 EXPORT_SYMBOL(bpf_stats_enabled_key);
2939
2940 /* All definitions of tracepoints related to BPF. */
2941 #define CREATE_TRACE_POINTS
2942 #include <linux/bpf_trace.h>
2943
2944 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2945 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);