kernel/bpf/core.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Linux Socket Filter - Kernel level socket filtering
4  *
5  * Based on the design of the Berkeley Packet Filter. The new
6  * internal format has been designed by PLUMgrid:
7  *
8  *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9  *
10  * Authors:
11  *
12  *      Jay Schulist <jschlst@samba.org>
13  *      Alexei Starovoitov <ast@plumgrid.com>
14  *      Daniel Borkmann <dborkman@redhat.com>
15  *
16  * Andi Kleen - Fix a few bad bugs and races.
17  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18  */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37
38 #include <asm/barrier.h>
39 #include <asm/unaligned.h>
40
41 /* Registers */
42 #define BPF_R0  regs[BPF_REG_0]
43 #define BPF_R1  regs[BPF_REG_1]
44 #define BPF_R2  regs[BPF_REG_2]
45 #define BPF_R3  regs[BPF_REG_3]
46 #define BPF_R4  regs[BPF_REG_4]
47 #define BPF_R5  regs[BPF_REG_5]
48 #define BPF_R6  regs[BPF_REG_6]
49 #define BPF_R7  regs[BPF_REG_7]
50 #define BPF_R8  regs[BPF_REG_8]
51 #define BPF_R9  regs[BPF_REG_9]
52 #define BPF_R10 regs[BPF_REG_10]
53
54 /* Named registers */
55 #define DST     regs[insn->dst_reg]
56 #define SRC     regs[insn->src_reg]
57 #define FP      regs[BPF_REG_FP]
58 #define AX      regs[BPF_REG_AX]
59 #define ARG1    regs[BPF_REG_ARG1]
60 #define CTX     regs[BPF_REG_CTX]
61 #define IMM     insn->imm
62
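/* Illustrative sketch (not part of the original file): the interpreter's
 * opcode handlers in ___bpf_prog_run() use the named registers above
 * roughly like
 *
 *      ALU64_ADD_X:    DST = DST + SRC;        CONT;
 *      ALU_MOV_K:      DST = (u32) IMM;        CONT;
 *
 * i.e. DST, SRC and IMM expand to the current insn's operands in regs[].
 */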
63 /* No hurry in this branch
64  *
65  * Exported for the bpf jit load helper.
66  */
67 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
68 {
69         u8 *ptr = NULL;
70
71         if (k >= SKF_NET_OFF) {
72                 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
73         } else if (k >= SKF_LL_OFF) {
74                 if (unlikely(!skb_mac_header_was_set(skb)))
75                         return NULL;
76                 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
77         }
78         if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
79                 return ptr;
80
81         return NULL;
82 }
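/* Example (hedged, classic BPF semantics): a cBPF filter doing a 2-byte
 * load at offset SKF_LL_OFF + 12 asks this helper for
 * skb_mac_header(skb) + 12, i.e. the EtherType field on Ethernet, and
 * gets NULL back unless the pointer still lies within
 * [skb->head, skb_tail_pointer()).
 */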
83
84 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
85 {
86         gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
87         struct bpf_prog_aux *aux;
88         struct bpf_prog *fp;
89
90         size = round_up(size, PAGE_SIZE);
91         fp = __vmalloc(size, gfp_flags);
92         if (fp == NULL)
93                 return NULL;
94
95         aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
96         if (aux == NULL) {
97                 vfree(fp);
98                 return NULL;
99         }
100         fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
101         if (!fp->active) {
102                 vfree(fp);
103                 kfree(aux);
104                 return NULL;
105         }
106
107         fp->pages = size / PAGE_SIZE;
108         fp->aux = aux;
109         fp->aux->prog = fp;
110         fp->jit_requested = ebpf_jit_enabled();
111         fp->blinding_requested = bpf_jit_blinding_enabled(fp);
112 #ifdef CONFIG_CGROUP_BPF
113         aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
114 #endif
115
116         INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
117         mutex_init(&fp->aux->used_maps_mutex);
118         mutex_init(&fp->aux->dst_mutex);
119
120         return fp;
121 }
122
123 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
124 {
125         gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
126         struct bpf_prog *prog;
127         int cpu;
128
129         prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
130         if (!prog)
131                 return NULL;
132
133         prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
134         if (!prog->stats) {
135                 free_percpu(prog->active);
136                 kfree(prog->aux);
137                 vfree(prog);
138                 return NULL;
139         }
140
141         for_each_possible_cpu(cpu) {
142                 struct bpf_prog_stats *pstats;
143
144                 pstats = per_cpu_ptr(prog->stats, cpu);
145                 u64_stats_init(&pstats->syncp);
146         }
147         return prog;
148 }
149 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
150
151 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
152 {
153         if (!prog->aux->nr_linfo || !prog->jit_requested)
154                 return 0;
155
156         prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
157                                           sizeof(*prog->aux->jited_linfo),
158                                           GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
159         if (!prog->aux->jited_linfo)
160                 return -ENOMEM;
161
162         return 0;
163 }
164
165 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
166 {
167         if (prog->aux->jited_linfo &&
168             (!prog->jited || !prog->aux->jited_linfo[0])) {
169                 kvfree(prog->aux->jited_linfo);
170                 prog->aux->jited_linfo = NULL;
171         }
172
173         kfree(prog->aux->kfunc_tab);
174         prog->aux->kfunc_tab = NULL;
175 }
176
177 /* The JIT engine is responsible for providing an array
178  * for the insn_off to jited_off mapping (insn_to_jit_off).
179  *
180  * The idx to this array is the insn_off.  Hence, the insn_off
181  * here is relative to the prog itself instead of the main prog.
182  * This array has one entry for each xlated bpf insn.
183  *
184  * jited_off is the byte off to the end of the jited insn.
185  *
186  * Hence, with
187  * insn_start:
188  *      The first bpf insn off of the prog.  The insn off
189  *      here is relative to the main prog.
190  *      e.g. if prog is a subprog, insn_start > 0
191  * linfo_idx:
192  *      The prog's idx to prog->aux->linfo and jited_linfo
193  *
194  * jited_linfo[linfo_idx] = prog->bpf_func
195  *
196  * For i > linfo_idx,
197  *
198  * jited_linfo[i] = prog->bpf_func +
199  *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
200  */
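/* Worked example (hypothetical numbers): for a subprog with
 * linfo_idx = 3, insn_start = 10 and line info at xlated insn offsets
 * 10, 12 and 15, the loop below produces
 *
 *      jited_linfo[3] = prog->bpf_func
 *      jited_linfo[4] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *      jited_linfo[5] = prog->bpf_func + insn_to_jit_off[15 - 10 - 1]
 *
 * i.e. each entry points at the first jited byte of its insn, which is
 * the byte just past the end of the preceding xlated insn.
 */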
201 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
202                                const u32 *insn_to_jit_off)
203 {
204         u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
205         const struct bpf_line_info *linfo;
206         void **jited_linfo;
207
208         if (!prog->aux->jited_linfo)
209                 /* Userspace did not provide linfo */
210                 return;
211
212         linfo_idx = prog->aux->linfo_idx;
213         linfo = &prog->aux->linfo[linfo_idx];
214         insn_start = linfo[0].insn_off;
215         insn_end = insn_start + prog->len;
216
217         jited_linfo = &prog->aux->jited_linfo[linfo_idx];
218         jited_linfo[0] = prog->bpf_func;
219
220         nr_linfo = prog->aux->nr_linfo - linfo_idx;
221
222         for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
223                 /* The verifier ensures that linfo[i].insn_off is
224                  * strictly increasing
225                  */
226                 jited_linfo[i] = prog->bpf_func +
227                         insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
228 }
229
230 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
231                                   gfp_t gfp_extra_flags)
232 {
233         gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
234         struct bpf_prog *fp;
235         u32 pages;
236
237         size = round_up(size, PAGE_SIZE);
238         pages = size / PAGE_SIZE;
239         if (pages <= fp_old->pages)
240                 return fp_old;
241
242         fp = __vmalloc(size, gfp_flags);
243         if (fp) {
244                 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
245                 fp->pages = pages;
246                 fp->aux->prog = fp;
247
248                 /* We keep fp->aux from fp_old around in the new
249                  * reallocated structure.
250                  */
251                 fp_old->aux = NULL;
252                 fp_old->stats = NULL;
253                 fp_old->active = NULL;
254                 __bpf_prog_free(fp_old);
255         }
256
257         return fp;
258 }
259
260 void __bpf_prog_free(struct bpf_prog *fp)
261 {
262         if (fp->aux) {
263                 mutex_destroy(&fp->aux->used_maps_mutex);
264                 mutex_destroy(&fp->aux->dst_mutex);
265                 kfree(fp->aux->poke_tab);
266                 kfree(fp->aux);
267         }
268         free_percpu(fp->stats);
269         free_percpu(fp->active);
270         vfree(fp);
271 }
272
273 int bpf_prog_calc_tag(struct bpf_prog *fp)
274 {
275         const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
276         u32 raw_size = bpf_prog_tag_scratch_size(fp);
277         u32 digest[SHA1_DIGEST_WORDS];
278         u32 ws[SHA1_WORKSPACE_WORDS];
279         u32 i, bsize, psize, blocks;
280         struct bpf_insn *dst;
281         bool was_ld_map;
282         u8 *raw, *todo;
283         __be32 *result;
284         __be64 *bits;
285
286         raw = vmalloc(raw_size);
287         if (!raw)
288                 return -ENOMEM;
289
290         sha1_init(digest);
291         memset(ws, 0, sizeof(ws));
292
293         /* We need to take out the map fd for the digest calculation
294          * since map fds are unstable from the user space side.
295          */
296         dst = (void *)raw;
297         for (i = 0, was_ld_map = false; i < fp->len; i++) {
298                 dst[i] = fp->insnsi[i];
299                 if (!was_ld_map &&
300                     dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
301                     (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
302                      dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
303                         was_ld_map = true;
304                         dst[i].imm = 0;
305                 } else if (was_ld_map &&
306                            dst[i].code == 0 &&
307                            dst[i].dst_reg == 0 &&
308                            dst[i].src_reg == 0 &&
309                            dst[i].off == 0) {
310                         was_ld_map = false;
311                         dst[i].imm = 0;
312                 } else {
313                         was_ld_map = false;
314                 }
315         }
316
317         psize = bpf_prog_insn_size(fp);
318         memset(&raw[psize], 0, raw_size - psize);
319         raw[psize++] = 0x80;
320
321         bsize  = round_up(psize, SHA1_BLOCK_SIZE);
322         blocks = bsize / SHA1_BLOCK_SIZE;
323         todo   = raw;
324         if (bsize - psize >= sizeof(__be64)) {
325                 bits = (__be64 *)(todo + bsize - sizeof(__be64));
326         } else {
327                 bits = (__be64 *)(todo + bsize + bits_offset);
328                 blocks++;
329         }
330         *bits = cpu_to_be64((psize - 1) << 3);
331
332         while (blocks--) {
333                 sha1_transform(digest, todo, ws);
334                 todo += SHA1_BLOCK_SIZE;
335         }
336
337         result = (__force __be32 *)digest;
338         for (i = 0; i < SHA1_DIGEST_WORDS; i++)
339                 result[i] = cpu_to_be32(digest[i]);
340         memcpy(fp->tag, result, sizeof(fp->tag));
341
342         vfree(raw);
343         return 0;
344 }
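/* Padding arithmetic above, with illustrative numbers: a 3-insn program
 * has bpf_prog_insn_size() = 24 bytes, so psize becomes 25 once the 0x80
 * terminator is appended.  bsize rounds up to a single 64-byte SHA1
 * block; since 64 - 25 >= 8, the big-endian bit length
 * (25 - 1) << 3 = 192 lands in the last 8 bytes of that block and one
 * sha1_transform() call digests it.
 */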
345
346 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
347                                 s32 end_new, s32 curr, const bool probe_pass)
348 {
349         const s64 imm_min = S32_MIN, imm_max = S32_MAX;
350         s32 delta = end_new - end_old;
351         s64 imm = insn->imm;
352
353         if (curr < pos && curr + imm + 1 >= end_old)
354                 imm += delta;
355         else if (curr >= end_new && curr + imm + 1 < end_new)
356                 imm -= delta;
357         if (imm < imm_min || imm > imm_max)
358                 return -ERANGE;
359         if (!probe_pass)
360                 insn->imm = imm;
361         return 0;
362 }
363
364 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
365                                 s32 end_new, s32 curr, const bool probe_pass)
366 {
367         const s32 off_min = S16_MIN, off_max = S16_MAX;
368         s32 delta = end_new - end_old;
369         s32 off = insn->off;
370
371         if (curr < pos && curr + off + 1 >= end_old)
372                 off += delta;
373         else if (curr >= end_new && curr + off + 1 < end_new)
374                 off -= delta;
375         if (off < off_min || off > off_max)
376                 return -ERANGE;
377         if (!probe_pass)
378                 insn->off = off;
379         return 0;
380 }
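/* Hypothetical example of the adjustment above: patching insn 5 into a
 * 3-insn patchlet gives end_old = 6, end_new = 8, delta = 2.  A jump at
 * insn 2 with off = +4 targeted insn 2 + 4 + 1 = 7 >= end_old, i.e. it
 * jumped over the patched area, so its offset grows to +6; a jump at
 * insn 2 with off = +2 targeted insn 5 (< end_old) and stays untouched.
 */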
381
382 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
383                             s32 end_new, const bool probe_pass)
384 {
385         u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
386         struct bpf_insn *insn = prog->insnsi;
387         int ret = 0;
388
389         for (i = 0; i < insn_cnt; i++, insn++) {
390                 u8 code;
391
392                 /* In the probing pass we still operate on the original,
393                  * unpatched image in order to check overflows before we
394                  * do any other adjustments. Therefore skip the patchlet.
395                  */
396                 if (probe_pass && i == pos) {
397                         i = end_new;
398                         insn = prog->insnsi + end_old;
399                 }
400                 if (bpf_pseudo_func(insn)) {
401                         ret = bpf_adj_delta_to_imm(insn, pos, end_old,
402                                                    end_new, i, probe_pass);
403                         if (ret)
404                                 return ret;
405                         continue;
406                 }
407                 code = insn->code;
408                 if ((BPF_CLASS(code) != BPF_JMP &&
409                      BPF_CLASS(code) != BPF_JMP32) ||
410                     BPF_OP(code) == BPF_EXIT)
411                         continue;
412                 /* Adjust offset of jmps if we cross patch boundaries. */
413                 if (BPF_OP(code) == BPF_CALL) {
414                         if (insn->src_reg != BPF_PSEUDO_CALL)
415                                 continue;
416                         ret = bpf_adj_delta_to_imm(insn, pos, end_old,
417                                                    end_new, i, probe_pass);
418                 } else {
419                         ret = bpf_adj_delta_to_off(insn, pos, end_old,
420                                                    end_new, i, probe_pass);
421                 }
422                 if (ret)
423                         break;
424         }
425
426         return ret;
427 }
428
429 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
430 {
431         struct bpf_line_info *linfo;
432         u32 i, nr_linfo;
433
434         nr_linfo = prog->aux->nr_linfo;
435         if (!nr_linfo || !delta)
436                 return;
437
438         linfo = prog->aux->linfo;
439
440         for (i = 0; i < nr_linfo; i++)
441                 if (off < linfo[i].insn_off)
442                         break;
443
444         /* Push all off < linfo[i].insn_off by delta */
445         for (; i < nr_linfo; i++)
446                 linfo[i].insn_off += delta;
447 }
448
449 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
450                                        const struct bpf_insn *patch, u32 len)
451 {
452         u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
453         const u32 cnt_max = S16_MAX;
454         struct bpf_prog *prog_adj;
455         int err;
456
457         /* Since our patchlet doesn't expand the image, we're done. */
458         if (insn_delta == 0) {
459                 memcpy(prog->insnsi + off, patch, sizeof(*patch));
460                 return prog;
461         }
462
463         insn_adj_cnt = prog->len + insn_delta;
464
465         /* Reject anything that would potentially let the insn->off
466          * target overflow when we have excessive program expansions.
467          * We need to probe here before we do any reallocation where
468          * we afterwards may not fail anymore.
469          */
470         if (insn_adj_cnt > cnt_max &&
471             (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
472                 return ERR_PTR(err);
473
474         /* Several new instructions need to be inserted. Make room
475          * for them. Likely, there's no need for a new allocation as
476          * the last page could have large enough tailroom.
477          */
478         prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
479                                     GFP_USER);
480         if (!prog_adj)
481                 return ERR_PTR(-ENOMEM);
482
483         prog_adj->len = insn_adj_cnt;
484
485         /* Patching happens in 3 steps:
486          *
487          * 1) Move over tail of insnsi from next instruction onwards,
488          *    so we can patch the single target insn with one or more
489          *    new ones (patching is always from 1 to n insns, n > 0).
490          * 2) Inject new instructions at the target location.
491          * 3) Adjust branch offsets if necessary.
492          */
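        /* E.g. (illustrative): off = 5, len = 3 in a 10-insn prog gives
         * insn_adj_cnt = 12 and insn_rest = 4; insns 6..9 move to slots
         * 8..11 and the three patch insns land in slots 5..7.
         */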
493         insn_rest = insn_adj_cnt - off - len;
494
495         memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
496                 sizeof(*patch) * insn_rest);
497         memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
498
499         /* We are guaranteed not to fail at this point; otherwise the
500          * ship has sailed and we could not revert to the original state
501          * anyway. An overflow cannot happen at this point.
502          */
503         BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
504
505         bpf_adj_linfo(prog_adj, off, insn_delta);
506
507         return prog_adj;
508 }
509
510 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
511 {
512         /* Branch offsets can't overflow when the program is shrinking, so
513          * there is no need to call bpf_adj_branches(..., true) here.
514          */
515         memmove(prog->insnsi + off, prog->insnsi + off + cnt,
516                 sizeof(struct bpf_insn) * (prog->len - off - cnt));
517         prog->len -= cnt;
518
519         return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
520 }
521
522 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
523 {
524         int i;
525
526         for (i = 0; i < fp->aux->func_cnt; i++)
527                 bpf_prog_kallsyms_del(fp->aux->func[i]);
528 }
529
530 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
531 {
532         bpf_prog_kallsyms_del_subprogs(fp);
533         bpf_prog_kallsyms_del(fp);
534 }
535
536 #ifdef CONFIG_BPF_JIT
537 /* All BPF JIT sysctl knobs here. */
538 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
539 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
540 int bpf_jit_harden   __read_mostly;
541 long bpf_jit_limit   __read_mostly;
542 long bpf_jit_limit_max __read_mostly;
543
544 static void
545 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
546 {
547         WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
548
549         prog->aux->ksym.start = (unsigned long) prog->bpf_func;
550         prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
551 }
552
553 static void
554 bpf_prog_ksym_set_name(struct bpf_prog *prog)
555 {
556         char *sym = prog->aux->ksym.name;
557         const char *end = sym + KSYM_NAME_LEN;
558         const struct btf_type *type;
559         const char *func_name;
560
561         BUILD_BUG_ON(sizeof("bpf_prog_") +
562                      sizeof(prog->tag) * 2 +
563                      /* name has been null terminated.
564                       * We should need +1 for the '_' preceding
565                       * the name.  However, the null character
566                       * is double counted between the name and the
567                       * sizeof("bpf_prog_") above, so we omit
568                       * the +1 here.
569                       */
570                      sizeof(prog->aux->name) > KSYM_NAME_LEN);
571
572         sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
573         sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
574
575         /* prog->aux->name will be ignored if full btf name is available */
576         if (prog->aux->func_info_cnt) {
577                 type = btf_type_by_id(prog->aux->btf,
578                                       prog->aux->func_info[prog->aux->func_idx].type_id);
579                 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
580                 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
581                 return;
582         }
583
584         if (prog->aux->name[0])
585                 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
586         else
587                 *sym = 0;
588 }
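/* The resulting kallsyms entry looks like (tag and name are hypothetical):
 *
 *      bpf_prog_5a8e02f1c3d44b90_my_xdp_func
 *
 * i.e. "bpf_prog_", 16 hex digits of the 8-byte tag, then '_' and the
 * BTF function name, falling back to prog->aux->name when no BTF func
 * info is present.
 */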
589
590 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
591 {
592         return container_of(n, struct bpf_ksym, tnode)->start;
593 }
594
595 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
596                                           struct latch_tree_node *b)
597 {
598         return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
599 }
600
601 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
602 {
603         unsigned long val = (unsigned long)key;
604         const struct bpf_ksym *ksym;
605
606         ksym = container_of(n, struct bpf_ksym, tnode);
607
608         if (val < ksym->start)
609                 return -1;
610         if (val >= ksym->end)
611                 return  1;
612
613         return 0;
614 }
615
616 static const struct latch_tree_ops bpf_tree_ops = {
617         .less   = bpf_tree_less,
618         .comp   = bpf_tree_comp,
619 };
620
621 static DEFINE_SPINLOCK(bpf_lock);
622 static LIST_HEAD(bpf_kallsyms);
623 static struct latch_tree_root bpf_tree __cacheline_aligned;
624
625 void bpf_ksym_add(struct bpf_ksym *ksym)
626 {
627         spin_lock_bh(&bpf_lock);
628         WARN_ON_ONCE(!list_empty(&ksym->lnode));
629         list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
630         latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
631         spin_unlock_bh(&bpf_lock);
632 }
633
634 static void __bpf_ksym_del(struct bpf_ksym *ksym)
635 {
636         if (list_empty(&ksym->lnode))
637                 return;
638
639         latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
640         list_del_rcu(&ksym->lnode);
641 }
642
643 void bpf_ksym_del(struct bpf_ksym *ksym)
644 {
645         spin_lock_bh(&bpf_lock);
646         __bpf_ksym_del(ksym);
647         spin_unlock_bh(&bpf_lock);
648 }
649
650 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
651 {
652         return fp->jited && !bpf_prog_was_classic(fp);
653 }
654
655 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
656 {
657         if (!bpf_prog_kallsyms_candidate(fp) ||
658             !bpf_capable())
659                 return;
660
661         bpf_prog_ksym_set_addr(fp);
662         bpf_prog_ksym_set_name(fp);
663         fp->aux->ksym.prog = true;
664
665         bpf_ksym_add(&fp->aux->ksym);
666 }
667
668 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
669 {
670         if (!bpf_prog_kallsyms_candidate(fp))
671                 return;
672
673         bpf_ksym_del(&fp->aux->ksym);
674 }
675
676 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
677 {
678         struct latch_tree_node *n;
679
680         n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
681         return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
682 }
683
684 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
685                                  unsigned long *off, char *sym)
686 {
687         struct bpf_ksym *ksym;
688         char *ret = NULL;
689
690         rcu_read_lock();
691         ksym = bpf_ksym_find(addr);
692         if (ksym) {
693                 unsigned long symbol_start = ksym->start;
694                 unsigned long symbol_end = ksym->end;
695
696                 strncpy(sym, ksym->name, KSYM_NAME_LEN);
697
698                 ret = sym;
699                 if (size)
700                         *size = symbol_end - symbol_start;
701                 if (off)
702                         *off  = addr - symbol_start;
703         }
704         rcu_read_unlock();
705
706         return ret;
707 }
708
709 bool is_bpf_text_address(unsigned long addr)
710 {
711         bool ret;
712
713         rcu_read_lock();
714         ret = bpf_ksym_find(addr) != NULL;
715         rcu_read_unlock();
716
717         return ret;
718 }
719
720 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
721 {
722         struct bpf_ksym *ksym = bpf_ksym_find(addr);
723
724         return ksym && ksym->prog ?
725                container_of(ksym, struct bpf_prog_aux, ksym)->prog :
726                NULL;
727 }
728
729 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
730 {
731         const struct exception_table_entry *e = NULL;
732         struct bpf_prog *prog;
733
734         rcu_read_lock();
735         prog = bpf_prog_ksym_find(addr);
736         if (!prog)
737                 goto out;
738         if (!prog->aux->num_exentries)
739                 goto out;
740
741         e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
742 out:
743         rcu_read_unlock();
744         return e;
745 }
746
747 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
748                     char *sym)
749 {
750         struct bpf_ksym *ksym;
751         unsigned int it = 0;
752         int ret = -ERANGE;
753
754         if (!bpf_jit_kallsyms_enabled())
755                 return ret;
756
757         rcu_read_lock();
758         list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
759                 if (it++ != symnum)
760                         continue;
761
762                 strncpy(sym, ksym->name, KSYM_NAME_LEN);
763
764                 *value = ksym->start;
765                 *type  = BPF_SYM_ELF_TYPE;
766
767                 ret = 0;
768                 break;
769         }
770         rcu_read_unlock();
771
772         return ret;
773 }
774
775 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
776                                 struct bpf_jit_poke_descriptor *poke)
777 {
778         struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
779         static const u32 poke_tab_max = 1024;
780         u32 slot = prog->aux->size_poke_tab;
781         u32 size = slot + 1;
782
783         if (size > poke_tab_max)
784                 return -ENOSPC;
785         if (poke->tailcall_target || poke->tailcall_target_stable ||
786             poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
787                 return -EINVAL;
788
789         switch (poke->reason) {
790         case BPF_POKE_REASON_TAIL_CALL:
791                 if (!poke->tail_call.map)
792                         return -EINVAL;
793                 break;
794         default:
795                 return -EINVAL;
796         }
797
798         tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
799         if (!tab)
800                 return -ENOMEM;
801
802         memcpy(&tab[slot], poke, sizeof(*poke));
803         prog->aux->size_poke_tab = size;
804         prog->aux->poke_tab = tab;
805
806         return slot;
807 }
808
809 /*
810  * BPF program pack allocator.
811  *
812  * Most BPF programs are pretty small. Allocating a whole page for each
813  * program is sometimes a waste. Many small BPF programs also add pressure
814  * to the instruction TLB. To solve this issue, we introduce a BPF program pack
815  * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
816  * to host BPF programs.
817  */
818 #define BPF_PROG_CHUNK_SHIFT    6
819 #define BPF_PROG_CHUNK_SIZE     (1 << BPF_PROG_CHUNK_SHIFT)
820 #define BPF_PROG_CHUNK_MASK     (~(BPF_PROG_CHUNK_SIZE - 1))
821
822 struct bpf_prog_pack {
823         struct list_head list;
824         void *ptr;
825         unsigned long bitmap[];
826 };
827
828 #define BPF_PROG_SIZE_TO_NBITS(size)    (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
829
830 static DEFINE_MUTEX(pack_mutex);
831 static LIST_HEAD(pack_list);
832
833 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
834  * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
835  */
836 #ifdef PMD_SIZE
837 #define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
838 #else
839 #define BPF_PROG_PACK_SIZE PAGE_SIZE
840 #endif
841
842 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
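/* Illustrative numbers: with a 2 MiB PMD and a single NUMA node,
 * BPF_PROG_PACK_SIZE is 2 MiB and BPF_PROG_CHUNK_COUNT is
 * 2 MiB / 64 = 32768 chunks; a 300-byte image would occupy
 * BPF_PROG_SIZE_TO_NBITS(300) = 5 of those chunks in a pack's bitmap.
 */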
843
844 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
845 {
846         struct bpf_prog_pack *pack;
847
848         pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
849                        GFP_KERNEL);
850         if (!pack)
851                 return NULL;
852         pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
853         if (!pack->ptr) {
854                 kfree(pack);
855                 return NULL;
856         }
857         bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
858         bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
859         list_add_tail(&pack->list, &pack_list);
860
861         set_vm_flush_reset_perms(pack->ptr);
862         set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
863         set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
864         return pack;
865 }
866
867 static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
868 {
869         unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
870         struct bpf_prog_pack *pack;
871         unsigned long pos;
872         void *ptr = NULL;
873
874         mutex_lock(&pack_mutex);
875         if (size > BPF_PROG_PACK_SIZE) {
876                 size = round_up(size, PAGE_SIZE);
877                 ptr = module_alloc(size);
878                 if (ptr) {
879                         bpf_fill_ill_insns(ptr, size);
880                         set_vm_flush_reset_perms(ptr);
881                         set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
882                         set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
883                 }
884                 goto out;
885         }
886         list_for_each_entry(pack, &pack_list, list) {
887                 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
888                                                  nbits, 0);
889                 if (pos < BPF_PROG_CHUNK_COUNT)
890                         goto found_free_area;
891         }
892
893         pack = alloc_new_pack(bpf_fill_ill_insns);
894         if (!pack)
895                 goto out;
896
897         pos = 0;
898
899 found_free_area:
900         bitmap_set(pack->bitmap, pos, nbits);
901         ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
902
903 out:
904         mutex_unlock(&pack_mutex);
905         return ptr;
906 }
907
908 static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
909 {
910         struct bpf_prog_pack *pack = NULL, *tmp;
911         unsigned int nbits;
912         unsigned long pos;
913
914         mutex_lock(&pack_mutex);
915         if (hdr->size > BPF_PROG_PACK_SIZE) {
916                 module_memfree(hdr);
917                 goto out;
918         }
919
920         list_for_each_entry(tmp, &pack_list, list) {
921                 if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
922                         pack = tmp;
923                         break;
924                 }
925         }
926
927         if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
928                 goto out;
929
930         nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
931         pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
932
933         WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
934                   "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
935
936         bitmap_clear(pack->bitmap, pos, nbits);
937         if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
938                                        BPF_PROG_CHUNK_COUNT, 0) == 0) {
939                 list_del(&pack->list);
940                 module_memfree(pack->ptr);
941                 kfree(pack);
942         }
943 out:
944         mutex_unlock(&pack_mutex);
945 }
946
947 static atomic_long_t bpf_jit_current;
948
949 /* Can be overridden by an arch's JIT compiler if it has a custom,
950  * dedicated BPF backend memory area, or if neither of the two
951  * below apply.
952  */
953 u64 __weak bpf_jit_alloc_exec_limit(void)
954 {
955 #if defined(MODULES_VADDR)
956         return MODULES_END - MODULES_VADDR;
957 #else
958         return VMALLOC_END - VMALLOC_START;
959 #endif
960 }
961
962 static int __init bpf_jit_charge_init(void)
963 {
964         /* Only used as heuristic here to derive limit. */
965         bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
966         bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
967                                             PAGE_SIZE), LONG_MAX);
968         return 0;
969 }
970 pure_initcall(bpf_jit_charge_init);
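/* E.g. with a (hypothetical) 1 GiB JIT exec area, the default limit is
 * 1 GiB >> 2 = 256 MiB, rounded up to a page and capped at LONG_MAX.
 */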
971
972 int bpf_jit_charge_modmem(u32 size)
973 {
974         if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
975                 if (!bpf_capable()) {
976                         atomic_long_sub(size, &bpf_jit_current);
977                         return -EPERM;
978                 }
979         }
980
981         return 0;
982 }
983
984 void bpf_jit_uncharge_modmem(u32 size)
985 {
986         atomic_long_sub(size, &bpf_jit_current);
987 }
988
989 void *__weak bpf_jit_alloc_exec(unsigned long size)
990 {
991         return module_alloc(size);
992 }
993
994 void __weak bpf_jit_free_exec(void *addr)
995 {
996         module_memfree(addr);
997 }
998
999 struct bpf_binary_header *
1000 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1001                      unsigned int alignment,
1002                      bpf_jit_fill_hole_t bpf_fill_ill_insns)
1003 {
1004         struct bpf_binary_header *hdr;
1005         u32 size, hole, start;
1006
1007         WARN_ON_ONCE(!is_power_of_2(alignment) ||
1008                      alignment > BPF_IMAGE_ALIGNMENT);
1009
1010         /* Most BPF filters are really small, but if some of them
1011          * fill a whole page, allow at least 128 extra bytes to insert a
1012          * random section of illegal instructions.
1013          */
1014         size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1015
1016         if (bpf_jit_charge_modmem(size))
1017                 return NULL;
1018         hdr = bpf_jit_alloc_exec(size);
1019         if (!hdr) {
1020                 bpf_jit_uncharge_modmem(size);
1021                 return NULL;
1022         }
1023
1024         /* Fill space with illegal/arch-dep instructions. */
1025         bpf_fill_ill_insns(hdr, size);
1026
1027         hdr->size = size;
1028         hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1029                      PAGE_SIZE - sizeof(*hdr));
1030         start = (get_random_int() % hole) & ~(alignment - 1);
1031
1032         /* Leave a random number of instructions before BPF code. */
1033         *image_ptr = &hdr->image[start];
1034
1035         return hdr;
1036 }
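/* Size example for the allocation above (assuming an 8-byte header):
 * proglen = 3000 gives size = round_up(3000 + 8 + 128, PAGE_SIZE) = 4096,
 * a hole of min(4096 - 3008, PAGE_SIZE - 8) = 1088 bytes, and the image
 * starts at a random, alignment-masked offset inside that hole.
 */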
1037
1038 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1039 {
1040         u32 size = hdr->size;
1041
1042         bpf_jit_free_exec(hdr);
1043         bpf_jit_uncharge_modmem(size);
1044 }
1045
1046 /* Allocate jit binary from bpf_prog_pack allocator.
1047  * Since the allocated memory is RO+X, the JIT engine cannot write directly
1048  * to the memory. To solve this problem, a RW buffer is also allocated
1049  * at the same time. The JIT engine should calculate offsets based on the
1050  * RO memory address, but write JITed program to the RW buffer. Once the
1051  * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1052  * the JITed program to the RO memory.
1053  */
1054 struct bpf_binary_header *
1055 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1056                           unsigned int alignment,
1057                           struct bpf_binary_header **rw_header,
1058                           u8 **rw_image,
1059                           bpf_jit_fill_hole_t bpf_fill_ill_insns)
1060 {
1061         struct bpf_binary_header *ro_header;
1062         u32 size, hole, start;
1063
1064         WARN_ON_ONCE(!is_power_of_2(alignment) ||
1065                      alignment > BPF_IMAGE_ALIGNMENT);
1066
1067         /* add 16 bytes for a random section of illegal instructions */
1068         size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1069
1070         if (bpf_jit_charge_modmem(size))
1071                 return NULL;
1072         ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1073         if (!ro_header) {
1074                 bpf_jit_uncharge_modmem(size);
1075                 return NULL;
1076         }
1077
1078         *rw_header = kvmalloc(size, GFP_KERNEL);
1079         if (!*rw_header) {
1080                 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1081                 bpf_prog_pack_free(ro_header);
1082                 bpf_jit_uncharge_modmem(size);
1083                 return NULL;
1084         }
1085
1086         /* Fill space with illegal/arch-dep instructions. */
1087         bpf_fill_ill_insns(*rw_header, size);
1088         (*rw_header)->size = size;
1089
1090         hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1091                      BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1092         start = (get_random_int() % hole) & ~(alignment - 1);
1093
1094         *image_ptr = &ro_header->image[start];
1095         *rw_image = &(*rw_header)->image[start];
1096
1097         return ro_header;
1098 }
1099
1100 /* Copy JITed text from rw_header to its final location, the ro_header. */
1101 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1102                                  struct bpf_binary_header *ro_header,
1103                                  struct bpf_binary_header *rw_header)
1104 {
1105         void *ptr;
1106
1107         ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1108
1109         kvfree(rw_header);
1110
1111         if (IS_ERR(ptr)) {
1112                 bpf_prog_pack_free(ro_header);
1113                 return PTR_ERR(ptr);
1114         }
1115         return 0;
1116 }
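/* Sketch of the expected call sequence from an arch JIT (hedged; error
 * handling and jited_len bookkeeping omitted):
 *
 *      ro_header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *                                            &rw_header, &rw_image, fill);
 *      // ...emit insns into rw_image, computing offsets against image...
 *      if (bpf_jit_binary_pack_finalize(prog, ro_header, rw_header))
 *              goto out;       // ro_header and rw_header already freed
 *      prog->bpf_func = (void *)image;
 *      prog->jited = 1;
 */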
1117
1118 /* bpf_jit_binary_pack_free is called in two different scenarios:
1119  *   1) when the program is freed after a successful JIT;
1120  *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1121  * For case 2), we need to free both the RO memory and the RW buffer.
1122  *
1123  * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1124  * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1125  * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1126  * bpf_arch_text_copy (when jit fails).
1127  */
1128 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1129                               struct bpf_binary_header *rw_header)
1130 {
1131         u32 size = ro_header->size;
1132
1133         bpf_prog_pack_free(ro_header);
1134         kvfree(rw_header);
1135         bpf_jit_uncharge_modmem(size);
1136 }
1137
1138 struct bpf_binary_header *
1139 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1140 {
1141         unsigned long real_start = (unsigned long)fp->bpf_func;
1142         unsigned long addr;
1143
1144         addr = real_start & BPF_PROG_CHUNK_MASK;
1145         return (void *)addr;
1146 }
1147
1148 static inline struct bpf_binary_header *
1149 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1150 {
1151         unsigned long real_start = (unsigned long)fp->bpf_func;
1152         unsigned long addr;
1153
1154         addr = real_start & PAGE_MASK;
1155         return (void *)addr;
1156 }
1157
1158 /* This symbol is only overridden by archs that have different
1159  * requirements than the usual eBPF JITs, f.e. when they only
1160  * implement cBPF JIT, do not set images read-only, etc.
1161  */
1162 void __weak bpf_jit_free(struct bpf_prog *fp)
1163 {
1164         if (fp->jited) {
1165                 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1166
1167                 bpf_jit_binary_free(hdr);
1168                 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1169         }
1170
1171         bpf_prog_unlock_free(fp);
1172 }
1173
1174 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1175                           const struct bpf_insn *insn, bool extra_pass,
1176                           u64 *func_addr, bool *func_addr_fixed)
1177 {
1178         s16 off = insn->off;
1179         s32 imm = insn->imm;
1180         u8 *addr;
1181
1182         *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1183         if (!*func_addr_fixed) {
1184                 /* Place-holder address till the last pass has collected
1185                  * all addresses for JITed subprograms in which case we
1186                  * can pick them up from prog->aux.
1187                  */
1188                 if (!extra_pass)
1189                         addr = NULL;
1190                 else if (prog->aux->func &&
1191                          off >= 0 && off < prog->aux->func_cnt)
1192                         addr = (u8 *)prog->aux->func[off]->bpf_func;
1193                 else
1194                         return -EINVAL;
1195         } else {
1196                 /* Address of a BPF helper call. Since part of the core
1197                  * kernel, it's always at a fixed location. __bpf_call_base
1198                  * and the helper with imm relative to it are both in core
1199                  * kernel.
1200                  */
1201                 addr = (u8 *)__bpf_call_base + imm;
1202         }
1203
1204         *func_addr = (unsigned long)addr;
1205         return 0;
1206 }
1207
1208 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1209                               const struct bpf_insn *aux,
1210                               struct bpf_insn *to_buff,
1211                               bool emit_zext)
1212 {
1213         struct bpf_insn *to = to_buff;
1214         u32 imm_rnd = get_random_int();
1215         s16 off;
1216
1217         BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
1218         BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1219
1220         /* Constraints on AX register:
1221          *
1222          * AX register is inaccessible from user space. It is mapped in
1223          * all JITs, and used here for constant blinding rewrites. It is
1224          * typically "stateless" meaning its contents are only valid within
1225          * the executed instruction, but not across several instructions.
1226          * There are a few exceptions however which are further detailed
1227          * below.
1228          *
1229          * Constant blinding is only used by JITs, not in the interpreter.
1230          * The interpreter uses AX on some occasions as a local temporary
1231          * register, e.g. in DIV or MOD instructions.
1232          *
1233          * In restricted circumstances, the verifier can also use the AX
1234          * register for rewrites as long as they do not interfere with
1235          * the above cases!
1236          */
1237         if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1238                 goto out;
1239
1240         if (from->imm == 0 &&
1241             (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
1242              from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1243                 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1244                 goto out;
1245         }
1246
1247         switch (from->code) {
1248         case BPF_ALU | BPF_ADD | BPF_K:
1249         case BPF_ALU | BPF_SUB | BPF_K:
1250         case BPF_ALU | BPF_AND | BPF_K:
1251         case BPF_ALU | BPF_OR  | BPF_K:
1252         case BPF_ALU | BPF_XOR | BPF_K:
1253         case BPF_ALU | BPF_MUL | BPF_K:
1254         case BPF_ALU | BPF_MOV | BPF_K:
1255         case BPF_ALU | BPF_DIV | BPF_K:
1256         case BPF_ALU | BPF_MOD | BPF_K:
1257                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1258                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1259                 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1260                 break;
1261
1262         case BPF_ALU64 | BPF_ADD | BPF_K:
1263         case BPF_ALU64 | BPF_SUB | BPF_K:
1264         case BPF_ALU64 | BPF_AND | BPF_K:
1265         case BPF_ALU64 | BPF_OR  | BPF_K:
1266         case BPF_ALU64 | BPF_XOR | BPF_K:
1267         case BPF_ALU64 | BPF_MUL | BPF_K:
1268         case BPF_ALU64 | BPF_MOV | BPF_K:
1269         case BPF_ALU64 | BPF_DIV | BPF_K:
1270         case BPF_ALU64 | BPF_MOD | BPF_K:
1271                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1272                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1273                 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1274                 break;
1275
1276         case BPF_JMP | BPF_JEQ  | BPF_K:
1277         case BPF_JMP | BPF_JNE  | BPF_K:
1278         case BPF_JMP | BPF_JGT  | BPF_K:
1279         case BPF_JMP | BPF_JLT  | BPF_K:
1280         case BPF_JMP | BPF_JGE  | BPF_K:
1281         case BPF_JMP | BPF_JLE  | BPF_K:
1282         case BPF_JMP | BPF_JSGT | BPF_K:
1283         case BPF_JMP | BPF_JSLT | BPF_K:
1284         case BPF_JMP | BPF_JSGE | BPF_K:
1285         case BPF_JMP | BPF_JSLE | BPF_K:
1286         case BPF_JMP | BPF_JSET | BPF_K:
1287                 /* Account for the extra offset in case of a backjump. */
1288                 off = from->off;
1289                 if (off < 0)
1290                         off -= 2;
1291                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1292                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1293                 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1294                 break;
1295
1296         case BPF_JMP32 | BPF_JEQ  | BPF_K:
1297         case BPF_JMP32 | BPF_JNE  | BPF_K:
1298         case BPF_JMP32 | BPF_JGT  | BPF_K:
1299         case BPF_JMP32 | BPF_JLT  | BPF_K:
1300         case BPF_JMP32 | BPF_JGE  | BPF_K:
1301         case BPF_JMP32 | BPF_JLE  | BPF_K:
1302         case BPF_JMP32 | BPF_JSGT | BPF_K:
1303         case BPF_JMP32 | BPF_JSLT | BPF_K:
1304         case BPF_JMP32 | BPF_JSGE | BPF_K:
1305         case BPF_JMP32 | BPF_JSLE | BPF_K:
1306         case BPF_JMP32 | BPF_JSET | BPF_K:
1307                 /* Account for the extra offset in case of a backjump. */
1308                 off = from->off;
1309                 if (off < 0)
1310                         off -= 2;
1311                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1312                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1313                 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1314                                       off);
1315                 break;
1316
1317         case BPF_LD | BPF_IMM | BPF_DW:
1318                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1319                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1320                 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1321                 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1322                 break;
1323         case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1324                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1325                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1326                 if (emit_zext)
1327                         *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1328                 *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1329                 break;
1330
1331         case BPF_ST | BPF_MEM | BPF_DW:
1332         case BPF_ST | BPF_MEM | BPF_W:
1333         case BPF_ST | BPF_MEM | BPF_H:
1334         case BPF_ST | BPF_MEM | BPF_B:
1335                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1336                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1337                 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1338                 break;
1339         }
1340 out:
1341         return to - to_buff;
1342 }
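/* Blinding example (imm_rnd is freshly randomized per insn): the insn
 *
 *      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * is rewritten by the ALU64 | BPF_K case above into
 *
 *      BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234)
 *      BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *      BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the user-controlled constant never appears verbatim in the image.
 */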
1343
1344 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1345                                               gfp_t gfp_extra_flags)
1346 {
1347         gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1348         struct bpf_prog *fp;
1349
1350         fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1351         if (fp != NULL) {
1352                 /* aux->prog still points to the fp_other one, so
1353                  * when promoting the clone to the real program,
1354                  * this still needs to be adapted.
1355                  */
1356                 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1357         }
1358
1359         return fp;
1360 }
1361
1362 static void bpf_prog_clone_free(struct bpf_prog *fp)
1363 {
1364         /* aux was stolen by the other clone, so we cannot free
1365          * it from this path! It will be freed eventually by the
1366          * other program on release.
1367          *
1368          * At this point, we don't need a deferred release since
1369          * clone is guaranteed to not be locked.
1370          */
1371         fp->aux = NULL;
1372         fp->stats = NULL;
1373         fp->active = NULL;
1374         __bpf_prog_free(fp);
1375 }
1376
1377 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1378 {
1379         /* We have to repoint aux->prog to self, as we don't
1380          * know whether fp here is the clone or the original.
1381          */
1382         fp->aux->prog = fp;
1383         bpf_prog_clone_free(fp_other);
1384 }
1385
1386 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1387 {
1388         struct bpf_insn insn_buff[16], aux[2];
1389         struct bpf_prog *clone, *tmp;
1390         int insn_delta, insn_cnt;
1391         struct bpf_insn *insn;
1392         int i, rewritten;
1393
1394         if (!prog->blinding_requested || prog->blinded)
1395                 return prog;
1396
1397         clone = bpf_prog_clone_create(prog, GFP_USER);
1398         if (!clone)
1399                 return ERR_PTR(-ENOMEM);
1400
1401         insn_cnt = clone->len;
1402         insn = clone->insnsi;
1403
1404         for (i = 0; i < insn_cnt; i++, insn++) {
1405                 if (bpf_pseudo_func(insn)) {
1406                         /* ld_imm64 with an address of bpf subprog is not
1407                          * a user controlled constant. Don't randomize it,
1408                          * since it will conflict with jit_subprogs() logic.
1409                          */
1410                         insn++;
1411                         i++;
1412                         continue;
1413                 }
1414
1415                 /* We temporarily need to hold the original ld64 insn
1416                  * so that we can still access the first part in the
1417                  * second blinding run.
1418                  */
1419                 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1420                     insn[1].code == 0)
1421                         memcpy(aux, insn, sizeof(aux));
1422
1423                 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1424                                                 clone->aux->verifier_zext);
1425                 if (!rewritten)
1426                         continue;
1427
1428                 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1429                 if (IS_ERR(tmp)) {
1430                         /* Patching may have repointed aux->prog during
1431                          * realloc from the original one, so we need to
1432                          * fix it up here on error.
1433                          */
1434                         bpf_jit_prog_release_other(prog, clone);
1435                         return tmp;
1436                 }
1437
1438                 clone = tmp;
1439                 insn_delta = rewritten - 1;
1440
1441                 /* Walk new program and skip insns we just inserted. */
1442                 insn = clone->insnsi + i + insn_delta;
1443                 insn_cnt += insn_delta;
1444                 i        += insn_delta;
1445         }
1446
1447         clone->blinded = 1;
1448         return clone;
1449 }
1450 #endif /* CONFIG_BPF_JIT */
1451
1452 /* Base function for offset calculation. Needs to go into .text section,
1453  * therefore keeping it non-static as well; will also be used by JITs
1454  * anyway later on, so do not let the compiler omit it. This also needs
1455  * to go into kallsyms for correlation from e.g. bpftool, so naming
1456  * must not change.
1457  */
1458 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1459 {
1460         return 0;
1461 }
1462 EXPORT_SYMBOL_GPL(__bpf_call_base);
1463
1464 /* All UAPI available opcodes. */
1465 #define BPF_INSN_MAP(INSN_2, INSN_3)            \
1466         /* 32 bit ALU operations. */            \
1467         /*   Register based. */                 \
1468         INSN_3(ALU, ADD,  X),                   \
1469         INSN_3(ALU, SUB,  X),                   \
1470         INSN_3(ALU, AND,  X),                   \
1471         INSN_3(ALU, OR,   X),                   \
1472         INSN_3(ALU, LSH,  X),                   \
1473         INSN_3(ALU, RSH,  X),                   \
1474         INSN_3(ALU, XOR,  X),                   \
1475         INSN_3(ALU, MUL,  X),                   \
1476         INSN_3(ALU, MOV,  X),                   \
1477         INSN_3(ALU, ARSH, X),                   \
1478         INSN_3(ALU, DIV,  X),                   \
1479         INSN_3(ALU, MOD,  X),                   \
1480         INSN_2(ALU, NEG),                       \
1481         INSN_3(ALU, END, TO_BE),                \
1482         INSN_3(ALU, END, TO_LE),                \
1483         /*   Immediate based. */                \
1484         INSN_3(ALU, ADD,  K),                   \
1485         INSN_3(ALU, SUB,  K),                   \
1486         INSN_3(ALU, AND,  K),                   \
1487         INSN_3(ALU, OR,   K),                   \
1488         INSN_3(ALU, LSH,  K),                   \
1489         INSN_3(ALU, RSH,  K),                   \
1490         INSN_3(ALU, XOR,  K),                   \
1491         INSN_3(ALU, MUL,  K),                   \
1492         INSN_3(ALU, MOV,  K),                   \
1493         INSN_3(ALU, ARSH, K),                   \
1494         INSN_3(ALU, DIV,  K),                   \
1495         INSN_3(ALU, MOD,  K),                   \
1496         /* 64 bit ALU operations. */            \
1497         /*   Register based. */                 \
1498         INSN_3(ALU64, ADD,  X),                 \
1499         INSN_3(ALU64, SUB,  X),                 \
1500         INSN_3(ALU64, AND,  X),                 \
1501         INSN_3(ALU64, OR,   X),                 \
1502         INSN_3(ALU64, LSH,  X),                 \
1503         INSN_3(ALU64, RSH,  X),                 \
1504         INSN_3(ALU64, XOR,  X),                 \
1505         INSN_3(ALU64, MUL,  X),                 \
1506         INSN_3(ALU64, MOV,  X),                 \
1507         INSN_3(ALU64, ARSH, X),                 \
1508         INSN_3(ALU64, DIV,  X),                 \
1509         INSN_3(ALU64, MOD,  X),                 \
1510         INSN_2(ALU64, NEG),                     \
1511         /*   Immediate based. */                \
1512         INSN_3(ALU64, ADD,  K),                 \
1513         INSN_3(ALU64, SUB,  K),                 \
1514         INSN_3(ALU64, AND,  K),                 \
1515         INSN_3(ALU64, OR,   K),                 \
1516         INSN_3(ALU64, LSH,  K),                 \
1517         INSN_3(ALU64, RSH,  K),                 \
1518         INSN_3(ALU64, XOR,  K),                 \
1519         INSN_3(ALU64, MUL,  K),                 \
1520         INSN_3(ALU64, MOV,  K),                 \
1521         INSN_3(ALU64, ARSH, K),                 \
1522         INSN_3(ALU64, DIV,  K),                 \
1523         INSN_3(ALU64, MOD,  K),                 \
1524         /* Call instruction. */                 \
1525         INSN_2(JMP, CALL),                      \
1526         /* Exit instruction. */                 \
1527         INSN_2(JMP, EXIT),                      \
1528         /* 32-bit Jump instructions. */         \
1529         /*   Register based. */                 \
1530         INSN_3(JMP32, JEQ,  X),                 \
1531         INSN_3(JMP32, JNE,  X),                 \
1532         INSN_3(JMP32, JGT,  X),                 \
1533         INSN_3(JMP32, JLT,  X),                 \
1534         INSN_3(JMP32, JGE,  X),                 \
1535         INSN_3(JMP32, JLE,  X),                 \
1536         INSN_3(JMP32, JSGT, X),                 \
1537         INSN_3(JMP32, JSLT, X),                 \
1538         INSN_3(JMP32, JSGE, X),                 \
1539         INSN_3(JMP32, JSLE, X),                 \
1540         INSN_3(JMP32, JSET, X),                 \
1541         /*   Immediate based. */                \
1542         INSN_3(JMP32, JEQ,  K),                 \
1543         INSN_3(JMP32, JNE,  K),                 \
1544         INSN_3(JMP32, JGT,  K),                 \
1545         INSN_3(JMP32, JLT,  K),                 \
1546         INSN_3(JMP32, JGE,  K),                 \
1547         INSN_3(JMP32, JLE,  K),                 \
1548         INSN_3(JMP32, JSGT, K),                 \
1549         INSN_3(JMP32, JSLT, K),                 \
1550         INSN_3(JMP32, JSGE, K),                 \
1551         INSN_3(JMP32, JSLE, K),                 \
1552         INSN_3(JMP32, JSET, K),                 \
1553         /* Jump instructions. */                \
1554         /*   Register based. */                 \
1555         INSN_3(JMP, JEQ,  X),                   \
1556         INSN_3(JMP, JNE,  X),                   \
1557         INSN_3(JMP, JGT,  X),                   \
1558         INSN_3(JMP, JLT,  X),                   \
1559         INSN_3(JMP, JGE,  X),                   \
1560         INSN_3(JMP, JLE,  X),                   \
1561         INSN_3(JMP, JSGT, X),                   \
1562         INSN_3(JMP, JSLT, X),                   \
1563         INSN_3(JMP, JSGE, X),                   \
1564         INSN_3(JMP, JSLE, X),                   \
1565         INSN_3(JMP, JSET, X),                   \
1566         /*   Immediate based. */                \
1567         INSN_3(JMP, JEQ,  K),                   \
1568         INSN_3(JMP, JNE,  K),                   \
1569         INSN_3(JMP, JGT,  K),                   \
1570         INSN_3(JMP, JLT,  K),                   \
1571         INSN_3(JMP, JGE,  K),                   \
1572         INSN_3(JMP, JLE,  K),                   \
1573         INSN_3(JMP, JSGT, K),                   \
1574         INSN_3(JMP, JSLT, K),                   \
1575         INSN_3(JMP, JSGE, K),                   \
1576         INSN_3(JMP, JSLE, K),                   \
1577         INSN_3(JMP, JSET, K),                   \
1578         INSN_2(JMP, JA),                        \
1579         /* Store instructions. */               \
1580         /*   Register based. */                 \
1581         INSN_3(STX, MEM,  B),                   \
1582         INSN_3(STX, MEM,  H),                   \
1583         INSN_3(STX, MEM,  W),                   \
1584         INSN_3(STX, MEM,  DW),                  \
1585         INSN_3(STX, ATOMIC, W),                 \
1586         INSN_3(STX, ATOMIC, DW),                \
1587         /*   Immediate based. */                \
1588         INSN_3(ST, MEM, B),                     \
1589         INSN_3(ST, MEM, H),                     \
1590         INSN_3(ST, MEM, W),                     \
1591         INSN_3(ST, MEM, DW),                    \
1592         /* Load instructions. */                \
1593         /*   Register based. */                 \
1594         INSN_3(LDX, MEM, B),                    \
1595         INSN_3(LDX, MEM, H),                    \
1596         INSN_3(LDX, MEM, W),                    \
1597         INSN_3(LDX, MEM, DW),                   \
1598         /*   Immediate based. */                \
1599         INSN_3(LD, IMM, DW)
1600
1601 bool bpf_opcode_in_insntable(u8 code)
1602 {
1603 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1604 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1605         static const bool public_insntable[256] = {
1606                 [0 ... 255] = false,
1607                 /* Now overwrite non-defaults ... */
1608                 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1609                 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1610                 [BPF_LD | BPF_ABS | BPF_B] = true,
1611                 [BPF_LD | BPF_ABS | BPF_H] = true,
1612                 [BPF_LD | BPF_ABS | BPF_W] = true,
1613                 [BPF_LD | BPF_IND | BPF_B] = true,
1614                 [BPF_LD | BPF_IND | BPF_H] = true,
1615                 [BPF_LD | BPF_IND | BPF_W] = true,
1616         };
1617 #undef BPF_INSN_3_TBL
1618 #undef BPF_INSN_2_TBL
1619         return public_insntable[code];
1620 }
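
/* For illustration: with the BPF_INSN_3_TBL helper above, an entry such as
 * INSN_3(ALU64, ADD, X) from BPF_INSN_MAP expands to
 *
 *      [BPF_ALU64 | BPF_ADD | BPF_X] = true,
 *
 * so the check is a plain O(1) lookup in a 256-entry table covering every
 * possible insn->code value.
 */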
1621
1622 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1623 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1624 {
1625         memset(dst, 0, size);
1626         return -EFAULT;
1627 }
1628
1629 /**
1630  *      ___bpf_prog_run - run eBPF program on a given context
1631  *      @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1632  *      @insn: is the array of eBPF instructions
1633  *
1634  * Decode and execute eBPF instructions.
1635  *
1636  * Return: whatever value is in %BPF_R0 at program exit
1637  */
1638 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1639 {
1640 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1641 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1642         static const void * const jumptable[256] __annotate_jump_table = {
1643                 [0 ... 255] = &&default_label,
1644                 /* Now overwrite non-defaults ... */
1645                 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1646                 /* Non-UAPI available opcodes. */
1647                 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1648                 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1649                 [BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1650                 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1651                 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1652                 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1653                 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1654         };
1655 #undef BPF_INSN_3_LBL
1656 #undef BPF_INSN_2_LBL
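
        /* For illustration: via BPF_INSN_3_LBL, the same INSN_3(ALU64, ADD, X)
         * entry expands here to
         *
         *      [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
         *
         * so each opcode is dispatched through a computed goto to the matching
         * label defined further below.
         */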
1657         u32 tail_call_cnt = 0;
1658
1659 #define CONT     ({ insn++; goto select_insn; })
1660 #define CONT_JMP ({ insn++; goto select_insn; })
1661
1662 select_insn:
1663         goto *jumptable[insn->code];
1664
1665         /* Explicitly mask the register-based shift amounts with 63 or 31
1666          * to avoid undefined behavior. Normally this won't affect the
1667          * generated code: on native 64 bit archs such as x86-64 or arm64,
1668          * the compiler optimizes the AND away for the interpreter. In case
1669          * of JITs, each of the JIT backends compiles the BPF shift operations
1670          * to machine instructions which produce implementation-defined
1671          * results in such a case; the resulting contents of the register
1672          * may be arbitrary, but program behaviour as a whole remains
1673          * defined. In other words, in case of JIT backends, the AND must
1674          * /not/ be added to the emitted LSH/RSH/ARSH translation.
1675          */
1676         /* ALU (shifts) */
1677 #define SHT(OPCODE, OP)                                 \
1678         ALU64_##OPCODE##_X:                             \
1679                 DST = DST OP (SRC & 63);                \
1680                 CONT;                                   \
1681         ALU_##OPCODE##_X:                               \
1682                 DST = (u32) DST OP ((u32) SRC & 31);    \
1683                 CONT;                                   \
1684         ALU64_##OPCODE##_K:                             \
1685                 DST = DST OP IMM;                       \
1686                 CONT;                                   \
1687         ALU_##OPCODE##_K:                               \
1688                 DST = (u32) DST OP (u32) IMM;           \
1689                 CONT;
1690         /* ALU (rest) */
1691 #define ALU(OPCODE, OP)                                 \
1692         ALU64_##OPCODE##_X:                             \
1693                 DST = DST OP SRC;                       \
1694                 CONT;                                   \
1695         ALU_##OPCODE##_X:                               \
1696                 DST = (u32) DST OP (u32) SRC;           \
1697                 CONT;                                   \
1698         ALU64_##OPCODE##_K:                             \
1699                 DST = DST OP IMM;                       \
1700                 CONT;                                   \
1701         ALU_##OPCODE##_K:                               \
1702                 DST = (u32) DST OP (u32) IMM;           \
1703                 CONT;
1704         ALU(ADD,  +)
1705         ALU(SUB,  -)
1706         ALU(AND,  &)
1707         ALU(OR,   |)
1708         ALU(XOR,  ^)
1709         ALU(MUL,  *)
1710         SHT(LSH, <<)
1711         SHT(RSH, >>)
1712 #undef SHT
1713 #undef ALU
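
        /* For illustration: SHT(LSH, <<) above expands (among others) to
         *
         *      ALU64_LSH_X:
         *              DST = DST << (SRC & 63);
         *              CONT;
         *
         * which is where the explicit masking of register-based shift amounts
         * described above takes effect for the interpreter.
         */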
1714         ALU_NEG:
1715                 DST = (u32) -DST;
1716                 CONT;
1717         ALU64_NEG:
1718                 DST = -DST;
1719                 CONT;
1720         ALU_MOV_X:
1721                 DST = (u32) SRC;
1722                 CONT;
1723         ALU_MOV_K:
1724                 DST = (u32) IMM;
1725                 CONT;
1726         ALU64_MOV_X:
1727                 DST = SRC;
1728                 CONT;
1729         ALU64_MOV_K:
1730                 DST = IMM;
1731                 CONT;
1732         LD_IMM_DW:
1733                 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1734                 insn++;
1735                 CONT;
1736         ALU_ARSH_X:
1737                 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1738                 CONT;
1739         ALU_ARSH_K:
1740                 DST = (u64) (u32) (((s32) DST) >> IMM);
1741                 CONT;
1742         ALU64_ARSH_X:
1743                 (*(s64 *) &DST) >>= (SRC & 63);
1744                 CONT;
1745         ALU64_ARSH_K:
1746                 (*(s64 *) &DST) >>= IMM;
1747                 CONT;
1748         ALU64_MOD_X:
1749                 div64_u64_rem(DST, SRC, &AX);
1750                 DST = AX;
1751                 CONT;
1752         ALU_MOD_X:
1753                 AX = (u32) DST;
1754                 DST = do_div(AX, (u32) SRC);
1755                 CONT;
1756         ALU64_MOD_K:
1757                 div64_u64_rem(DST, IMM, &AX);
1758                 DST = AX;
1759                 CONT;
1760         ALU_MOD_K:
1761                 AX = (u32) DST;
1762                 DST = do_div(AX, (u32) IMM);
1763                 CONT;
1764         ALU64_DIV_X:
1765                 DST = div64_u64(DST, SRC);
1766                 CONT;
1767         ALU_DIV_X:
1768                 AX = (u32) DST;
1769                 do_div(AX, (u32) SRC);
1770                 DST = (u32) AX;
1771                 CONT;
1772         ALU64_DIV_K:
1773                 DST = div64_u64(DST, IMM);
1774                 CONT;
1775         ALU_DIV_K:
1776                 AX = (u32) DST;
1777                 do_div(AX, (u32) IMM);
1778                 DST = (u32) AX;
1779                 CONT;
1780         ALU_END_TO_BE:
1781                 switch (IMM) {
1782                 case 16:
1783                         DST = (__force u16) cpu_to_be16(DST);
1784                         break;
1785                 case 32:
1786                         DST = (__force u32) cpu_to_be32(DST);
1787                         break;
1788                 case 64:
1789                         DST = (__force u64) cpu_to_be64(DST);
1790                         break;
1791                 }
1792                 CONT;
1793         ALU_END_TO_LE:
1794                 switch (IMM) {
1795                 case 16:
1796                         DST = (__force u16) cpu_to_le16(DST);
1797                         break;
1798                 case 32:
1799                         DST = (__force u32) cpu_to_le32(DST);
1800                         break;
1801                 case 64:
1802                         DST = (__force u64) cpu_to_le64(DST);
1803                         break;
1804                 }
1805                 CONT;
1806
1807         /* CALL */
1808         JMP_CALL:
1809                 /* Function call scratches BPF_R1-BPF_R5 registers,
1810                  * preserves BPF_R6-BPF_R9, and stores return value
1811                  * into BPF_R0.
1812                  */
1813                 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1814                                                        BPF_R4, BPF_R5);
1815                 CONT;
1816
1817         JMP_CALL_ARGS:
1818                 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1819                                                             BPF_R3, BPF_R4,
1820                                                             BPF_R5,
1821                                                             insn + insn->off + 1);
1822                 CONT;
1823
1824         JMP_TAIL_CALL: {
1825                 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1826                 struct bpf_array *array = container_of(map, struct bpf_array, map);
1827                 struct bpf_prog *prog;
1828                 u32 index = BPF_R3;
1829
1830                 if (unlikely(index >= array->map.max_entries))
1831                         goto out;
1832
1833                 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1834                         goto out;
1835
1836                 tail_call_cnt++;
1837
1838                 prog = READ_ONCE(array->ptrs[index]);
1839                 if (!prog)
1840                         goto out;
1841
1842                 /* ARG1 at this point is guaranteed to point to CTX from
1843                  * the verifier side, because the tail call is handled like
1844                  * a helper, that is, via bpf_tail_call_proto, where
1845                  * arg1_type is ARG_PTR_TO_CTX.
1846                  */
1847                 insn = prog->insnsi;
1848                 goto select_insn;
1849 out:
1850                 CONT;
1851         }
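
        /* For illustration, from the BPF program side this handler corresponds
         * to a helper call like
         *
         *      bpf_tail_call(ctx, &prog_array_map, index);
         *
         * with R1 = ctx, R2 = the BPF_MAP_TYPE_PROG_ARRAY map and R3 = index,
         * matching BPF_R2/BPF_R3 above (prog_array_map being a hypothetical
         * map name).
         */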
1852         JMP_JA:
1853                 insn += insn->off;
1854                 CONT;
1855         JMP_EXIT:
1856                 return BPF_R0;
1857         /* JMP */
1858 #define COND_JMP(SIGN, OPCODE, CMP_OP)                          \
1859         JMP_##OPCODE##_X:                                       \
1860                 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {     \
1861                         insn += insn->off;                      \
1862                         CONT_JMP;                               \
1863                 }                                               \
1864                 CONT;                                           \
1865         JMP32_##OPCODE##_X:                                     \
1866                 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {     \
1867                         insn += insn->off;                      \
1868                         CONT_JMP;                               \
1869                 }                                               \
1870                 CONT;                                           \
1871         JMP_##OPCODE##_K:                                       \
1872                 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {     \
1873                         insn += insn->off;                      \
1874                         CONT_JMP;                               \
1875                 }                                               \
1876                 CONT;                                           \
1877         JMP32_##OPCODE##_K:                                     \
1878                 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {     \
1879                         insn += insn->off;                      \
1880                         CONT_JMP;                               \
1881                 }                                               \
1882                 CONT;
1883         COND_JMP(u, JEQ, ==)
1884         COND_JMP(u, JNE, !=)
1885         COND_JMP(u, JGT, >)
1886         COND_JMP(u, JLT, <)
1887         COND_JMP(u, JGE, >=)
1888         COND_JMP(u, JLE, <=)
1889         COND_JMP(u, JSET, &)
1890         COND_JMP(s, JSGT, >)
1891         COND_JMP(s, JSLT, <)
1892         COND_JMP(s, JSGE, >=)
1893         COND_JMP(s, JSLE, <=)
1894 #undef COND_JMP
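
        /* For illustration: COND_JMP(u, JEQ, ==) above expands (among others) to
         *
         *      JMP_JEQ_X:
         *              if ((u64) DST == (u64) SRC) {
         *                      insn += insn->off;
         *                      CONT_JMP;
         *              }
         *              CONT;
         *
         * i.e. a taken jump advances insn by the signed 16 bit offset before
         * dispatching the next instruction.
         */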
1895         /* ST, STX and LDX */
1896         ST_NOSPEC:
1897                 /* Speculation barrier for mitigating Speculative Store Bypass.
1898                  * In case of arm64, we rely on the firmware mitigation as
1899                  * controlled via the ssbd kernel parameter. Whenever the
1900                  * mitigation is enabled, it works for all of the kernel code
1901                  * with no need to provide any additional instructions here.
1902                  * In case of x86, we use 'lfence' insn for mitigation. We
1903                  * reuse preexisting logic from Spectre v1 mitigation that
1904                  * happens to produce the required code on x86 for v4 as well.
1905                  */
1906 #ifdef CONFIG_X86
1907                 barrier_nospec();
1908 #endif
1909                 CONT;
1910 #define LDST(SIZEOP, SIZE)                                              \
1911         STX_MEM_##SIZEOP:                                               \
1912                 *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
1913                 CONT;                                                   \
1914         ST_MEM_##SIZEOP:                                                \
1915                 *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
1916                 CONT;                                                   \
1917         LDX_MEM_##SIZEOP:                                               \
1918                 DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
1919                 CONT;                                                   \
1920         LDX_PROBE_MEM_##SIZEOP:                                         \
1921                 bpf_probe_read_kernel(&DST, sizeof(SIZE),               \
1922                                       (const void *)(long) (SRC + insn->off));  \
1923                 DST = *((SIZE *)&DST);                                  \
1924                 CONT;
1925
1926         LDST(B,   u8)
1927         LDST(H,  u16)
1928         LDST(W,  u32)
1929         LDST(DW, u64)
1930 #undef LDST
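
        /* For illustration: LDST(W, u32) above expands (among others) to
         *
         *      LDX_MEM_W:
         *              DST = *(u32 *)(unsigned long) (SRC + insn->off);
         *              CONT;
         *
         * while the LDX_PROBE_MEM_* variants go through bpf_probe_read_kernel()
         * instead, which zeroes the destination on a faulting access rather
         * than letting the load fault.
         */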
1931
1932 #define ATOMIC_ALU_OP(BOP, KOP)                                         \
1933                 case BOP:                                               \
1934                         if (BPF_SIZE(insn->code) == BPF_W)              \
1935                                 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1936                                              (DST + insn->off));        \
1937                         else                                            \
1938                                 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1939                                                (DST + insn->off));      \
1940                         break;                                          \
1941                 case BOP | BPF_FETCH:                                   \
1942                         if (BPF_SIZE(insn->code) == BPF_W)              \
1943                                 SRC = (u32) atomic_fetch_##KOP(         \
1944                                         (u32) SRC,                      \
1945                                         (atomic_t *)(unsigned long) (DST + insn->off)); \
1946                         else                                            \
1947                                 SRC = (u64) atomic64_fetch_##KOP(       \
1948                                         (u64) SRC,                      \
1949                                         (atomic64_t *)(unsigned long) (DST + insn->off)); \
1950                         break;
1951
1952         STX_ATOMIC_DW:
1953         STX_ATOMIC_W:
1954                 switch (IMM) {
1955                 ATOMIC_ALU_OP(BPF_ADD, add)
1956                 ATOMIC_ALU_OP(BPF_AND, and)
1957                 ATOMIC_ALU_OP(BPF_OR, or)
1958                 ATOMIC_ALU_OP(BPF_XOR, xor)
1959 #undef ATOMIC_ALU_OP
1960
1961                 case BPF_XCHG:
1962                         if (BPF_SIZE(insn->code) == BPF_W)
1963                                 SRC = (u32) atomic_xchg(
1964                                         (atomic_t *)(unsigned long) (DST + insn->off),
1965                                         (u32) SRC);
1966                         else
1967                                 SRC = (u64) atomic64_xchg(
1968                                         (atomic64_t *)(unsigned long) (DST + insn->off),
1969                                         (u64) SRC);
1970                         break;
1971                 case BPF_CMPXCHG:
1972                         if (BPF_SIZE(insn->code) == BPF_W)
1973                                 BPF_R0 = (u32) atomic_cmpxchg(
1974                                         (atomic_t *)(unsigned long) (DST + insn->off),
1975                                         (u32) BPF_R0, (u32) SRC);
1976                         else
1977                                 BPF_R0 = (u64) atomic64_cmpxchg(
1978                                         (atomic64_t *)(unsigned long) (DST + insn->off),
1979                                         (u64) BPF_R0, (u64) SRC);
1980                         break;
1981
1982                 default:
1983                         goto default_label;
1984                 }
1985                 CONT;
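
        /* For illustration: for a BPF_W sized atomic with
         * insn->imm == BPF_ADD | BPF_FETCH, ATOMIC_ALU_OP above expands to
         * roughly
         *
         *      SRC = (u32) atomic_fetch_add((u32) SRC,
         *              (atomic_t *)(unsigned long) (DST + insn->off));
         *
         * i.e. the old memory value is returned in the source register.
         */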
1986
1987         default_label:
1988                 /* If we ever reach this, we have a bug somewhere. Die hard here
1989                  * instead of just returning 0; we could be somewhere in a subprog,
1990                  * instead of just returning 0; we could be somewhere in a subprog,
1991                  * so execution could otherwise continue, which we do /not/ want.
1992                  * Note that the verifier whitelists all opcodes in bpf_opcode_in_insntable().
1993                  */
1994                 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
1995                         insn->code, insn->imm);
1996                 BUG_ON(1);
1997                 return 0;
1998 }
1999
2000 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2001 #define DEFINE_BPF_PROG_RUN(stack_size) \
2002 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2003 { \
2004         u64 stack[stack_size / sizeof(u64)]; \
2005         u64 regs[MAX_BPF_EXT_REG]; \
2006 \
2007         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2008         ARG1 = (u64) (unsigned long) ctx; \
2009         return ___bpf_prog_run(regs, insn); \
2010 }
2011
2012 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2013 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2014 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2015                                       const struct bpf_insn *insn) \
2016 { \
2017         u64 stack[stack_size / sizeof(u64)]; \
2018         u64 regs[MAX_BPF_EXT_REG]; \
2019 \
2020         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2021         BPF_R1 = r1; \
2022         BPF_R2 = r2; \
2023         BPF_R3 = r3; \
2024         BPF_R4 = r4; \
2025         BPF_R5 = r5; \
2026         return ___bpf_prog_run(regs, insn); \
2027 }
2028
2029 #define EVAL1(FN, X) FN(X)
2030 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2031 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2032 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2033 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2034 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2035
2036 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2037 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2038 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2039
2040 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2041 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2042 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2043
2044 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2045
2046 static unsigned int (*interpreters[])(const void *ctx,
2047                                       const struct bpf_insn *insn) = {
2048 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2049 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2050 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2051 };
2052 #undef PROG_NAME_LIST
2053 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2054 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2055                                   const struct bpf_insn *insn) = {
2056 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2057 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2058 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2059 };
2060 #undef PROG_NAME_LIST
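
/* For illustration: the EVAL*() lines above stamp out one interpreter per
 * 32 byte stack-size step, e.g. DEFINE_BPF_PROG_RUN(64) defines
 *
 *      static unsigned int __bpf_prog_run64(const void *ctx,
 *                                           const struct bpf_insn *insn);
 *
 * and bpf_prog_select_func() below picks the matching entry via
 * interpreters[(round_up(stack_depth, 32) / 32) - 1], so a program with a
 * stack_depth of e.g. 40 runs on the 64 byte variant.
 */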
2061
2062 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2063 {
2064         stack_depth = max_t(u32, stack_depth, 1);
2065         insn->off = (s16) insn->imm;
2066         insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2067                 __bpf_call_base_args;
2068         insn->code = BPF_JMP | BPF_CALL_ARGS;
2069 }
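
/* For illustration: after the patching above, the interpreter's JMP_CALL_ARGS
 * handler invokes the stack-size specific variant roughly as
 *
 *      (__bpf_call_base_args + insn->imm)(r1, ..., r5, insn + insn->off + 1);
 *
 * with insn->off carrying the original relative offset to the callee (so
 * insn + insn->off + 1 points at the callee's first instruction) and
 * insn->imm the offset of the chosen interpreters_args[] entry relative to
 * __bpf_call_base_args.
 */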
2070
2071 #else
2072 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2073                                          const struct bpf_insn *insn)
2074 {
2075         /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2076          * is not working properly, so warn about it!
2077          */
2078         WARN_ON_ONCE(1);
2079         return 0;
2080 }
2081 #endif
2082
2083 bool bpf_prog_map_compatible(struct bpf_map *map,
2084                              const struct bpf_prog *fp)
2085 {
2086         bool ret;
2087
2088         if (fp->kprobe_override)
2089                 return false;
2090
2091         spin_lock(&map->owner.lock);
2092         if (!map->owner.type) {
2093                 /* There's no owner yet where we could check for
2094                  * compatibility.
2095                  */
2096                 map->owner.type  = fp->type;
2097                 map->owner.jited = fp->jited;
2098                 map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2099                 ret = true;
2100         } else {
2101                 ret = map->owner.type  == fp->type &&
2102                       map->owner.jited == fp->jited &&
2103                       map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2104         }
2105         spin_unlock(&map->owner.lock);
2106
2107         return ret;
2108 }
2109
2110 static int bpf_check_tail_call(const struct bpf_prog *fp)
2111 {
2112         struct bpf_prog_aux *aux = fp->aux;
2113         int i, ret = 0;
2114
2115         mutex_lock(&aux->used_maps_mutex);
2116         for (i = 0; i < aux->used_map_cnt; i++) {
2117                 struct bpf_map *map = aux->used_maps[i];
2118
2119                 if (!map_type_contains_progs(map))
2120                         continue;
2121
2122                 if (!bpf_prog_map_compatible(map, fp)) {
2123                         ret = -EINVAL;
2124                         goto out;
2125                 }
2126         }
2127
2128 out:
2129         mutex_unlock(&aux->used_maps_mutex);
2130         return ret;
2131 }
2132
2133 static void bpf_prog_select_func(struct bpf_prog *fp)
2134 {
2135 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2136         u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2137
2138         fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2139 #else
2140         fp->bpf_func = __bpf_prog_ret0_warn;
2141 #endif
2142 }
2143
2144 /**
2145  *      bpf_prog_select_runtime - select exec runtime for BPF program
2146  *      @fp: bpf_prog populated with BPF program
2147  *      @err: pointer to error variable
2148  *
2149  * Try to JIT eBPF program, if JIT is not available, use interpreter.
2150  * The BPF program will be executed via bpf_prog_run() function.
2151  *
2152  * Return: the &fp argument along with &err set to 0 for success or
2153  * a negative errno code on failure
2154  */
2155 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2156 {
2157         /* In case of BPF to BPF calls, the verifier did all the prep
2158          * work with regard to JITing, etc.
2159          */
2160         bool jit_needed = false;
2161
2162         if (fp->bpf_func)
2163                 goto finalize;
2164
2165         if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2166             bpf_prog_has_kfunc_call(fp))
2167                 jit_needed = true;
2168
2169         bpf_prog_select_func(fp);
2170
2171         /* eBPF JITs can rewrite the program in case constant
2172          * blinding is active. However, in case of error during
2173          * blinding, bpf_int_jit_compile() must always return a
2174          * valid program, which in this case would simply not
2175          * be JITed, but fall back to the interpreter.
2176          */
2177         if (!bpf_prog_is_dev_bound(fp->aux)) {
2178                 *err = bpf_prog_alloc_jited_linfo(fp);
2179                 if (*err)
2180                         return fp;
2181
2182                 fp = bpf_int_jit_compile(fp);
2183                 bpf_prog_jit_attempt_done(fp);
2184                 if (!fp->jited && jit_needed) {
2185                         *err = -ENOTSUPP;
2186                         return fp;
2187                 }
2188         } else {
2189                 *err = bpf_prog_offload_compile(fp);
2190                 if (*err)
2191                         return fp;
2192         }
2193
2194 finalize:
2195         bpf_prog_lock_ro(fp);
2196
2197         /* The tail call compatibility check can only be done at this
2198          * late stage, as we need to determine whether we deal with JITed
2199          * or non-JITed program concatenations, and not all eBPF JITs
2200          * might immediately support all features.
2201          */
2202         *err = bpf_check_tail_call(fp);
2203
2204         return fp;
2205 }
2206 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2207
2208 static unsigned int __bpf_prog_ret1(const void *ctx,
2209                                     const struct bpf_insn *insn)
2210 {
2211         return 1;
2212 }
2213
2214 static struct bpf_prog_dummy {
2215         struct bpf_prog prog;
2216 } dummy_bpf_prog = {
2217         .prog = {
2218                 .bpf_func = __bpf_prog_ret1,
2219         },
2220 };
2221
2222 struct bpf_empty_prog_array bpf_empty_prog_array = {
2223         .null_prog = NULL,
2224 };
2225 EXPORT_SYMBOL(bpf_empty_prog_array);
2226
2227 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2228 {
2229         if (prog_cnt)
2230                 return kzalloc(sizeof(struct bpf_prog_array) +
2231                                sizeof(struct bpf_prog_array_item) *
2232                                (prog_cnt + 1),
2233                                flags);
2234
2235         return &bpf_empty_prog_array.hdr;
2236 }
2237
2238 void bpf_prog_array_free(struct bpf_prog_array *progs)
2239 {
2240         if (!progs || progs == &bpf_empty_prog_array.hdr)
2241                 return;
2242         kfree_rcu(progs, rcu);
2243 }
2244
2245 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2246 {
2247         struct bpf_prog_array *progs;
2248
2249         progs = container_of(rcu, struct bpf_prog_array, rcu);
2250         kfree_rcu(progs, rcu);
2251 }
2252
2253 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2254 {
2255         if (!progs || progs == &bpf_empty_prog_array.hdr)
2256                 return;
2257         call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2258 }
2259
2260 int bpf_prog_array_length(struct bpf_prog_array *array)
2261 {
2262         struct bpf_prog_array_item *item;
2263         u32 cnt = 0;
2264
2265         for (item = array->items; item->prog; item++)
2266                 if (item->prog != &dummy_bpf_prog.prog)
2267                         cnt++;
2268         return cnt;
2269 }
2270
2271 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2272 {
2273         struct bpf_prog_array_item *item;
2274
2275         for (item = array->items; item->prog; item++)
2276                 if (item->prog != &dummy_bpf_prog.prog)
2277                         return false;
2278         return true;
2279 }
2280
2281 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2282                                      u32 *prog_ids,
2283                                      u32 request_cnt)
2284 {
2285         struct bpf_prog_array_item *item;
2286         int i = 0;
2287
2288         for (item = array->items; item->prog; item++) {
2289                 if (item->prog == &dummy_bpf_prog.prog)
2290                         continue;
2291                 prog_ids[i] = item->prog->aux->id;
2292                 if (++i == request_cnt) {
2293                         item++;
2294                         break;
2295                 }
2296         }
2297
2298         return !!(item->prog);
2299 }
2300
2301 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2302                                 __u32 __user *prog_ids, u32 cnt)
2303 {
2304         unsigned long err = 0;
2305         bool nospc;
2306         u32 *ids;
2307
2308         /* users of this function are doing:
2309          * cnt = bpf_prog_array_length();
2310          * if (cnt > 0)
2311          *     bpf_prog_array_copy_to_user(..., cnt);
2312          * so the kcalloc below doesn't need an extra cnt > 0 check.
2313          */
2314         ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2315         if (!ids)
2316                 return -ENOMEM;
2317         nospc = bpf_prog_array_copy_core(array, ids, cnt);
2318         err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2319         kfree(ids);
2320         if (err)
2321                 return -EFAULT;
2322         if (nospc)
2323                 return -ENOSPC;
2324         return 0;
2325 }
2326
2327 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2328                                 struct bpf_prog *old_prog)
2329 {
2330         struct bpf_prog_array_item *item;
2331
2332         for (item = array->items; item->prog; item++)
2333                 if (item->prog == old_prog) {
2334                         WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2335                         break;
2336                 }
2337 }
2338
2339 /**
2340  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2341  *                                   index into the program array with
2342  *                                   a dummy no-op program.
2343  * @array: a bpf_prog_array
2344  * @index: the index of the program to replace
2345  *
2346  * Skips over dummy programs, by not counting them, when calculating
2347  * the position of the program to replace.
2348  *
2349  * Return:
2350  * * 0          - Success
2351  * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2352  * * -ENOENT    - Index out of range
2353  */
2354 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2355 {
2356         return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2357 }
2358
2359 /**
2360  * bpf_prog_array_update_at() - Updates the program at the given index
2361  *                              into the program array.
2362  * @array: a bpf_prog_array
2363  * @index: the index of the program to update
2364  * @prog: the program to insert into the array
2365  *
2366  * Skips over dummy programs, by not counting them, when calculating
2367  * the position of the program to update.
2368  *
2369  * Return:
2370  * * 0          - Success
2371  * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2372  * * -ENOENT    - Index out of range
2373  */
2374 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2375                              struct bpf_prog *prog)
2376 {
2377         struct bpf_prog_array_item *item;
2378
2379         if (unlikely(index < 0))
2380                 return -EINVAL;
2381
2382         for (item = array->items; item->prog; item++) {
2383                 if (item->prog == &dummy_bpf_prog.prog)
2384                         continue;
2385                 if (!index) {
2386                         WRITE_ONCE(item->prog, prog);
2387                         return 0;
2388                 }
2389                 index--;
2390         }
2391         return -ENOENT;
2392 }
2393
2394 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2395                         struct bpf_prog *exclude_prog,
2396                         struct bpf_prog *include_prog,
2397                         u64 bpf_cookie,
2398                         struct bpf_prog_array **new_array)
2399 {
2400         int new_prog_cnt, carry_prog_cnt = 0;
2401         struct bpf_prog_array_item *existing, *new;
2402         struct bpf_prog_array *array;
2403         bool found_exclude = false;
2404
2405         /* Figure out how many existing progs we need to carry over to
2406          * the new array.
2407          */
2408         if (old_array) {
2409                 existing = old_array->items;
2410                 for (; existing->prog; existing++) {
2411                         if (existing->prog == exclude_prog) {
2412                                 found_exclude = true;
2413                                 continue;
2414                         }
2415                         if (existing->prog != &dummy_bpf_prog.prog)
2416                                 carry_prog_cnt++;
2417                         if (existing->prog == include_prog)
2418                                 return -EEXIST;
2419                 }
2420         }
2421
2422         if (exclude_prog && !found_exclude)
2423                 return -ENOENT;
2424
2425         /* How many progs (not NULL) will be in the new array? */
2426         new_prog_cnt = carry_prog_cnt;
2427         if (include_prog)
2428                 new_prog_cnt += 1;
2429
2430         /* Do we have any prog (not NULL) in the new array? */
2431         if (!new_prog_cnt) {
2432                 *new_array = NULL;
2433                 return 0;
2434         }
2435
2436         /* +1 as the end of prog_array is marked with NULL */
2437         array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2438         if (!array)
2439                 return -ENOMEM;
2440         new = array->items;
2441
2442         /* Fill in the new prog array */
2443         if (carry_prog_cnt) {
2444                 existing = old_array->items;
2445                 for (; existing->prog; existing++) {
2446                         if (existing->prog == exclude_prog ||
2447                             existing->prog == &dummy_bpf_prog.prog)
2448                                 continue;
2449
2450                         new->prog = existing->prog;
2451                         new->bpf_cookie = existing->bpf_cookie;
2452                         new++;
2453                 }
2454         }
2455         if (include_prog) {
2456                 new->prog = include_prog;
2457                 new->bpf_cookie = bpf_cookie;
2458                 new++;
2459         }
2460         new->prog = NULL;
2461         *new_array = array;
2462         return 0;
2463 }
2464
2465 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2466                              u32 *prog_ids, u32 request_cnt,
2467                              u32 *prog_cnt)
2468 {
2469         u32 cnt = 0;
2470
2471         if (array)
2472                 cnt = bpf_prog_array_length(array);
2473
2474         *prog_cnt = cnt;
2475
2476         /* return early if user requested only program count or nothing to copy */
2477         if (!request_cnt || !cnt)
2478                 return 0;
2479
2480         /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2481         return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2482                                                                      : 0;
2483 }
2484
2485 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2486                           struct bpf_map **used_maps, u32 len)
2487 {
2488         struct bpf_map *map;
2489         u32 i;
2490
2491         for (i = 0; i < len; i++) {
2492                 map = used_maps[i];
2493                 if (map->ops->map_poke_untrack)
2494                         map->ops->map_poke_untrack(map, aux);
2495                 bpf_map_put(map);
2496         }
2497 }
2498
2499 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2500 {
2501         __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2502         kfree(aux->used_maps);
2503 }
2504
2505 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2506                           struct btf_mod_pair *used_btfs, u32 len)
2507 {
2508 #ifdef CONFIG_BPF_SYSCALL
2509         struct btf_mod_pair *btf_mod;
2510         u32 i;
2511
2512         for (i = 0; i < len; i++) {
2513                 btf_mod = &used_btfs[i];
2514                 if (btf_mod->module)
2515                         module_put(btf_mod->module);
2516                 btf_put(btf_mod->btf);
2517         }
2518 #endif
2519 }
2520
2521 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2522 {
2523         __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2524         kfree(aux->used_btfs);
2525 }
2526
2527 static void bpf_prog_free_deferred(struct work_struct *work)
2528 {
2529         struct bpf_prog_aux *aux;
2530         int i;
2531
2532         aux = container_of(work, struct bpf_prog_aux, work);
2533 #ifdef CONFIG_BPF_SYSCALL
2534         bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2535 #endif
2536 #ifdef CONFIG_CGROUP_BPF
2537         if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2538                 bpf_cgroup_atype_put(aux->cgroup_atype);
2539 #endif
2540         bpf_free_used_maps(aux);
2541         bpf_free_used_btfs(aux);
2542         if (bpf_prog_is_dev_bound(aux))
2543                 bpf_prog_offload_destroy(aux->prog);
2544 #ifdef CONFIG_PERF_EVENTS
2545         if (aux->prog->has_callchain_buf)
2546                 put_callchain_buffers();
2547 #endif
2548         if (aux->dst_trampoline)
2549                 bpf_trampoline_put(aux->dst_trampoline);
2550         for (i = 0; i < aux->func_cnt; i++) {
2551                 /* We can just unlink the subprog poke descriptor table as
2552                  * it was originally linked to the main program and is also
2553                  * released along with it.
2554                  */
2555                 aux->func[i]->aux->poke_tab = NULL;
2556                 bpf_jit_free(aux->func[i]);
2557         }
2558         if (aux->func_cnt) {
2559                 kfree(aux->func);
2560                 bpf_prog_unlock_free(aux->prog);
2561         } else {
2562                 bpf_jit_free(aux->prog);
2563         }
2564 }
2565
2566 void bpf_prog_free(struct bpf_prog *fp)
2567 {
2568         struct bpf_prog_aux *aux = fp->aux;
2569
2570         if (aux->dst_prog)
2571                 bpf_prog_put(aux->dst_prog);
2572         INIT_WORK(&aux->work, bpf_prog_free_deferred);
2573         schedule_work(&aux->work);
2574 }
2575 EXPORT_SYMBOL_GPL(bpf_prog_free);
2576
2577 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2578 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2579
2580 void bpf_user_rnd_init_once(void)
2581 {
2582         prandom_init_once(&bpf_user_rnd_state);
2583 }
2584
2585 BPF_CALL_0(bpf_user_rnd_u32)
2586 {
2587         /* Should someone ever have the rather unwise idea to use some
2588          * of the registers passed into this function, then note that
2589          * this function is called from native eBPF and classic-to-eBPF
2590          * transformations. Register assignments from both sides are
2591          * different, e.g. classic always sets fn(ctx, A, X) here.
2592          */
2593         struct rnd_state *state;
2594         u32 res;
2595
2596         state = &get_cpu_var(bpf_user_rnd_state);
2597         res = prandom_u32_state(state);
2598         put_cpu_var(bpf_user_rnd_state);
2599
2600         return res;
2601 }
2602
2603 BPF_CALL_0(bpf_get_raw_cpu_id)
2604 {
2605         return raw_smp_processor_id();
2606 }
2607
2608 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2609 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2610 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2611 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2612 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2613 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2614 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2615 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2616 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2617 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2618 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2619
2620 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2621 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2622 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2623 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2624 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2625 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2626
2627 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2628 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2629 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2630 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2631 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2632 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2633 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2634 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2635 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2636 const struct bpf_func_proto bpf_set_retval_proto __weak;
2637 const struct bpf_func_proto bpf_get_retval_proto __weak;
2638
2639 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2640 {
2641         return NULL;
2642 }
2643
2644 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2645 {
2646         return NULL;
2647 }
2648
2649 u64 __weak
2650 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2651                  void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2652 {
2653         return -ENOTSUPP;
2654 }
2655 EXPORT_SYMBOL_GPL(bpf_event_output);
2656
2657 /* Always built-in helper functions. */
2658 const struct bpf_func_proto bpf_tail_call_proto = {
2659         .func           = NULL,
2660         .gpl_only       = false,
2661         .ret_type       = RET_VOID,
2662         .arg1_type      = ARG_PTR_TO_CTX,
2663         .arg2_type      = ARG_CONST_MAP_PTR,
2664         .arg3_type      = ARG_ANYTHING,
2665 };
2666
2667 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2668  * It is encouraged to implement bpf_int_jit_compile() instead, so that
2669  * eBPF and implicitly also cBPF can get JITed!
2670  */
2671 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2672 {
2673         return prog;
2674 }
2675
2676 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2677  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2678  */
2679 void __weak bpf_jit_compile(struct bpf_prog *prog)
2680 {
2681 }
2682
2683 bool __weak bpf_helper_changes_pkt_data(void *func)
2684 {
2685         return false;
2686 }
2687
2688 /* Return TRUE if the JIT backend wants the verifier to enable sub-register usage
2689  * analysis code and wants explicit zero extension inserted by the verifier.
2690  * Otherwise, return FALSE.
2691  *
2692  * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2693  * you don't override this. JITs that don't want these extra insns can detect
2694  * them using insn_is_zext.
2695  */
2696 bool __weak bpf_jit_needs_zext(void)
2697 {
2698         return false;
2699 }
2700
2701 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2702 bool __weak bpf_jit_supports_subprog_tailcalls(void)
2703 {
2704         return false;
2705 }
2706
2707 bool __weak bpf_jit_supports_kfunc_call(void)
2708 {
2709         return false;
2710 }
2711
2712 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2713  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2714  */
2715 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2716                          int len)
2717 {
2718         return -EFAULT;
2719 }
2720
2721 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2722                               void *addr1, void *addr2)
2723 {
2724         return -ENOTSUPP;
2725 }
2726
2727 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2728 {
2729         return ERR_PTR(-ENOTSUPP);
2730 }
2731
2732 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
2733 {
2734         return -ENOTSUPP;
2735 }
2736
2737 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2738 EXPORT_SYMBOL(bpf_stats_enabled_key);
2739
2740 /* All definitions of tracepoints related to BPF. */
2741 #define CREATE_TRACE_POINTS
2742 #include <linux/bpf_trace.h>
2743
2744 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2745 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);