bpf: Add get_func_[arg|ret|arg_cnt] helpers
kernel/trace/bpf_trace.c (platform/kernel/linux-starfive.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/btf.h>
11 #include <linux/filter.h>
12 #include <linux/uaccess.h>
13 #include <linux/ctype.h>
14 #include <linux/kprobes.h>
15 #include <linux/spinlock.h>
16 #include <linux/syscalls.h>
17 #include <linux/error-injection.h>
18 #include <linux/btf_ids.h>
19 #include <linux/bpf_lsm.h>
20
21 #include <net/bpf_sk_storage.h>
22
23 #include <uapi/linux/bpf.h>
24 #include <uapi/linux/btf.h>
25
26 #include <asm/tlb.h>
27
28 #include "trace_probe.h"
29 #include "trace.h"
30
31 #define CREATE_TRACE_POINTS
32 #include "bpf_trace.h"
33
34 #define bpf_event_rcu_dereference(p)                                    \
35         rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
36
37 #ifdef CONFIG_MODULES
38 struct bpf_trace_module {
39         struct module *module;
40         struct list_head list;
41 };
42
43 static LIST_HEAD(bpf_trace_modules);
44 static DEFINE_MUTEX(bpf_module_mutex);
45
46 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
47 {
48         struct bpf_raw_event_map *btp, *ret = NULL;
49         struct bpf_trace_module *btm;
50         unsigned int i;
51
52         mutex_lock(&bpf_module_mutex);
53         list_for_each_entry(btm, &bpf_trace_modules, list) {
54                 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
55                         btp = &btm->module->bpf_raw_events[i];
56                         if (!strcmp(btp->tp->name, name)) {
57                                 if (try_module_get(btm->module))
58                                         ret = btp;
59                                 goto out;
60                         }
61                 }
62         }
63 out:
64         mutex_unlock(&bpf_module_mutex);
65         return ret;
66 }
67 #else
68 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
69 {
70         return NULL;
71 }
72 #endif /* CONFIG_MODULES */
73
74 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
75 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
76
77 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
78                                   u64 flags, const struct btf **btf,
79                                   s32 *btf_id);
80
81 /**
82  * trace_call_bpf - invoke BPF program
83  * @call: tracepoint event
84  * @ctx: opaque context pointer
85  *
86  * kprobe handlers execute BPF programs via this helper.
87  * Can be used from static tracepoints in the future.
88  *
89  * Return: BPF programs always return an integer which is interpreted by
90  * the kprobe handler as:
91  * 0 - return from kprobe (event is filtered out)
92  * 1 - store kprobe event into ring buffer
93  * Other values are reserved and currently alias to 1
94  */
95 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
96 {
97         unsigned int ret;
98
99         cant_sleep();
100
101         if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
102                 /*
103                  * since some bpf program is already running on this cpu,
104                  * don't call into another bpf program (same or different)
105                  * and don't send kprobe event into ring-buffer,
106                  * so return zero here
107                  */
108                 ret = 0;
109                 goto out;
110         }
111
112         /*
113          * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
114          * to all call sites, we check bpf_prog_array_valid() there to see
115          * whether call->prog_array is empty or not, which is a heuristic
116          * to speed up execution.
117          *
118          * If the prog_array that bpf_prog_array_valid() fetched was
119          * non-NULL, we enter trace_call_bpf() and do a proper
120          * rcu_dereference() under the RCU lock. If it turns out that the
121          * prog_array is NULL, we bail out.
122          * Conversely, if the fetched pointer was NULL, the prog_array is
123          * skipped, at the risk of missing events if it was updated in
124          * between that check and the rcu_dereference(); this is an
125          * accepted risk.
126          */
127         ret = BPF_PROG_RUN_ARRAY(call->prog_array, ctx, bpf_prog_run);
128
129  out:
130         __this_cpu_dec(bpf_prog_active);
131
132         return ret;
133 }
134
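As the kernel-doc above spells out, the return value of a kprobe-attached BPF program decides whether the kprobe event reaches the ring buffer. A minimal sketch of the BPF side, assuming libbpf's SEC()/BPF_KPROBE() conveniences; the do_sys_openat2 target and the watched_pid filter are illustrative, not part of this file:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	const volatile pid_t watched_pid;	/* hypothetical knob, set by the loader */

	SEC("kprobe/do_sys_openat2")
	int BPF_KPROBE(filter_openat)
	{
		/* 0: filter the event out; 1: store the kprobe event into
		 * the ring buffer (see trace_call_bpf() above).
		 */
		return (bpf_get_current_pid_tgid() >> 32) == watched_pid;
	}

	char LICENSE[] SEC("license") = "GPL";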
135 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
136 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
137 {
138         regs_set_return_value(regs, rc);
139         override_function_with_return(regs);
140         return 0;
141 }
142
143 static const struct bpf_func_proto bpf_override_return_proto = {
144         .func           = bpf_override_return,
145         .gpl_only       = true,
146         .ret_type       = RET_INTEGER,
147         .arg1_type      = ARG_PTR_TO_CTX,
148         .arg2_type      = ARG_ANYTHING,
149 };
150 #endif
151
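bpf_override_return() pairs with CONFIG_BPF_KPROBE_OVERRIDE to make the traced function return early with a chosen value. A hedged sketch, under the assumption that the target carries an ALLOW_ERROR_INJECTION() annotation, as should_failslab does:

	#include "vmlinux.h"
	#include <errno.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("kprobe/should_failslab")
	int BPF_KPROBE(fail_slab_alloc)
	{
		/* Skip the function body and force an error return. */
		bpf_override_return(ctx, -ENOMEM);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";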
152 static __always_inline int
153 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
154 {
155         int ret;
156
157         ret = copy_from_user_nofault(dst, unsafe_ptr, size);
158         if (unlikely(ret < 0))
159                 memset(dst, 0, size);
160         return ret;
161 }
162
163 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
164            const void __user *, unsafe_ptr)
165 {
166         return bpf_probe_read_user_common(dst, size, unsafe_ptr);
167 }
168
169 const struct bpf_func_proto bpf_probe_read_user_proto = {
170         .func           = bpf_probe_read_user,
171         .gpl_only       = true,
172         .ret_type       = RET_INTEGER,
173         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
174         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
175         .arg3_type      = ARG_ANYTHING,
176 };
177
178 static __always_inline int
179 bpf_probe_read_user_str_common(void *dst, u32 size,
180                                const void __user *unsafe_ptr)
181 {
182         int ret;
183
184         /*
185          * NB: We rely on strncpy_from_user() not copying junk past the NUL
186          * terminator into `dst`.
187          *
188          * strncpy_from_user() does long-sized strides in the fast path. If the
189          * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
190          * then there could be junk after the NUL in `dst`. If the user then
191          * keys a hash map with `dst`, semantically identical strings can
192          * occupy multiple entries in the map.
193          */
194         ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
195         if (unlikely(ret < 0))
196                 memset(dst, 0, size);
197         return ret;
198 }
199
200 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
201            const void __user *, unsafe_ptr)
202 {
203         return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
204 }
205
206 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
207         .func           = bpf_probe_read_user_str,
208         .gpl_only       = true,
209         .ret_type       = RET_INTEGER,
210         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
211         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
212         .arg3_type      = ARG_ANYTHING,
213 };
214
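The NUL-padding guarantee described above matters most when the destination buffer is used as a map key. A hedged sketch of exactly that pattern; the map shape and the do_sys_openat2 probe point are illustrative:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 1024);
		__uint(key_size, 64);
		__uint(value_size, sizeof(u64));
	} hits SEC(".maps");

	SEC("kprobe/do_sys_openat2")
	int BPF_KPROBE(count_by_filename, int dfd, const char *filename)
	{
		char key[64] = {};
		u64 one = 1, *val;

		/* Bytes past the NUL are zero-filled, so two reads of the
		 * same string yield byte-identical keys.
		 */
		if (bpf_probe_read_user_str(key, sizeof(key), filename) < 0)
			return 0;

		val = bpf_map_lookup_elem(&hits, key);
		if (val)
			__sync_fetch_and_add(val, 1);
		else
			bpf_map_update_elem(&hits, key, &one, BPF_ANY);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";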
215 static __always_inline int
216 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
217 {
218         int ret;
219
220         ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
221         if (unlikely(ret < 0))
222                 memset(dst, 0, size);
223         return ret;
224 }
225
226 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
227            const void *, unsafe_ptr)
228 {
229         return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
230 }
231
232 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
233         .func           = bpf_probe_read_kernel,
234         .gpl_only       = true,
235         .ret_type       = RET_INTEGER,
236         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
237         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
238         .arg3_type      = ARG_ANYTHING,
239 };
240
241 static __always_inline int
242 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
243 {
244         int ret;
245
246         /*
247          * The strncpy_from_kernel_nofault() call will likely not fill the
248          * entire buffer, but that's okay here: we're probing arbitrary
249          * memory anyway, similar to bpf_probe_read_*(), and might as well
250          * probe the stack. Thus, memory is explicitly cleared only in the
251          * error case, so that improper users who ignore the return code
252          * altogether don't copy garbage; otherwise the length of the string
253          * is returned, which can be used for bpf_perf_event_output() et al.
254          */
255         ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
256         if (unlikely(ret < 0))
257                 memset(dst, 0, size);
258         return ret;
259 }
260
261 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
262            const void *, unsafe_ptr)
263 {
264         return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
265 }
266
267 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
268         .func           = bpf_probe_read_kernel_str,
269         .gpl_only       = true,
270         .ret_type       = RET_INTEGER,
271         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
272         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
273         .arg3_type      = ARG_ANYTHING,
274 };
275
276 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
277 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
278            const void *, unsafe_ptr)
279 {
280         if ((unsigned long)unsafe_ptr < TASK_SIZE) {
281                 return bpf_probe_read_user_common(dst, size,
282                                 (__force void __user *)unsafe_ptr);
283         }
284         return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
285 }
286
287 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
288         .func           = bpf_probe_read_compat,
289         .gpl_only       = true,
290         .ret_type       = RET_INTEGER,
291         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
292         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
293         .arg3_type      = ARG_ANYTHING,
294 };
295
296 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
297            const void *, unsafe_ptr)
298 {
299         if ((unsigned long)unsafe_ptr < TASK_SIZE) {
300                 return bpf_probe_read_user_str_common(dst, size,
301                                 (__force void __user *)unsafe_ptr);
302         }
303         return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
304 }
305
306 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
307         .func           = bpf_probe_read_compat_str,
308         .gpl_only       = true,
309         .ret_type       = RET_INTEGER,
310         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
311         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
312         .arg3_type      = ARG_ANYTHING,
313 };
314 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
315
316 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
317            u32, size)
318 {
319         /*
320          * Ensure we're in user context which is safe for the helper to
321          * run. This helper has no business in a kthread.
322          *
323          * access_ok() should prevent writing to non-user memory, but in
324          * some situations (nommu, temporary switch, etc) access_ok() does
325          * not provide enough validation, hence the check on KERNEL_DS.
326          *
327          * nmi_uaccess_okay() ensures the probe is not run in an interim
328          * state, when the task or mm are switched. This is specifically
329          * required to prevent the use of temporary mm.
330          */
331
332         if (unlikely(in_interrupt() ||
333                      current->flags & (PF_KTHREAD | PF_EXITING)))
334                 return -EPERM;
335         if (unlikely(uaccess_kernel()))
336                 return -EPERM;
337         if (unlikely(!nmi_uaccess_okay()))
338                 return -EPERM;
339
340         return copy_to_user_nofault(unsafe_ptr, src, size);
341 }
342
343 static const struct bpf_func_proto bpf_probe_write_user_proto = {
344         .func           = bpf_probe_write_user,
345         .gpl_only       = true,
346         .ret_type       = RET_INTEGER,
347         .arg1_type      = ARG_ANYTHING,
348         .arg2_type      = ARG_PTR_TO_MEM,
349         .arg3_type      = ARG_CONST_SIZE,
350 };
351
352 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
353 {
354         if (!capable(CAP_SYS_ADMIN))
355                 return NULL;
356
357         pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
358                             current->comm, task_pid_nr(current));
359
360         return &bpf_probe_write_user_proto;
361 }
362
363 static DEFINE_RAW_SPINLOCK(trace_printk_lock);
364
365 #define MAX_TRACE_PRINTK_VARARGS        3
366 #define BPF_TRACE_PRINTK_SIZE           1024
367
368 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
369            u64, arg2, u64, arg3)
370 {
371         u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
372         u32 *bin_args;
373         static char buf[BPF_TRACE_PRINTK_SIZE];
374         unsigned long flags;
375         int ret;
376
377         ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
378                                   MAX_TRACE_PRINTK_VARARGS);
379         if (ret < 0)
380                 return ret;
381
382         raw_spin_lock_irqsave(&trace_printk_lock, flags);
383         ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
384
385         trace_bpf_trace_printk(buf);
386         raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
387
388         bpf_bprintf_cleanup();
389
390         return ret;
391 }
392
393 static const struct bpf_func_proto bpf_trace_printk_proto = {
394         .func           = bpf_trace_printk,
395         .gpl_only       = true,
396         .ret_type       = RET_INTEGER,
397         .arg1_type      = ARG_PTR_TO_MEM,
398         .arg2_type      = ARG_CONST_SIZE,
399 };
400
401 static void __set_printk_clr_event(void)
402 {
403         /*
404          * This program might be calling bpf_trace_printk,
405          * so enable the associated bpf_trace/bpf_trace_printk event.
406          * Repeat this each time, as it is possible a user has
407          * disabled bpf_trace_printk events. By loading a program that
408          * calls bpf_trace_printk(), however, the user has expressed
409          * the intent to see such events.
410          */
411         if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
412                 pr_warn_ratelimited("could not enable bpf_trace_printk events");
413 }
414
415 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
416 {
417         __set_printk_clr_event();
418         return &bpf_trace_printk_proto;
419 }
420
421 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
422            u32, data_len)
423 {
424         static char buf[BPF_TRACE_PRINTK_SIZE];
425         unsigned long flags;
426         int ret, num_args;
427         u32 *bin_args;
428
429         if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
430             (data_len && !data))
431                 return -EINVAL;
432         num_args = data_len / 8;
433
434         ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
435         if (ret < 0)
436                 return ret;
437
438         raw_spin_lock_irqsave(&trace_printk_lock, flags);
439         ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
440
441         trace_bpf_trace_printk(buf);
442         raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
443
444         bpf_bprintf_cleanup();
445
446         return ret;
447 }
448
449 static const struct bpf_func_proto bpf_trace_vprintk_proto = {
450         .func           = bpf_trace_vprintk,
451         .gpl_only       = true,
452         .ret_type       = RET_INTEGER,
453         .arg1_type      = ARG_PTR_TO_MEM,
454         .arg2_type      = ARG_CONST_SIZE,
455         .arg3_type      = ARG_PTR_TO_MEM_OR_NULL,
456         .arg4_type      = ARG_CONST_SIZE_OR_ZERO,
457 };
458
459 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
460 {
461         __set_printk_clr_event();
462         return &bpf_trace_vprintk_proto;
463 }
464
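bpf_trace_vprintk() receives its varargs as a flat array of u64, which is what the data_len & 7 and data_len / 8 checks above enforce. A hedged sketch of the calling convention from the BPF side (libbpf's bpf_printk macro emits essentially this once more than three arguments are used); the probe point is illustrative:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	SEC("kprobe/do_nanosleep")
	int four_args(void *ctx)
	{
		static const char fmt[] = "a=%d b=%d c=%d d=%d";
		u64 args[4] = { 1, 2, 3, 4 };	/* data_len = 32, a multiple of 8 */

		bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";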
465 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
466            const void *, data, u32, data_len)
467 {
468         int err, num_args;
469         u32 *bin_args;
470
471         if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
472             (data_len && !data))
473                 return -EINVAL;
474         num_args = data_len / 8;
475
476         err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
477         if (err < 0)
478                 return err;
479
480         seq_bprintf(m, fmt, bin_args);
481
482         bpf_bprintf_cleanup();
483
484         return seq_has_overflowed(m) ? -EOVERFLOW : 0;
485 }
486
487 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
488
489 static const struct bpf_func_proto bpf_seq_printf_proto = {
490         .func           = bpf_seq_printf,
491         .gpl_only       = true,
492         .ret_type       = RET_INTEGER,
493         .arg1_type      = ARG_PTR_TO_BTF_ID,
494         .arg1_btf_id    = &btf_seq_file_ids[0],
495         .arg2_type      = ARG_PTR_TO_MEM,
496         .arg3_type      = ARG_CONST_SIZE,
497         .arg4_type      = ARG_PTR_TO_MEM_OR_NULL,
498         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
499 };
500
501 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
502 {
503         return seq_write(m, data, len) ? -EOVERFLOW : 0;
504 }
505
506 static const struct bpf_func_proto bpf_seq_write_proto = {
507         .func           = bpf_seq_write,
508         .gpl_only       = true,
509         .ret_type       = RET_INTEGER,
510         .arg1_type      = ARG_PTR_TO_BTF_ID,
511         .arg1_btf_id    = &btf_seq_file_ids[0],
512         .arg2_type      = ARG_PTR_TO_MEM,
513         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
514 };
515
516 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
517            u32, btf_ptr_size, u64, flags)
518 {
519         const struct btf *btf;
520         s32 btf_id;
521         int ret;
522
523         ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
524         if (ret)
525                 return ret;
526
527         return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
528 }
529
530 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
531         .func           = bpf_seq_printf_btf,
532         .gpl_only       = true,
533         .ret_type       = RET_INTEGER,
534         .arg1_type      = ARG_PTR_TO_BTF_ID,
535         .arg1_btf_id    = &btf_seq_file_ids[0],
536         .arg2_type      = ARG_PTR_TO_MEM,
537         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
538         .arg4_type      = ARG_ANYTHING,
539 };
540
541 static __always_inline int
542 get_map_perf_counter(struct bpf_map *map, u64 flags,
543                      u64 *value, u64 *enabled, u64 *running)
544 {
545         struct bpf_array *array = container_of(map, struct bpf_array, map);
546         unsigned int cpu = smp_processor_id();
547         u64 index = flags & BPF_F_INDEX_MASK;
548         struct bpf_event_entry *ee;
549
550         if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
551                 return -EINVAL;
552         if (index == BPF_F_CURRENT_CPU)
553                 index = cpu;
554         if (unlikely(index >= array->map.max_entries))
555                 return -E2BIG;
556
557         ee = READ_ONCE(array->ptrs[index]);
558         if (!ee)
559                 return -ENOENT;
560
561         return perf_event_read_local(ee->event, value, enabled, running);
562 }
563
564 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
565 {
566         u64 value = 0;
567         int err;
568
569         err = get_map_perf_counter(map, flags, &value, NULL, NULL);
570         /*
571          * this api is ugly since we miss [-22..-2] range of valid
572          * counter values, but that's uapi
573          */
574         if (err)
575                 return err;
576         return value;
577 }
578
579 static const struct bpf_func_proto bpf_perf_event_read_proto = {
580         .func           = bpf_perf_event_read,
581         .gpl_only       = true,
582         .ret_type       = RET_INTEGER,
583         .arg1_type      = ARG_CONST_MAP_PTR,
584         .arg2_type      = ARG_ANYTHING,
585 };
586
587 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
588            struct bpf_perf_event_value *, buf, u32, size)
589 {
590         int err = -EINVAL;
591
592         if (unlikely(size != sizeof(struct bpf_perf_event_value)))
593                 goto clear;
594         err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
595                                    &buf->running);
596         if (unlikely(err))
597                 goto clear;
598         return 0;
599 clear:
600         memset(buf, 0, size);
601         return err;
602 }
603
604 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
605         .func           = bpf_perf_event_read_value,
606         .gpl_only       = true,
607         .ret_type       = RET_INTEGER,
608         .arg1_type      = ARG_CONST_MAP_PTR,
609         .arg2_type      = ARG_ANYTHING,
610         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
611         .arg4_type      = ARG_CONST_SIZE,
612 };
613
614 static __always_inline u64
615 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
616                         u64 flags, struct perf_sample_data *sd)
617 {
618         struct bpf_array *array = container_of(map, struct bpf_array, map);
619         unsigned int cpu = smp_processor_id();
620         u64 index = flags & BPF_F_INDEX_MASK;
621         struct bpf_event_entry *ee;
622         struct perf_event *event;
623
624         if (index == BPF_F_CURRENT_CPU)
625                 index = cpu;
626         if (unlikely(index >= array->map.max_entries))
627                 return -E2BIG;
628
629         ee = READ_ONCE(array->ptrs[index]);
630         if (!ee)
631                 return -ENOENT;
632
633         event = ee->event;
634         if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
635                      event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
636                 return -EINVAL;
637
638         if (unlikely(event->oncpu != cpu))
639                 return -EOPNOTSUPP;
640
641         return perf_event_output(event, sd, regs);
642 }
643
644 /*
645  * Support tracepoints that call bpf_perf_event_output from normal, irq,
646  * and nmi context; each nesting level gets its own sample-data slot.
647  */
648 struct bpf_trace_sample_data {
649         struct perf_sample_data sds[3];
650 };
651
652 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
653 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
654 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
655            u64, flags, void *, data, u64, size)
656 {
657         struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
658         int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
659         struct perf_raw_record raw = {
660                 .frag = {
661                         .size = size,
662                         .data = data,
663                 },
664         };
665         struct perf_sample_data *sd;
666         int err;
667
668         if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
669                 err = -EBUSY;
670                 goto out;
671         }
672
673         sd = &sds->sds[nest_level - 1];
674
675         if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
676                 err = -EINVAL;
677                 goto out;
678         }
679
680         perf_sample_data_init(sd, 0, 0);
681         sd->raw = &raw;
682
683         err = __bpf_perf_event_output(regs, map, flags, sd);
684
685 out:
686         this_cpu_dec(bpf_trace_nest_level);
687         return err;
688 }
689
690 static const struct bpf_func_proto bpf_perf_event_output_proto = {
691         .func           = bpf_perf_event_output,
692         .gpl_only       = true,
693         .ret_type       = RET_INTEGER,
694         .arg1_type      = ARG_PTR_TO_CTX,
695         .arg2_type      = ARG_CONST_MAP_PTR,
696         .arg3_type      = ARG_ANYTHING,
697         .arg4_type      = ARG_PTR_TO_MEM,
698         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
699 };
700
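A hedged sketch of the BPF side of this helper, paired with a BPF_MAP_TYPE_PERF_EVENT_ARRAY and using BPF_F_CURRENT_CPU so the index handling above resolves to the current CPU's slot. The event layout and probe point are illustrative:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	struct {
		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
		__uint(key_size, sizeof(u32));
		__uint(value_size, sizeof(u32));
	} events SEC(".maps");

	struct event {
		u32 pid;
		char comm[16];
	};

	SEC("kprobe/do_sys_openat2")
	int BPF_KPROBE(emit_open)
	{
		struct event e = {};

		e.pid = bpf_get_current_pid_tgid() >> 32;
		bpf_get_current_comm(e.comm, sizeof(e.comm));
		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				      &e, sizeof(e));
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";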
701 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
702 struct bpf_nested_pt_regs {
703         struct pt_regs regs[3];
704 };
705 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
706 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
707
708 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
709                      void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
710 {
711         int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
712         struct perf_raw_frag frag = {
713                 .copy           = ctx_copy,
714                 .size           = ctx_size,
715                 .data           = ctx,
716         };
717         struct perf_raw_record raw = {
718                 .frag = {
719                         {
720                                 .next   = ctx_size ? &frag : NULL,
721                         },
722                         .size   = meta_size,
723                         .data   = meta,
724                 },
725         };
726         struct perf_sample_data *sd;
727         struct pt_regs *regs;
728         u64 ret;
729
730         if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
731                 ret = -EBUSY;
732                 goto out;
733         }
734         sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
735         regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
736
737         perf_fetch_caller_regs(regs);
738         perf_sample_data_init(sd, 0, 0);
739         sd->raw = &raw;
740
741         ret = __bpf_perf_event_output(regs, map, flags, sd);
742 out:
743         this_cpu_dec(bpf_event_output_nest_level);
744         return ret;
745 }
746
747 BPF_CALL_0(bpf_get_current_task)
748 {
749         return (long) current;
750 }
751
752 const struct bpf_func_proto bpf_get_current_task_proto = {
753         .func           = bpf_get_current_task,
754         .gpl_only       = true,
755         .ret_type       = RET_INTEGER,
756 };
757
758 BPF_CALL_0(bpf_get_current_task_btf)
759 {
760         return (unsigned long) current;
761 }
762
763 const struct bpf_func_proto bpf_get_current_task_btf_proto = {
764         .func           = bpf_get_current_task_btf,
765         .gpl_only       = true,
766         .ret_type       = RET_PTR_TO_BTF_ID,
767         .ret_btf_id     = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
768 };
769
770 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
771 {
772         return (unsigned long) task_pt_regs(task);
773 }
774
775 BTF_ID_LIST(bpf_task_pt_regs_ids)
776 BTF_ID(struct, pt_regs)
777
778 const struct bpf_func_proto bpf_task_pt_regs_proto = {
779         .func           = bpf_task_pt_regs,
780         .gpl_only       = true,
781         .arg1_type      = ARG_PTR_TO_BTF_ID,
782         .arg1_btf_id    = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
783         .ret_type       = RET_PTR_TO_BTF_ID,
784         .ret_btf_id     = &bpf_task_pt_regs_ids[0],
785 };
786
787 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
788 {
789         struct bpf_array *array = container_of(map, struct bpf_array, map);
790         struct cgroup *cgrp;
791
792         if (unlikely(idx >= array->map.max_entries))
793                 return -E2BIG;
794
795         cgrp = READ_ONCE(array->ptrs[idx]);
796         if (unlikely(!cgrp))
797                 return -EAGAIN;
798
799         return task_under_cgroup_hierarchy(current, cgrp);
800 }
801
802 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
803         .func           = bpf_current_task_under_cgroup,
804         .gpl_only       = false,
805         .ret_type       = RET_INTEGER,
806         .arg1_type      = ARG_CONST_MAP_PTR,
807         .arg2_type      = ARG_ANYTHING,
808 };
809
810 struct send_signal_irq_work {
811         struct irq_work irq_work;
812         struct task_struct *task;
813         u32 sig;
814         enum pid_type type;
815 };
816
817 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
818
819 static void do_bpf_send_signal(struct irq_work *entry)
820 {
821         struct send_signal_irq_work *work;
822
823         work = container_of(entry, struct send_signal_irq_work, irq_work);
824         group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
825 }
826
827 static int bpf_send_signal_common(u32 sig, enum pid_type type)
828 {
829         struct send_signal_irq_work *work = NULL;
830
831         /* Similar to bpf_probe_write_user, the task needs to be
832          * in a sound condition and kernel memory access must be
833          * permitted in order to send a signal to the current
834          * task.
835          */
836         if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
837                 return -EPERM;
838         if (unlikely(uaccess_kernel()))
839                 return -EPERM;
840         if (unlikely(!nmi_uaccess_okay()))
841                 return -EPERM;
842
843         if (irqs_disabled()) {
844                 /* Do an early check on signal validity. Otherwise,
845                  * the error is lost in deferred irq_work.
846                  */
847                 if (unlikely(!valid_signal(sig)))
848                         return -EINVAL;
849
850                 work = this_cpu_ptr(&send_signal_work);
851                 if (irq_work_is_busy(&work->irq_work))
852                         return -EBUSY;
853
854                 /* Add the current task, which is the target of the signal,
855                  * to the irq_work. The current task may have changed by the
856                  * time the queued irq work executes.
857                  */
858                 work->task = current;
859                 work->sig = sig;
860                 work->type = type;
861                 irq_work_queue(&work->irq_work);
862                 return 0;
863         }
864
865         return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
866 }
867
868 BPF_CALL_1(bpf_send_signal, u32, sig)
869 {
870         return bpf_send_signal_common(sig, PIDTYPE_TGID);
871 }
872
873 static const struct bpf_func_proto bpf_send_signal_proto = {
874         .func           = bpf_send_signal,
875         .gpl_only       = false,
876         .ret_type       = RET_INTEGER,
877         .arg1_type      = ARG_ANYTHING,
878 };
879
880 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
881 {
882         return bpf_send_signal_common(sig, PIDTYPE_PID);
883 }
884
885 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
886         .func           = bpf_send_signal_thread,
887         .gpl_only       = false,
888         .ret_type       = RET_INTEGER,
889         .arg1_type      = ARG_ANYTHING,
890 };
891
892 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
893 {
894         long len;
895         char *p;
896
897         if (!sz)
898                 return 0;
899
900         p = d_path(path, buf, sz);
901         if (IS_ERR(p)) {
902                 len = PTR_ERR(p);
903         } else {
904                 len = buf + sz - p;
905                 memmove(buf, p, len);
906         }
907
908         return len;
909 }
910
911 BTF_SET_START(btf_allowlist_d_path)
912 #ifdef CONFIG_SECURITY
913 BTF_ID(func, security_file_permission)
914 BTF_ID(func, security_inode_getattr)
915 BTF_ID(func, security_file_open)
916 #endif
917 #ifdef CONFIG_SECURITY_PATH
918 BTF_ID(func, security_path_truncate)
919 #endif
920 BTF_ID(func, vfs_truncate)
921 BTF_ID(func, vfs_fallocate)
922 BTF_ID(func, dentry_open)
923 BTF_ID(func, vfs_getattr)
924 BTF_ID(func, filp_close)
925 BTF_SET_END(btf_allowlist_d_path)
926
927 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
928 {
929         if (prog->type == BPF_PROG_TYPE_TRACING &&
930             prog->expected_attach_type == BPF_TRACE_ITER)
931                 return true;
932
933         if (prog->type == BPF_PROG_TYPE_LSM)
934                 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
935
936         return btf_id_set_contains(&btf_allowlist_d_path,
937                                    prog->aux->attach_btf_id);
938 }
939
940 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
941
942 static const struct bpf_func_proto bpf_d_path_proto = {
943         .func           = bpf_d_path,
944         .gpl_only       = false,
945         .ret_type       = RET_INTEGER,
946         .arg1_type      = ARG_PTR_TO_BTF_ID,
947         .arg1_btf_id    = &bpf_d_path_btf_ids[0],
948         .arg2_type      = ARG_PTR_TO_MEM,
949         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
950         .allowed        = bpf_d_path_allowed,
951 };
952
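bpf_d_path() is gated by bpf_d_path_allowed() above: BPF_TRACE_ITER programs, sleepable LSM hooks, and the allowlisted functions. A hedged sketch attaching to one allowlisted target; the buffer size and message are illustrative:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("fentry/vfs_truncate")
	int BPF_PROG(on_truncate, const struct path *path)
	{
		char buf[256];
		long len;

		len = bpf_d_path((struct path *)path, buf, sizeof(buf));
		if (len > 0)
			bpf_printk("truncate: %s", buf);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";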
953 #define BTF_F_ALL       (BTF_F_COMPACT  | BTF_F_NONAME | \
954                          BTF_F_PTR_RAW | BTF_F_ZERO)
955
956 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
957                                   u64 flags, const struct btf **btf,
958                                   s32 *btf_id)
959 {
960         const struct btf_type *t;
961
962         if (unlikely(flags & ~(BTF_F_ALL)))
963                 return -EINVAL;
964
965         if (btf_ptr_size != sizeof(struct btf_ptr))
966                 return -EINVAL;
967
968         *btf = bpf_get_btf_vmlinux();
969
970         if (IS_ERR_OR_NULL(*btf))
971                 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
972
973         if (ptr->type_id > 0)
974                 *btf_id = ptr->type_id;
975         else
976                 return -EINVAL;
977
978         if (*btf_id > 0)
979                 t = btf_type_by_id(*btf, *btf_id);
980         if (*btf_id <= 0 || !t)
981                 return -ENOENT;
982
983         return 0;
984 }
985
986 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
987            u32, btf_ptr_size, u64, flags)
988 {
989         const struct btf *btf;
990         s32 btf_id;
991         int ret;
992
993         ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
994         if (ret)
995                 return ret;
996
997         return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
998                                       flags);
999 }
1000
1001 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1002         .func           = bpf_snprintf_btf,
1003         .gpl_only       = false,
1004         .ret_type       = RET_INTEGER,
1005         .arg1_type      = ARG_PTR_TO_MEM,
1006         .arg2_type      = ARG_CONST_SIZE,
1007         .arg3_type      = ARG_PTR_TO_MEM,
1008         .arg4_type      = ARG_CONST_SIZE,
1009         .arg5_type      = ARG_ANYTHING,
1010 };
1011
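bpf_snprintf_btf() takes a struct btf_ptr whose type_id must be a positive vmlinux BTF id, per bpf_btf_printf_prepare() above. A hedged sketch that pretty-prints the current task; the output buffer size, probe point, and flags are illustrative:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>
	#include <bpf/bpf_core_read.h>

	static char out[4096];	/* global: far too big for the BPF stack */

	SEC("fentry/do_nanosleep")
	int BPF_PROG(show_task)
	{
		struct btf_ptr ptr = {
			.ptr = bpf_get_current_task_btf(),
			.type_id = bpf_core_type_id_kernel(struct task_struct),
		};

		bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr),
				 BTF_F_COMPACT);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";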
1012 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1013 {
1014         /* This helper call is inlined by verifier. */
1015         return ((u64 *)ctx)[-2];
1016 }
1017
1018 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1019         .func           = bpf_get_func_ip_tracing,
1020         .gpl_only       = true,
1021         .ret_type       = RET_INTEGER,
1022         .arg1_type      = ARG_PTR_TO_CTX,
1023 };
1024
1025 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1026 {
1027         struct kprobe *kp = kprobe_running();
1028
1029         return kp ? (uintptr_t)kp->addr : 0;
1030 }
1031
1032 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1033         .func           = bpf_get_func_ip_kprobe,
1034         .gpl_only       = true,
1035         .ret_type       = RET_INTEGER,
1036         .arg1_type      = ARG_PTR_TO_CTX,
1037 };
1038
1039 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1040 {
1041         struct bpf_trace_run_ctx *run_ctx;
1042
1043         run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1044         return run_ctx->bpf_cookie;
1045 }
1046
1047 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1048         .func           = bpf_get_attach_cookie_trace,
1049         .gpl_only       = false,
1050         .ret_type       = RET_INTEGER,
1051         .arg1_type      = ARG_PTR_TO_CTX,
1052 };
1053
1054 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1055 {
1056         return ctx->event->bpf_cookie;
1057 }
1058
1059 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1060         .func           = bpf_get_attach_cookie_pe,
1061         .gpl_only       = false,
1062         .ret_type       = RET_INTEGER,
1063         .arg1_type      = ARG_PTR_TO_CTX,
1064 };
1065
1066 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1067 {
1068 #ifndef CONFIG_X86
1069         return -ENOENT;
1070 #else
1071         static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1072         u32 entry_cnt = size / br_entry_size;
1073
1074         entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1075
1076         if (unlikely(flags))
1077                 return -EINVAL;
1078
1079         if (!entry_cnt)
1080                 return -ENOENT;
1081
1082         return entry_cnt * br_entry_size;
1083 #endif
1084 }
1085
1086 static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1087         .func           = bpf_get_branch_snapshot,
1088         .gpl_only       = true,
1089         .ret_type       = RET_INTEGER,
1090         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
1091         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
1092 };
1093
1094 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1095 {
1096         /* This helper call is inlined by verifier. */
1097         u64 nr_args = ((u64 *)ctx)[-1];
1098
1099         if ((u64) n >= nr_args)
1100                 return -EINVAL;
1101         *value = ((u64 *)ctx)[n];
1102         return 0;
1103 }
1104
1105 static const struct bpf_func_proto bpf_get_func_arg_proto = {
1106         .func           = get_func_arg,
1107         .ret_type       = RET_INTEGER,
1108         .arg1_type      = ARG_PTR_TO_CTX,
1109         .arg2_type      = ARG_ANYTHING,
1110         .arg3_type      = ARG_PTR_TO_LONG,
1111 };
1112
1113 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1114 {
1115         /* This helper call is inlined by verifier. */
1116         u64 nr_args = ((u64 *)ctx)[-1];
1117
1118         *value = ((u64 *)ctx)[nr_args];
1119         return 0;
1120 }
1121
1122 static const struct bpf_func_proto bpf_get_func_ret_proto = {
1123         .func           = get_func_ret,
1124         .ret_type       = RET_INTEGER,
1125         .arg1_type      = ARG_PTR_TO_CTX,
1126         .arg2_type      = ARG_PTR_TO_LONG,
1127 };
1128
1129 BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1130 {
1131         /* This helper call is inlined by verifier. */
1132         return ((u64 *)ctx)[-1];
1133 }
1134
1135 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1136         .func           = get_func_arg_cnt,
1137         .ret_type       = RET_INTEGER,
1138         .arg1_type      = ARG_PTR_TO_CTX,
1139 };
1140
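These three helpers, the subject of this commit, read the values the BPF trampoline stores in a tracing program's context: the arguments at ctx[0..nr_args-1], the argument count at ctx[-1], and the return value at ctx[nr_args]. A hedged sketch of an fexit program using all three (bpf_get_func_ret is only meaningful where a return value exists, i.e. fexit/fmod_ret; the target function is illustrative):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("fexit/do_sys_openat2")
	int BPF_PROG(dump_args_and_ret)
	{
		u64 nr_args = bpf_get_func_arg_cnt(ctx);
		u64 arg, ret;
		int i;

		for (i = 0; i < 6 && i < nr_args; i++) {
			if (!bpf_get_func_arg(ctx, i, &arg))
				bpf_printk("arg%d = 0x%llx", i, arg);
		}

		if (!bpf_get_func_ret(ctx, &ret))
			bpf_printk("ret = %lld", ret);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";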
1141 static const struct bpf_func_proto *
1142 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1143 {
1144         switch (func_id) {
1145         case BPF_FUNC_map_lookup_elem:
1146                 return &bpf_map_lookup_elem_proto;
1147         case BPF_FUNC_map_update_elem:
1148                 return &bpf_map_update_elem_proto;
1149         case BPF_FUNC_map_delete_elem:
1150                 return &bpf_map_delete_elem_proto;
1151         case BPF_FUNC_map_push_elem:
1152                 return &bpf_map_push_elem_proto;
1153         case BPF_FUNC_map_pop_elem:
1154                 return &bpf_map_pop_elem_proto;
1155         case BPF_FUNC_map_peek_elem:
1156                 return &bpf_map_peek_elem_proto;
1157         case BPF_FUNC_ktime_get_ns:
1158                 return &bpf_ktime_get_ns_proto;
1159         case BPF_FUNC_ktime_get_boot_ns:
1160                 return &bpf_ktime_get_boot_ns_proto;
1161         case BPF_FUNC_tail_call:
1162                 return &bpf_tail_call_proto;
1163         case BPF_FUNC_get_current_pid_tgid:
1164                 return &bpf_get_current_pid_tgid_proto;
1165         case BPF_FUNC_get_current_task:
1166                 return &bpf_get_current_task_proto;
1167         case BPF_FUNC_get_current_task_btf:
1168                 return &bpf_get_current_task_btf_proto;
1169         case BPF_FUNC_task_pt_regs:
1170                 return &bpf_task_pt_regs_proto;
1171         case BPF_FUNC_get_current_uid_gid:
1172                 return &bpf_get_current_uid_gid_proto;
1173         case BPF_FUNC_get_current_comm:
1174                 return &bpf_get_current_comm_proto;
1175         case BPF_FUNC_trace_printk:
1176                 return bpf_get_trace_printk_proto();
1177         case BPF_FUNC_get_smp_processor_id:
1178                 return &bpf_get_smp_processor_id_proto;
1179         case BPF_FUNC_get_numa_node_id:
1180                 return &bpf_get_numa_node_id_proto;
1181         case BPF_FUNC_perf_event_read:
1182                 return &bpf_perf_event_read_proto;
1183         case BPF_FUNC_current_task_under_cgroup:
1184                 return &bpf_current_task_under_cgroup_proto;
1185         case BPF_FUNC_get_prandom_u32:
1186                 return &bpf_get_prandom_u32_proto;
1187         case BPF_FUNC_probe_write_user:
1188                 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1189                        NULL : bpf_get_probe_write_proto();
1190         case BPF_FUNC_probe_read_user:
1191                 return &bpf_probe_read_user_proto;
1192         case BPF_FUNC_probe_read_kernel:
1193                 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1194                        NULL : &bpf_probe_read_kernel_proto;
1195         case BPF_FUNC_probe_read_user_str:
1196                 return &bpf_probe_read_user_str_proto;
1197         case BPF_FUNC_probe_read_kernel_str:
1198                 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1199                        NULL : &bpf_probe_read_kernel_str_proto;
1200 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1201         case BPF_FUNC_probe_read:
1202                 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1203                        NULL : &bpf_probe_read_compat_proto;
1204         case BPF_FUNC_probe_read_str:
1205                 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1206                        NULL : &bpf_probe_read_compat_str_proto;
1207 #endif
1208 #ifdef CONFIG_CGROUPS
1209         case BPF_FUNC_get_current_cgroup_id:
1210                 return &bpf_get_current_cgroup_id_proto;
1211         case BPF_FUNC_get_current_ancestor_cgroup_id:
1212                 return &bpf_get_current_ancestor_cgroup_id_proto;
1213 #endif
1214         case BPF_FUNC_send_signal:
1215                 return &bpf_send_signal_proto;
1216         case BPF_FUNC_send_signal_thread:
1217                 return &bpf_send_signal_thread_proto;
1218         case BPF_FUNC_perf_event_read_value:
1219                 return &bpf_perf_event_read_value_proto;
1220         case BPF_FUNC_get_ns_current_pid_tgid:
1221                 return &bpf_get_ns_current_pid_tgid_proto;
1222         case BPF_FUNC_ringbuf_output:
1223                 return &bpf_ringbuf_output_proto;
1224         case BPF_FUNC_ringbuf_reserve:
1225                 return &bpf_ringbuf_reserve_proto;
1226         case BPF_FUNC_ringbuf_submit:
1227                 return &bpf_ringbuf_submit_proto;
1228         case BPF_FUNC_ringbuf_discard:
1229                 return &bpf_ringbuf_discard_proto;
1230         case BPF_FUNC_ringbuf_query:
1231                 return &bpf_ringbuf_query_proto;
1232         case BPF_FUNC_jiffies64:
1233                 return &bpf_jiffies64_proto;
1234         case BPF_FUNC_get_task_stack:
1235                 return &bpf_get_task_stack_proto;
1236         case BPF_FUNC_copy_from_user:
1237                 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
1238         case BPF_FUNC_snprintf_btf:
1239                 return &bpf_snprintf_btf_proto;
1240         case BPF_FUNC_per_cpu_ptr:
1241                 return &bpf_per_cpu_ptr_proto;
1242         case BPF_FUNC_this_cpu_ptr:
1243                 return &bpf_this_cpu_ptr_proto;
1244         case BPF_FUNC_task_storage_get:
1245                 return &bpf_task_storage_get_proto;
1246         case BPF_FUNC_task_storage_delete:
1247                 return &bpf_task_storage_delete_proto;
1248         case BPF_FUNC_for_each_map_elem:
1249                 return &bpf_for_each_map_elem_proto;
1250         case BPF_FUNC_snprintf:
1251                 return &bpf_snprintf_proto;
1252         case BPF_FUNC_get_func_ip:
1253                 return &bpf_get_func_ip_proto_tracing;
1254         case BPF_FUNC_get_branch_snapshot:
1255                 return &bpf_get_branch_snapshot_proto;
1256         case BPF_FUNC_find_vma:
1257                 return &bpf_find_vma_proto;
1258         case BPF_FUNC_trace_vprintk:
1259                 return bpf_get_trace_vprintk_proto();
1260         default:
1261                 return bpf_base_func_proto(func_id);
1262         }
1263 }
1264
1265 static const struct bpf_func_proto *
1266 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1267 {
1268         switch (func_id) {
1269         case BPF_FUNC_perf_event_output:
1270                 return &bpf_perf_event_output_proto;
1271         case BPF_FUNC_get_stackid:
1272                 return &bpf_get_stackid_proto;
1273         case BPF_FUNC_get_stack:
1274                 return &bpf_get_stack_proto;
1275 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1276         case BPF_FUNC_override_return:
1277                 return &bpf_override_return_proto;
1278 #endif
1279         case BPF_FUNC_get_func_ip:
1280                 return &bpf_get_func_ip_proto_kprobe;
1281         case BPF_FUNC_get_attach_cookie:
1282                 return &bpf_get_attach_cookie_proto_trace;
1283         default:
1284                 return bpf_tracing_func_proto(func_id, prog);
1285         }
1286 }
1287
1288 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1289 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1290                                         const struct bpf_prog *prog,
1291                                         struct bpf_insn_access_aux *info)
1292 {
1293         if (off < 0 || off >= sizeof(struct pt_regs))
1294                 return false;
1295         if (type != BPF_READ)
1296                 return false;
1297         if (off % size != 0)
1298                 return false;
1299         /*
1300          * Assertion for 32 bit to make sure last 8 byte access
1301          * (BPF_DW) to the last 4 byte member is disallowed.
1302          */
1303         if (off + size > sizeof(struct pt_regs))
1304                 return false;
1305
1306         return true;
1307 }
1308
1309 const struct bpf_verifier_ops kprobe_verifier_ops = {
1310         .get_func_proto  = kprobe_prog_func_proto,
1311         .is_valid_access = kprobe_prog_is_valid_access,
1312 };
1313
1314 const struct bpf_prog_ops kprobe_prog_ops = {
1315 };
1316
1317 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1318            u64, flags, void *, data, u64, size)
1319 {
1320         struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1321
1322         /*
1323          * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1324          * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1325          * from there and call the same bpf_perf_event_output() helper inline.
1326          */
1327         return ____bpf_perf_event_output(regs, map, flags, data, size);
1328 }
1329
1330 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1331         .func           = bpf_perf_event_output_tp,
1332         .gpl_only       = true,
1333         .ret_type       = RET_INTEGER,
1334         .arg1_type      = ARG_PTR_TO_CTX,
1335         .arg2_type      = ARG_CONST_MAP_PTR,
1336         .arg3_type      = ARG_ANYTHING,
1337         .arg4_type      = ARG_PTR_TO_MEM,
1338         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
1339 };
1340
1341 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1342            u64, flags)
1343 {
1344         struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1345
1346         /*
1347          * Same comment as in bpf_perf_event_output_tp(), only that this time
1348          * the other helper's function body cannot be inlined due to being
1349          * external, so we need to call the raw helper function.
1350          */
1351         return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1352                                flags, 0, 0);
1353 }
1354
1355 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1356         .func           = bpf_get_stackid_tp,
1357         .gpl_only       = true,
1358         .ret_type       = RET_INTEGER,
1359         .arg1_type      = ARG_PTR_TO_CTX,
1360         .arg2_type      = ARG_CONST_MAP_PTR,
1361         .arg3_type      = ARG_ANYTHING,
1362 };
1363
1364 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1365            u64, flags)
1366 {
1367         struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1368
1369         return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1370                              (unsigned long) size, flags, 0);
1371 }
1372
1373 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1374         .func           = bpf_get_stack_tp,
1375         .gpl_only       = true,
1376         .ret_type       = RET_INTEGER,
1377         .arg1_type      = ARG_PTR_TO_CTX,
1378         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1379         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1380         .arg4_type      = ARG_ANYTHING,
1381 };
1382
1383 static const struct bpf_func_proto *
1384 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1385 {
1386         switch (func_id) {
1387         case BPF_FUNC_perf_event_output:
1388                 return &bpf_perf_event_output_proto_tp;
1389         case BPF_FUNC_get_stackid:
1390                 return &bpf_get_stackid_proto_tp;
1391         case BPF_FUNC_get_stack:
1392                 return &bpf_get_stack_proto_tp;
1393         case BPF_FUNC_get_attach_cookie:
1394                 return &bpf_get_attach_cookie_proto_trace;
1395         default:
1396                 return bpf_tracing_func_proto(func_id, prog);
1397         }
1398 }
1399
1400 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1401                                     const struct bpf_prog *prog,
1402                                     struct bpf_insn_access_aux *info)
1403 {
1404         if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1405                 return false;
1406         if (type != BPF_READ)
1407                 return false;
1408         if (off % size != 0)
1409                 return false;
1410
1411         BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1412         return true;
1413 }
1414
1415 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1416         .get_func_proto  = tp_prog_func_proto,
1417         .is_valid_access = tp_prog_is_valid_access,
1418 };
1419
1420 const struct bpf_prog_ops tracepoint_prog_ops = {
1421 };
1422
1423 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1424            struct bpf_perf_event_value *, buf, u32, size)
1425 {
1426         int err = -EINVAL;
1427
1428         if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1429                 goto clear;
1430         err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1431                                     &buf->running);
1432         if (unlikely(err))
1433                 goto clear;
1434         return 0;
1435 clear:
1436         memset(buf, 0, size);
1437         return err;
1438 }
1439
1440 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1441          .func           = bpf_perf_prog_read_value,
1442          .gpl_only       = true,
1443          .ret_type       = RET_INTEGER,
1444          .arg1_type      = ARG_PTR_TO_CTX,
1445          .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1446          .arg3_type      = ARG_CONST_SIZE,
1447 };
1448
1449 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1450            void *, buf, u32, size, u64, flags)
1451 {
1452         static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1453         struct perf_branch_stack *br_stack = ctx->data->br_stack;
1454         u32 to_copy;
1455
1456         if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1457                 return -EINVAL;
1458
1459         if (unlikely(!br_stack))
1460                 return -ENOENT;
1461
1462         if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1463                 return br_stack->nr * br_entry_size;
1464
1465         if (!buf || (size % br_entry_size != 0))
1466                 return -EINVAL;
1467
1468         to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1469         memcpy(buf, br_stack->entries, to_copy);
1470
1471         return to_copy;
1472 }
1473
1474 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1475         .func           = bpf_read_branch_records,
1476         .gpl_only       = true,
1477         .ret_type       = RET_INTEGER,
1478         .arg1_type      = ARG_PTR_TO_CTX,
1479         .arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1480         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1481         .arg4_type      = ARG_ANYTHING,
1482 };
1483
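BPF_F_GET_BRANCH_RECORDS_SIZE turns this helper into a two-call API: query the size first, then copy. A hedged sketch of that pattern for a perf_event program; it assumes the perf event was opened with branch-stack sampling enabled, and the 64-entry buffer is illustrative:

	#include <linux/bpf.h>
	#include <linux/perf_event.h>
	#include <linux/bpf_perf_event.h>
	#include <bpf/bpf_helpers.h>

	/* Global (map-backed) buffer: 64 entries would overflow the
	 * 512-byte BPF stack.
	 */
	struct perf_branch_entry entries[64];

	SEC("perf_event")
	int get_branches(struct bpf_perf_event_data *ctx)
	{
		long sz;

		/* First call: just ask how many bytes of records exist. */
		sz = bpf_read_branch_records(ctx, NULL, 0,
					     BPF_F_GET_BRANCH_RECORDS_SIZE);
		if (sz <= 0)
			return 0;

		/* Second call: copy them out. The size passed in must be a
		 * multiple of sizeof(struct perf_branch_entry).
		 */
		sz = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";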
1484 static const struct bpf_func_proto *
1485 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1486 {
1487         switch (func_id) {
1488         case BPF_FUNC_perf_event_output:
1489                 return &bpf_perf_event_output_proto_tp;
1490         case BPF_FUNC_get_stackid:
1491                 return &bpf_get_stackid_proto_pe;
1492         case BPF_FUNC_get_stack:
1493                 return &bpf_get_stack_proto_pe;
1494         case BPF_FUNC_perf_prog_read_value:
1495                 return &bpf_perf_prog_read_value_proto;
1496         case BPF_FUNC_read_branch_records:
1497                 return &bpf_read_branch_records_proto;
1498         case BPF_FUNC_get_attach_cookie:
1499                 return &bpf_get_attach_cookie_proto_pe;
1500         default:
1501                 return bpf_tracing_func_proto(func_id, prog);
1502         }
1503 }
1504
1505 /*
1506  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1507  * to avoid potential recursive reuse issue when/if tracepoints are added
1508  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1509  *
1510  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1511  * in normal, irq, and nmi context.
1512  */
1513 struct bpf_raw_tp_regs {
1514         struct pt_regs regs[3];
1515 };
1516 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1517 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1518 static struct pt_regs *get_bpf_raw_tp_regs(void)
1519 {
1520         struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1521         int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1522
1523         if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1524                 this_cpu_dec(bpf_raw_tp_nest_level);
1525                 return ERR_PTR(-EBUSY);
1526         }
1527
1528         return &tp_regs->regs[nest_level - 1];
1529 }
1530
1531 static void put_bpf_raw_tp_regs(void)
1532 {
1533         this_cpu_dec(bpf_raw_tp_nest_level);
1534 }
1535
1536 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1537            struct bpf_map *, map, u64, flags, void *, data, u64, size)
1538 {
1539         struct pt_regs *regs = get_bpf_raw_tp_regs();
1540         int ret;
1541
1542         if (IS_ERR(regs))
1543                 return PTR_ERR(regs);
1544
1545         perf_fetch_caller_regs(regs);
1546         ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1547
1548         put_bpf_raw_tp_regs();
1549         return ret;
1550 }
1551
1552 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1553         .func           = bpf_perf_event_output_raw_tp,
1554         .gpl_only       = true,
1555         .ret_type       = RET_INTEGER,
1556         .arg1_type      = ARG_PTR_TO_CTX,
1557         .arg2_type      = ARG_CONST_MAP_PTR,
1558         .arg3_type      = ARG_ANYTHING,
1559         .arg4_type      = ARG_PTR_TO_MEM,
1560         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
1561 };
1562
1563 extern const struct bpf_func_proto bpf_skb_output_proto;
1564 extern const struct bpf_func_proto bpf_xdp_output_proto;
1565
1566 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1567            struct bpf_map *, map, u64, flags)
1568 {
1569         struct pt_regs *regs = get_bpf_raw_tp_regs();
1570         int ret;
1571
1572         if (IS_ERR(regs))
1573                 return PTR_ERR(regs);
1574
1575         perf_fetch_caller_regs(regs);
1576         /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1577         ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1578                               flags, 0, 0);
1579         put_bpf_raw_tp_regs();
1580         return ret;
1581 }
1582
1583 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1584         .func           = bpf_get_stackid_raw_tp,
1585         .gpl_only       = true,
1586         .ret_type       = RET_INTEGER,
1587         .arg1_type      = ARG_PTR_TO_CTX,
1588         .arg2_type      = ARG_CONST_MAP_PTR,
1589         .arg3_type      = ARG_ANYTHING,
1590 };
1591
1592 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1593            void *, buf, u32, size, u64, flags)
1594 {
1595         struct pt_regs *regs = get_bpf_raw_tp_regs();
1596         int ret;
1597
1598         if (IS_ERR(regs))
1599                 return PTR_ERR(regs);
1600
1601         perf_fetch_caller_regs(regs);
1602         ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1603                             (unsigned long) size, flags, 0);
1604         put_bpf_raw_tp_regs();
1605         return ret;
1606 }
1607
1608 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1609         .func           = bpf_get_stack_raw_tp,
1610         .gpl_only       = true,
1611         .ret_type       = RET_INTEGER,
1612         .arg1_type      = ARG_PTR_TO_CTX,
1613         .arg2_type      = ARG_PTR_TO_MEM,
1614         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1615         .arg4_type      = ARG_ANYTHING,
1616 };
1617
1618 static const struct bpf_func_proto *
1619 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1620 {
1621         switch (func_id) {
1622         case BPF_FUNC_perf_event_output:
1623                 return &bpf_perf_event_output_proto_raw_tp;
1624         case BPF_FUNC_get_stackid:
1625                 return &bpf_get_stackid_proto_raw_tp;
1626         case BPF_FUNC_get_stack:
1627                 return &bpf_get_stack_proto_raw_tp;
1628         default:
1629                 return bpf_tracing_func_proto(func_id, prog);
1630         }
1631 }
1632
1633 const struct bpf_func_proto *
1634 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1635 {
1636         const struct bpf_func_proto *fn;
1637
1638         switch (func_id) {
1639 #ifdef CONFIG_NET
1640         case BPF_FUNC_skb_output:
1641                 return &bpf_skb_output_proto;
1642         case BPF_FUNC_xdp_output:
1643                 return &bpf_xdp_output_proto;
1644         case BPF_FUNC_skc_to_tcp6_sock:
1645                 return &bpf_skc_to_tcp6_sock_proto;
1646         case BPF_FUNC_skc_to_tcp_sock:
1647                 return &bpf_skc_to_tcp_sock_proto;
1648         case BPF_FUNC_skc_to_tcp_timewait_sock:
1649                 return &bpf_skc_to_tcp_timewait_sock_proto;
1650         case BPF_FUNC_skc_to_tcp_request_sock:
1651                 return &bpf_skc_to_tcp_request_sock_proto;
1652         case BPF_FUNC_skc_to_udp6_sock:
1653                 return &bpf_skc_to_udp6_sock_proto;
1654         case BPF_FUNC_skc_to_unix_sock:
1655                 return &bpf_skc_to_unix_sock_proto;
1656         case BPF_FUNC_sk_storage_get:
1657                 return &bpf_sk_storage_get_tracing_proto;
1658         case BPF_FUNC_sk_storage_delete:
1659                 return &bpf_sk_storage_delete_tracing_proto;
1660         case BPF_FUNC_sock_from_file:
1661                 return &bpf_sock_from_file_proto;
1662         case BPF_FUNC_get_socket_cookie:
1663                 return &bpf_get_socket_ptr_cookie_proto;
1664 #endif
1665         case BPF_FUNC_seq_printf:
1666                 return prog->expected_attach_type == BPF_TRACE_ITER ?
1667                        &bpf_seq_printf_proto :
1668                        NULL;
1669         case BPF_FUNC_seq_write:
1670                 return prog->expected_attach_type == BPF_TRACE_ITER ?
1671                        &bpf_seq_write_proto :
1672                        NULL;
1673         case BPF_FUNC_seq_printf_btf:
1674                 return prog->expected_attach_type == BPF_TRACE_ITER ?
1675                        &bpf_seq_printf_btf_proto :
1676                        NULL;
1677         case BPF_FUNC_d_path:
1678                 return &bpf_d_path_proto;
1679         case BPF_FUNC_get_func_arg:
1680                 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1681         case BPF_FUNC_get_func_ret:
1682                 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1683         case BPF_FUNC_get_func_arg_cnt:
1684                 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
1685         default:
1686                 fn = raw_tp_prog_func_proto(func_id, prog);
1687                 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
1688                         fn = bpf_iter_get_func_proto(func_id, prog);
1689                 return fn;
1690         }
1691 }
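
/*
 * A minimal BPF-side sketch of the trampoline-only helpers above
 * (illustrative only, not part of this file; SEC()/BPF_PROG() come from
 * libbpf and the attach target is an arbitrary example):
 *
 *	SEC("fentry/ip_rcv")
 *	int BPF_PROG(trace_entry)
 *	{
 *		u64 cnt = bpf_get_func_arg_cnt(ctx);
 *		u64 arg0;
 *
 *		if (!bpf_get_func_arg(ctx, 0, &arg0))
 *			bpf_printk("arg0=0x%llx of %llu args", arg0, cnt);
 *		return 0;
 *	}
 */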
1692
1693 static bool raw_tp_prog_is_valid_access(int off, int size,
1694                                         enum bpf_access_type type,
1695                                         const struct bpf_prog *prog,
1696                                         struct bpf_insn_access_aux *info)
1697 {
1698         return bpf_tracing_ctx_access(off, size, type);
1699 }
1700
1701 static bool tracing_prog_is_valid_access(int off, int size,
1702                                          enum bpf_access_type type,
1703                                          const struct bpf_prog *prog,
1704                                          struct bpf_insn_access_aux *info)
1705 {
1706         return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
1707 }
1708
1709 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1710                                      const union bpf_attr *kattr,
1711                                      union bpf_attr __user *uattr)
1712 {
1713         return -ENOTSUPP;
1714 }
1715
1716 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1717         .get_func_proto  = raw_tp_prog_func_proto,
1718         .is_valid_access = raw_tp_prog_is_valid_access,
1719 };
1720
1721 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1722 #ifdef CONFIG_NET
1723         .test_run = bpf_prog_test_run_raw_tp,
1724 #endif
1725 };
1726
1727 const struct bpf_verifier_ops tracing_verifier_ops = {
1728         .get_func_proto  = tracing_prog_func_proto,
1729         .is_valid_access = tracing_prog_is_valid_access,
1730 };
1731
1732 const struct bpf_prog_ops tracing_prog_ops = {
1733         .test_run = bpf_prog_test_run_tracing,
1734 };
1735
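/*
 * For writable raw tracepoints, offset 0 of the context holds the pointer
 * to the tracepoint's writable buffer. It must be loaded as a full u64 and
 * is typed PTR_TO_TP_BUFFER, so the verifier can bound accesses through it
 * against the tracepoint's writable size (see __bpf_probe_register()).
 */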
1736 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1737                                                  enum bpf_access_type type,
1738                                                  const struct bpf_prog *prog,
1739                                                  struct bpf_insn_access_aux *info)
1740 {
1741         if (off == 0) {
1742                 if (size != sizeof(u64) || type != BPF_READ)
1743                         return false;
1744                 info->reg_type = PTR_TO_TP_BUFFER;
1745         }
1746         return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1747 }
1748
1749 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1750         .get_func_proto  = raw_tp_prog_func_proto,
1751         .is_valid_access = raw_tp_writable_prog_is_valid_access,
1752 };
1753
1754 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1755 };
1756
1757 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1758                                     const struct bpf_prog *prog,
1759                                     struct bpf_insn_access_aux *info)
1760 {
1761         const int size_u64 = sizeof(u64);
1762
1763         if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1764                 return false;
1765         if (type != BPF_READ)
1766                 return false;
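        /*
         * Allow a misaligned load only in the one case that is legal on
         * 32-bit kernels: an 8-byte read of a u64 field that sits at a
         * 4-byte-aligned offset because 'long' is only 4 bytes wide there.
         */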
1767         if (off % size != 0) {
1768                 if (sizeof(unsigned long) != 4)
1769                         return false;
1770                 if (size != 8)
1771                         return false;
1772                 if (off % size != 4)
1773                         return false;
1774         }
1775
1776         switch (off) {
1777         case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1778                 bpf_ctx_record_field_size(info, size_u64);
1779                 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1780                         return false;
1781                 break;
1782         case bpf_ctx_range(struct bpf_perf_event_data, addr):
1783                 bpf_ctx_record_field_size(info, size_u64);
1784                 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1785                         return false;
1786                 break;
1787         default:
1788                 if (size != sizeof(long))
1789                         return false;
1790         }
1791
1792         return true;
1793 }
1794
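/*
 * Rewrite loads from the user-visible bpf_perf_event_data layout into
 * loads on the kernel-side bpf_perf_event_data_kern: first fetch the
 * data (or regs) pointer, then load the requested field through it, so
 * programs never dereference kernel pointers directly.
 */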
1795 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1796                                       const struct bpf_insn *si,
1797                                       struct bpf_insn *insn_buf,
1798                                       struct bpf_prog *prog, u32 *target_size)
1799 {
1800         struct bpf_insn *insn = insn_buf;
1801
1802         switch (si->off) {
1803         case offsetof(struct bpf_perf_event_data, sample_period):
1804                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1805                                                        data), si->dst_reg, si->src_reg,
1806                                       offsetof(struct bpf_perf_event_data_kern, data));
1807                 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1808                                       bpf_target_off(struct perf_sample_data, period, 8,
1809                                                      target_size));
1810                 break;
1811         case offsetof(struct bpf_perf_event_data, addr):
1812                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1813                                                        data), si->dst_reg, si->src_reg,
1814                                       offsetof(struct bpf_perf_event_data_kern, data));
1815                 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1816                                       bpf_target_off(struct perf_sample_data, addr, 8,
1817                                                      target_size));
1818                 break;
1819         default:
1820                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1821                                                        regs), si->dst_reg, si->src_reg,
1822                                       offsetof(struct bpf_perf_event_data_kern, regs));
1823                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1824                                       si->off);
1825                 break;
1826         }
1827
1828         return insn - insn_buf;
1829 }
1830
1831 const struct bpf_verifier_ops perf_event_verifier_ops = {
1832         .get_func_proto         = pe_prog_func_proto,
1833         .is_valid_access        = pe_prog_is_valid_access,
1834         .convert_ctx_access     = pe_prog_convert_ctx_access,
1835 };
1836
1837 const struct bpf_prog_ops perf_event_prog_ops = {
1838 };
1839
1840 static DEFINE_MUTEX(bpf_event_mutex);
1841
1842 #define BPF_TRACE_MAX_PROGS 64
1843
1844 int perf_event_attach_bpf_prog(struct perf_event *event,
1845                                struct bpf_prog *prog,
1846                                u64 bpf_cookie)
1847 {
1848         struct bpf_prog_array *old_array;
1849         struct bpf_prog_array *new_array;
1850         int ret = -EEXIST;
1851
1852         /*
1853          * Kprobe override only works if the probe is on the function entry,
1854          * and only if the function is on the error-injection opt-in list.
1855          */
1856         if (prog->kprobe_override &&
1857             (!trace_kprobe_on_func_entry(event->tp_event) ||
1858              !trace_kprobe_error_injectable(event->tp_event)))
1859                 return -EINVAL;
1860
1861         mutex_lock(&bpf_event_mutex);
1862
1863         if (event->prog)
1864                 goto unlock;
1865
1866         old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1867         if (old_array &&
1868             bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1869                 ret = -E2BIG;
1870                 goto unlock;
1871         }
1872
1873         ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
1874         if (ret < 0)
1875                 goto unlock;
1876
1877         /* publish the new array to event->tp_event and set event->prog */
1878         event->prog = prog;
1879         event->bpf_cookie = bpf_cookie;
1880         rcu_assign_pointer(event->tp_event->prog_array, new_array);
1881         bpf_prog_array_free(old_array);
1882
1883 unlock:
1884         mutex_unlock(&bpf_event_mutex);
1885         return ret;
1886 }
1887
1888 void perf_event_detach_bpf_prog(struct perf_event *event)
1889 {
1890         struct bpf_prog_array *old_array;
1891         struct bpf_prog_array *new_array;
1892         int ret;
1893
1894         mutex_lock(&bpf_event_mutex);
1895
1896         if (!event->prog)
1897                 goto unlock;
1898
1899         old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1900         ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
1901         if (ret == -ENOENT)
1902                 goto unlock;
1903         if (ret < 0) {
1904                 bpf_prog_array_delete_safe(old_array, event->prog);
1905         } else {
1906                 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1907                 bpf_prog_array_free(old_array);
1908         }
1909
1910         bpf_prog_put(event->prog);
1911         event->prog = NULL;
1912
1913 unlock:
1914         mutex_unlock(&bpf_event_mutex);
1915 }
1916
1917 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1918 {
1919         struct perf_event_query_bpf __user *uquery = info;
1920         struct perf_event_query_bpf query = {};
1921         struct bpf_prog_array *progs;
1922         u32 *ids, prog_cnt, ids_len;
1923         int ret;
1924
1925         if (!perfmon_capable())
1926                 return -EPERM;
1927         if (event->attr.type != PERF_TYPE_TRACEPOINT)
1928                 return -EINVAL;
1929         if (copy_from_user(&query, uquery, sizeof(query)))
1930                 return -EFAULT;
1931
1932         ids_len = query.ids_len;
1933         if (ids_len > BPF_TRACE_MAX_PROGS)
1934                 return -E2BIG;
1935         ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1936         if (!ids)
1937                 return -ENOMEM;
1938         /*
1939          * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1940          * is required when the user only wants to query uquery->prog_cnt.
1941          * There is no need to check for it, since that case is handled
1942          * gracefully in bpf_prog_array_copy_info.
1943          */
1944
1945         mutex_lock(&bpf_event_mutex);
1946         progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1947         ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1948         mutex_unlock(&bpf_event_mutex);
1949
1950         if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1951             copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1952                 ret = -EFAULT;
1953
1954         kfree(ids);
1955         return ret;
1956 }
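
/*
 * Userspace reaches the query above through the PERF_EVENT_IOC_QUERY_BPF
 * ioctl on a perf event fd. A minimal sketch (illustrative only; error
 * handling elided, 'perf_fd' assumed to be an open tracepoint event):
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 64 * sizeof(__u32));
 *	query->ids_len = 64;
 *	if (!ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query))
 *		printf("%u programs attached\n", query->prog_cnt);
 */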
1957
1958 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1959 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1960
1961 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1962 {
1963         struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1964
1965         for (; btp < __stop__bpf_raw_tp; btp++) {
1966                 if (!strcmp(btp->tp->name, name))
1967                         return btp;
1968         }
1969
1970         return bpf_get_raw_tracepoint_module(name);
1971 }
1972
1973 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1974 {
1975         struct module *mod;
1976
1977         preempt_disable();
1978         mod = __module_address((unsigned long)btp);
1979         module_put(mod);
1980         preempt_enable();
1981 }
1982
1983 static __always_inline
1984 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1985 {
1986         cant_sleep();
1987         rcu_read_lock();
1988         (void) bpf_prog_run(prog, args);
1989         rcu_read_unlock();
1990 }
1991
1992 #define UNPACK(...)                     __VA_ARGS__
1993 #define REPEAT_1(FN, DL, X, ...)        FN(X)
1994 #define REPEAT_2(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1995 #define REPEAT_3(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1996 #define REPEAT_4(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1997 #define REPEAT_5(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1998 #define REPEAT_6(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1999 #define REPEAT_7(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2000 #define REPEAT_8(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2001 #define REPEAT_9(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2002 #define REPEAT_10(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2003 #define REPEAT_11(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2004 #define REPEAT_12(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2005 #define REPEAT(X, FN, DL, ...)          REPEAT_##X(FN, DL, __VA_ARGS__)
2006
2007 #define SARG(X)         u64 arg##X
2008 #define COPY(X)         args[X] = arg##X
2009
2010 #define __DL_COM        (,)
2011 #define __DL_SEM        (;)
2012
2013 #define __SEQ_0_11      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2014
2015 #define BPF_TRACE_DEFN_x(x)                                             \
2016         void bpf_trace_run##x(struct bpf_prog *prog,                    \
2017                               REPEAT(x, SARG, __DL_COM, __SEQ_0_11))    \
2018         {                                                               \
2019                 u64 args[x];                                            \
2020                 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);                  \
2021                 __bpf_trace_run(prog, args);                            \
2022         }                                                               \
2023         EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2024 BPF_TRACE_DEFN_x(1);
2025 BPF_TRACE_DEFN_x(2);
2026 BPF_TRACE_DEFN_x(3);
2027 BPF_TRACE_DEFN_x(4);
2028 BPF_TRACE_DEFN_x(5);
2029 BPF_TRACE_DEFN_x(6);
2030 BPF_TRACE_DEFN_x(7);
2031 BPF_TRACE_DEFN_x(8);
2032 BPF_TRACE_DEFN_x(9);
2033 BPF_TRACE_DEFN_x(10);
2034 BPF_TRACE_DEFN_x(11);
2035 BPF_TRACE_DEFN_x(12);
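
/*
 * For illustration, BPF_TRACE_DEFN_x(2) above expands to roughly:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */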
2036
2037 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2038 {
2039         struct tracepoint *tp = btp->tp;
2040
2041         /*
2042          * Check that the program doesn't access arguments beyond what's
2043          * available in this tracepoint.
2044          */
2045         if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2046                 return -EINVAL;
2047
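        /* likewise, the program's writes must fit the writable buffer */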
2048         if (prog->aux->max_tp_access > btp->writable_size)
2049                 return -EINVAL;
2050
2051         return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
2052                                                    prog);
2053 }
2054
2055 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2056 {
2057         return __bpf_probe_register(btp, prog);
2058 }
2059
2060 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2061 {
2062         return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2063 }
2064
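/*
 * Backs the BPF_TASK_FD_QUERY syscall command for perf events: report
 * which BPF program is attached to the event and what the event itself
 * is attached to (tracepoint, kprobe or uprobe).
 */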
2065 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2066                             u32 *fd_type, const char **buf,
2067                             u64 *probe_offset, u64 *probe_addr)
2068 {
2069         bool is_tracepoint, is_syscall_tp;
2070         struct bpf_prog *prog;
2071         int flags, err = 0;
2072
2073         prog = event->prog;
2074         if (!prog)
2075                 return -ENOENT;
2076
2077         /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2078         if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2079                 return -EOPNOTSUPP;
2080
2081         *prog_id = prog->aux->id;
2082         flags = event->tp_event->flags;
2083         is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2084         is_syscall_tp = is_syscall_trace_event(event->tp_event);
2085
2086         if (is_tracepoint || is_syscall_tp) {
2087                 *buf = is_tracepoint ? event->tp_event->tp->name
2088                                      : event->tp_event->name;
2089                 *fd_type = BPF_FD_TYPE_TRACEPOINT;
2090                 *probe_offset = 0x0;
2091                 *probe_addr = 0x0;
2092         } else {
2093                 /* kprobe/uprobe */
2094                 err = -EOPNOTSUPP;
2095 #ifdef CONFIG_KPROBE_EVENTS
2096                 if (flags & TRACE_EVENT_FL_KPROBE)
2097                         err = bpf_get_kprobe_info(event, fd_type, buf,
2098                                                   probe_offset, probe_addr,
2099                                                   event->attr.type == PERF_TYPE_TRACEPOINT);
2100 #endif
2101 #ifdef CONFIG_UPROBE_EVENTS
2102                 if (flags & TRACE_EVENT_FL_UPROBE)
2103                         err = bpf_get_uprobe_info(event, fd_type, buf,
2104                                                   probe_offset,
2105                                                   event->attr.type == PERF_TYPE_TRACEPOINT);
2106 #endif
2107         }
2108
2109         return err;
2110 }
2111
2112 static int __init send_signal_irq_work_init(void)
2113 {
2114         int cpu;
2115         struct send_signal_irq_work *work;
2116
2117         for_each_possible_cpu(cpu) {
2118                 work = per_cpu_ptr(&send_signal_work, cpu);
2119                 init_irq_work(&work->irq_work, do_bpf_send_signal);
2120         }
2121         return 0;
2122 }
2123
2124 subsys_initcall(send_signal_irq_work_init);
2125
2126 #ifdef CONFIG_MODULES
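/*
 * Track modules that carry raw tracepoints, so that
 * bpf_get_raw_tracepoint_module() can resolve tracepoint names that live
 * in a module rather than in the core kernel image.
 */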
2127 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2128                             void *module)
2129 {
2130         struct bpf_trace_module *btm, *tmp;
2131         struct module *mod = module;
2132         int ret = 0;
2133
2134         if (mod->num_bpf_raw_events == 0 ||
2135             (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2136                 goto out;
2137
2138         mutex_lock(&bpf_module_mutex);
2139
2140         switch (op) {
2141         case MODULE_STATE_COMING:
2142                 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2143                 if (btm) {
2144                         btm->module = module;
2145                         list_add(&btm->list, &bpf_trace_modules);
2146                 } else {
2147                         ret = -ENOMEM;
2148                 }
2149                 break;
2150         case MODULE_STATE_GOING:
2151                 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2152                         if (btm->module == module) {
2153                                 list_del(&btm->list);
2154                                 kfree(btm);
2155                                 break;
2156                         }
2157                 }
2158                 break;
2159         }
2160
2161         mutex_unlock(&bpf_module_mutex);
2162
2163 out:
2164         return notifier_from_errno(ret);
2165 }
2166
2167 static struct notifier_block bpf_module_nb = {
2168         .notifier_call = bpf_event_notify,
2169 };
2170
2171 static int __init bpf_event_init(void)
2172 {
2173         register_module_notifier(&bpf_module_nb);
2174         return 0;
2175 }
2176
2177 fs_initcall(bpf_event_init);
2178 #endif /* CONFIG_MODULES */