1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
9 #include <linux/bpf_verifier.h>
10 #include <linux/bpf_perf_event.h>
11 #include <linux/btf.h>
12 #include <linux/filter.h>
13 #include <linux/uaccess.h>
14 #include <linux/ctype.h>
15 #include <linux/kprobes.h>
16 #include <linux/spinlock.h>
17 #include <linux/syscalls.h>
18 #include <linux/error-injection.h>
19 #include <linux/btf_ids.h>
20 #include <linux/bpf_lsm.h>
21 #include <linux/fprobe.h>
22 #include <linux/bsearch.h>
23 #include <linux/sort.h>
24 #include <linux/key.h>
25 #include <linux/verification.h>
27 #include <net/bpf_sk_storage.h>
29 #include <uapi/linux/bpf.h>
30 #include <uapi/linux/btf.h>
34 #include "trace_probe.h"
37 #define CREATE_TRACE_POINTS
38 #include "bpf_trace.h"
40 #define bpf_event_rcu_dereference(p) \
41 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
44 struct bpf_trace_module {
45 struct module *module;
46 struct list_head list;
49 static LIST_HEAD(bpf_trace_modules);
50 static DEFINE_MUTEX(bpf_module_mutex);
52 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
54 struct bpf_raw_event_map *btp, *ret = NULL;
55 struct bpf_trace_module *btm;
58 mutex_lock(&bpf_module_mutex);
59 list_for_each_entry(btm, &bpf_trace_modules, list) {
60 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
61 btp = &btm->module->bpf_raw_events[i];
62 if (!strcmp(btp->tp->name, name)) {
63 if (try_module_get(btm->module))
70 mutex_unlock(&bpf_module_mutex);
74 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
78 #endif /* CONFIG_MODULES */
80 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
81 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
83 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
84 u64 flags, const struct btf **btf,
86 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
87 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
90 * trace_call_bpf - invoke BPF program
91 * @call: tracepoint event
92 * @ctx: opaque context pointer
94 * kprobe handlers execute BPF programs via this helper.
95 * Can be used from static tracepoints in the future.
97 * Return: BPF programs always return an integer which is interpreted by the kprobe handler as:
99 * 0 - return from kprobe (event is filtered out)
100 * 1 - store kprobe event into ring buffer
101 * Other values are reserved and currently alias to 1
103 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
109 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
111 * since some bpf program is already running on this cpu,
112 * don't call into another bpf program (same or different)
113 * and don't send kprobe event into ring-buffer,
114 * so return zero here
121 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
122 * to all call sites, we did a bpf_prog_array_valid() there to check
123 * whether call->prog_array is empty or not, which is
124 * a heuristic to speed up execution.
126 * If bpf_prog_array_valid() fetched prog_array was
127 * non-NULL, we go into trace_call_bpf() and do the actual
128 * proper rcu_dereference() under RCU lock.
129 * If it turns out that prog_array is NULL then, we bail out.
130 * Conversely, if the pointer fetched by bpf_prog_array_valid()
131 * was NULL, the prog_array is skipped, at the risk of missing
132 * events if it was updated between that check and the
133 * rcu_dereference(). This is an accepted risk.
136 ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
141 __this_cpu_dec(bpf_prog_active);
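/*
 * Editor's illustration (not part of this file): a minimal BPF-side sketch of
 * the return-value contract documented above, assuming a libbpf-style kprobe
 * program built against <bpf/bpf_helpers.h>; returning 0 filters the event
 * out of the ring buffer, returning 1 stores it. Section and function names
 * are illustrative only.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int sample_openat(struct pt_regs *ctx)
 *	{
 *		if (bpf_get_smp_processor_id() != 0)
 *			return 0;
 *		return 1;
 *	}
 */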
146 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
147 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
149 regs_set_return_value(regs, rc);
150 override_function_with_return(regs);
154 static const struct bpf_func_proto bpf_override_return_proto = {
155 .func = bpf_override_return,
157 .ret_type = RET_INTEGER,
158 .arg1_type = ARG_PTR_TO_CTX,
159 .arg2_type = ARG_ANYTHING,
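/*
 * Editor's illustration (not part of this file): sketch of calling
 * bpf_override_return() from a kprobe program. The probed function name is
 * hypothetical; the real target must be probed at function entry and be on
 * the error-injection opt-in list (ALLOW_ERROR_INJECTION()), and
 * CONFIG_BPF_KPROBE_OVERRIDE must be enabled. The call below forces the
 * probed function to return -ENOMEM (-12):
 *
 *	SEC("kprobe/should_fail_open")
 *	int fail_open(struct pt_regs *ctx)
 *	{
 *		bpf_override_return(ctx, -12);
 *		return 0;
 *	}
 */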
163 static __always_inline int
164 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
168 ret = copy_from_user_nofault(dst, unsafe_ptr, size);
169 if (unlikely(ret < 0))
170 memset(dst, 0, size);
174 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
175 const void __user *, unsafe_ptr)
177 return bpf_probe_read_user_common(dst, size, unsafe_ptr);
180 const struct bpf_func_proto bpf_probe_read_user_proto = {
181 .func = bpf_probe_read_user,
183 .ret_type = RET_INTEGER,
184 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
185 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
186 .arg3_type = ARG_ANYTHING,
189 static __always_inline int
190 bpf_probe_read_user_str_common(void *dst, u32 size,
191 const void __user *unsafe_ptr)
196 * NB: We rely on strncpy_from_user() not copying junk past the NUL
197 * terminator into `dst`.
199 * strncpy_from_user() does long-sized strides in the fast path. If the
200 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
201 * then there could be junk after the NUL in `dst`. If user takes `dst`
202 * and keys a hash map with it, then semantically identical strings can
203 * occupy multiple entries in the map.
205 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
206 if (unlikely(ret < 0))
207 memset(dst, 0, size);
211 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
212 const void __user *, unsafe_ptr)
214 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
217 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
218 .func = bpf_probe_read_user_str,
220 .ret_type = RET_INTEGER,
221 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
222 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
223 .arg3_type = ARG_ANYTHING,
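/*
 * Editor's illustration (not part of this file): BPF-side sketch of
 * bpf_probe_read_user_str(), assuming <bpf/bpf_helpers.h> and
 * <bpf/bpf_tracing.h> for PT_REGS_PARM2(). On success the helper returns the
 * string length including the trailing NUL; on failure the destination is
 * zeroed (see bpf_probe_read_user_str_common() above). Names are
 * illustrative; the program stores the kprobe event only if a string was
 * actually read.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		const char *upath = (const char *)PT_REGS_PARM2(ctx);
 *		char path[64];
 *		long len;
 *
 *		len = bpf_probe_read_user_str(path, sizeof(path), upath);
 *		return len > 0;
 *	}
 */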
226 static __always_inline int
227 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
231 ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
232 if (unlikely(ret < 0))
233 memset(dst, 0, size);
237 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
238 const void *, unsafe_ptr)
240 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
243 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
244 .func = bpf_probe_read_kernel,
246 .ret_type = RET_INTEGER,
247 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
248 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
249 .arg3_type = ARG_ANYTHING,
252 static __always_inline int
253 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
258 * The strncpy_from_kernel_nofault() call will likely not fill the
259 * entire buffer, but that's okay in this circumstance as we're probing
260 * arbitrary memory anyway similar to bpf_probe_read_*() and might
261 * as well probe the stack. Thus, memory is explicitly cleared
262 * only in the error case, so that callers who ignore the return
263 * code altogether don't copy garbage; otherwise the length of the
264 * string is returned and can be used for bpf_perf_event_output() et al.
266 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
267 if (unlikely(ret < 0))
268 memset(dst, 0, size);
272 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
273 const void *, unsafe_ptr)
275 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
278 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
279 .func = bpf_probe_read_kernel_str,
281 .ret_type = RET_INTEGER,
282 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
283 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
284 .arg3_type = ARG_ANYTHING,
287 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
288 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
289 const void *, unsafe_ptr)
291 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
292 return bpf_probe_read_user_common(dst, size,
293 (__force void __user *)unsafe_ptr);
295 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
298 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
299 .func = bpf_probe_read_compat,
301 .ret_type = RET_INTEGER,
302 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
303 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
304 .arg3_type = ARG_ANYTHING,
307 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
308 const void *, unsafe_ptr)
310 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
311 return bpf_probe_read_user_str_common(dst, size,
312 (__force void __user *)unsafe_ptr);
314 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
317 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
318 .func = bpf_probe_read_compat_str,
320 .ret_type = RET_INTEGER,
321 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
322 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
323 .arg3_type = ARG_ANYTHING,
325 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
327 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
331 * Ensure we're in user context which is safe for the helper to
332 * run. This helper has no business in a kthread.
334 * access_ok() should prevent writing to non-user memory, but in
335 * some situations (nommu, temporary switch, etc) access_ok() does
336 * not provide enough validation, hence the check on KERNEL_DS.
338 * nmi_uaccess_okay() ensures the probe is not run in an interim
339 * state, when the task or mm are switched. This is specifically
340 * required to prevent the use of temporary mm.
343 if (unlikely(in_interrupt() ||
344 current->flags & (PF_KTHREAD | PF_EXITING)))
346 if (unlikely(!nmi_uaccess_okay()))
349 return copy_to_user_nofault(unsafe_ptr, src, size);
352 static const struct bpf_func_proto bpf_probe_write_user_proto = {
353 .func = bpf_probe_write_user,
355 .ret_type = RET_INTEGER,
356 .arg1_type = ARG_ANYTHING,
357 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
358 .arg3_type = ARG_CONST_SIZE,
361 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
363 if (!capable(CAP_SYS_ADMIN))
366 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
367 current->comm, task_pid_nr(current));
369 return &bpf_probe_write_user_proto;
372 static DEFINE_RAW_SPINLOCK(trace_printk_lock);
374 #define MAX_TRACE_PRINTK_VARARGS 3
375 #define BPF_TRACE_PRINTK_SIZE 1024
377 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
378 u64, arg2, u64, arg3)
380 u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
382 static char buf[BPF_TRACE_PRINTK_SIZE];
386 ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
387 MAX_TRACE_PRINTK_VARARGS);
391 raw_spin_lock_irqsave(&trace_printk_lock, flags);
392 ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
394 trace_bpf_trace_printk(buf);
395 raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
397 bpf_bprintf_cleanup();
402 static const struct bpf_func_proto bpf_trace_printk_proto = {
403 .func = bpf_trace_printk,
405 .ret_type = RET_INTEGER,
406 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
407 .arg2_type = ARG_CONST_SIZE,
410 static void __set_printk_clr_event(void)
413 * This program might be calling bpf_trace_printk,
414 * so enable the associated bpf_trace/bpf_trace_printk event.
415 * Repeat this each time as it is possible a user has
416 * disabled bpf_trace_printk events. By loading a program that
417 * calls bpf_trace_printk(), however, the user has expressed the
418 * intent to see such events.
420 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
421 pr_warn_ratelimited("could not enable bpf_trace_printk events");
424 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
426 __set_printk_clr_event();
427 return &bpf_trace_printk_proto;
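/*
 * Editor's illustration (not part of this file): the BPF-side view of
 * bpf_trace_printk(); libbpf's bpf_printk() macro expands to this helper for
 * up to three arguments. Output goes to the bpf_trace/bpf_trace_printk trace
 * event enabled above and shows up in tracefs trace_pipe. "fd" is an
 * illustrative local variable.
 *
 *	char fmt[] = "pid %d opened fd %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt),
 *			 bpf_get_current_pid_tgid() >> 32, fd);
 */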
430 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
433 static char buf[BPF_TRACE_PRINTK_SIZE];
438 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
441 num_args = data_len / 8;
443 ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
447 raw_spin_lock_irqsave(&trace_printk_lock, flags);
448 ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
450 trace_bpf_trace_printk(buf);
451 raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
453 bpf_bprintf_cleanup();
458 static const struct bpf_func_proto bpf_trace_vprintk_proto = {
459 .func = bpf_trace_vprintk,
461 .ret_type = RET_INTEGER,
462 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
463 .arg2_type = ARG_CONST_SIZE,
464 .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
465 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
468 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
470 __set_printk_clr_event();
471 return &bpf_trace_vprintk_proto;
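/*
 * Editor's illustration (not part of this file): bpf_trace_vprintk() takes
 * its arguments as an array of u64, so data_len must be a multiple of 8 and
 * at most MAX_BPRINTF_VARARGS * 8, matching the check above. libbpf's
 * bpf_printk() switches to this helper when more than three arguments are
 * used. The local variables are illustrative.
 *
 *	u64 args[4] = { pid, tgid, cpu, ts };
 *	char fmt[] = "pid=%llu tgid=%llu cpu=%llu ts=%llu\n";
 *
 *	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
 */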
474 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
475 const void *, data, u32, data_len)
480 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
483 num_args = data_len / 8;
485 err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
489 seq_bprintf(m, fmt, bin_args);
491 bpf_bprintf_cleanup();
493 return seq_has_overflowed(m) ? -EOVERFLOW : 0;
496 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
498 static const struct bpf_func_proto bpf_seq_printf_proto = {
499 .func = bpf_seq_printf,
501 .ret_type = RET_INTEGER,
502 .arg1_type = ARG_PTR_TO_BTF_ID,
503 .arg1_btf_id = &btf_seq_file_ids[0],
504 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
505 .arg3_type = ARG_CONST_SIZE,
506 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
507 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
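/*
 * Editor's illustration (not part of this file): bpf_seq_printf() is only
 * available to BPF_TRACE_ITER programs, where the seq_file comes from the
 * iterator context. A hedged sketch using the task iterator and vmlinux.h
 * style CO-RE field access:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		static const char fmt[] = "pid=%llu tgid=%llu\n";
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		u64 args[2];
 *
 *		if (!task)
 *			return 0;
 *		args[0] = task->pid;
 *		args[1] = task->tgid;
 *		bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
 *		return 0;
 *	}
 */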
510 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
512 return seq_write(m, data, len) ? -EOVERFLOW : 0;
515 static const struct bpf_func_proto bpf_seq_write_proto = {
516 .func = bpf_seq_write,
518 .ret_type = RET_INTEGER,
519 .arg1_type = ARG_PTR_TO_BTF_ID,
520 .arg1_btf_id = &btf_seq_file_ids[0],
521 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
522 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
525 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
526 u32, btf_ptr_size, u64, flags)
528 const struct btf *btf;
532 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
536 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
539 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
540 .func = bpf_seq_printf_btf,
542 .ret_type = RET_INTEGER,
543 .arg1_type = ARG_PTR_TO_BTF_ID,
544 .arg1_btf_id = &btf_seq_file_ids[0],
545 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
546 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
547 .arg4_type = ARG_ANYTHING,
550 static __always_inline int
551 get_map_perf_counter(struct bpf_map *map, u64 flags,
552 u64 *value, u64 *enabled, u64 *running)
554 struct bpf_array *array = container_of(map, struct bpf_array, map);
555 unsigned int cpu = smp_processor_id();
556 u64 index = flags & BPF_F_INDEX_MASK;
557 struct bpf_event_entry *ee;
559 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
561 if (index == BPF_F_CURRENT_CPU)
563 if (unlikely(index >= array->map.max_entries))
566 ee = READ_ONCE(array->ptrs[index]);
570 return perf_event_read_local(ee->event, value, enabled, running);
573 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
578 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
580 * this API is ugly since we miss the [-22..-2] range of valid
581 * counter values, but that's UAPI
588 static const struct bpf_func_proto bpf_perf_event_read_proto = {
589 .func = bpf_perf_event_read,
591 .ret_type = RET_INTEGER,
592 .arg1_type = ARG_CONST_MAP_PTR,
593 .arg2_type = ARG_ANYTHING,
596 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
597 struct bpf_perf_event_value *, buf, u32, size)
601 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
603 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
609 memset(buf, 0, size);
613 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
614 .func = bpf_perf_event_read_value,
616 .ret_type = RET_INTEGER,
617 .arg1_type = ARG_CONST_MAP_PTR,
618 .arg2_type = ARG_ANYTHING,
619 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
620 .arg4_type = ARG_CONST_SIZE,
623 static __always_inline u64
624 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
625 u64 flags, struct perf_sample_data *sd)
627 struct bpf_array *array = container_of(map, struct bpf_array, map);
628 unsigned int cpu = smp_processor_id();
629 u64 index = flags & BPF_F_INDEX_MASK;
630 struct bpf_event_entry *ee;
631 struct perf_event *event;
633 if (index == BPF_F_CURRENT_CPU)
635 if (unlikely(index >= array->map.max_entries))
638 ee = READ_ONCE(array->ptrs[index]);
643 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
644 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
647 if (unlikely(event->oncpu != cpu))
650 return perf_event_output(event, sd, regs);
654 * Support executing tracepoints in normal, irq, and nmi context that each call
655 * bpf_perf_event_output
657 struct bpf_trace_sample_data {
658 struct perf_sample_data sds[3];
661 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
662 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
663 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
664 u64, flags, void *, data, u64, size)
666 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
667 int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
668 struct perf_raw_record raw = {
674 struct perf_sample_data *sd;
677 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
682 sd = &sds->sds[nest_level - 1];
684 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
689 perf_sample_data_init(sd, 0, 0);
691 sd->sample_flags |= PERF_SAMPLE_RAW;
693 err = __bpf_perf_event_output(regs, map, flags, sd);
696 this_cpu_dec(bpf_trace_nest_level);
700 static const struct bpf_func_proto bpf_perf_event_output_proto = {
701 .func = bpf_perf_event_output,
703 .ret_type = RET_INTEGER,
704 .arg1_type = ARG_PTR_TO_CTX,
705 .arg2_type = ARG_CONST_MAP_PTR,
706 .arg3_type = ARG_ANYTHING,
707 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
708 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
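/*
 * Editor's illustration (not part of this file): typical BPF-side use of
 * bpf_perf_event_output() against a BPF_MAP_TYPE_PERF_EVENT_ARRAY map, with
 * BPF_F_CURRENT_CPU selecting the current CPU's ring buffer. The map and
 * event layout are illustrative.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	struct event { u32 pid; u32 cpu; };
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int emit_event(struct pt_regs *ctx)
 *	{
 *		struct event e = {
 *			.pid = bpf_get_current_pid_tgid() >> 32,
 *			.cpu = bpf_get_smp_processor_id(),
 *		};
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */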
711 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
712 struct bpf_nested_pt_regs {
713 struct pt_regs regs[3];
715 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
716 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
718 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
719 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
721 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
722 struct perf_raw_frag frag = {
727 struct perf_raw_record raw = {
730 .next = ctx_size ? &frag : NULL,
736 struct perf_sample_data *sd;
737 struct pt_regs *regs;
740 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
744 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
745 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
747 perf_fetch_caller_regs(regs);
748 perf_sample_data_init(sd, 0, 0);
750 sd->sample_flags |= PERF_SAMPLE_RAW;
752 ret = __bpf_perf_event_output(regs, map, flags, sd);
754 this_cpu_dec(bpf_event_output_nest_level);
758 BPF_CALL_0(bpf_get_current_task)
760 return (long) current;
763 const struct bpf_func_proto bpf_get_current_task_proto = {
764 .func = bpf_get_current_task,
766 .ret_type = RET_INTEGER,
769 BPF_CALL_0(bpf_get_current_task_btf)
771 return (unsigned long) current;
774 const struct bpf_func_proto bpf_get_current_task_btf_proto = {
775 .func = bpf_get_current_task_btf,
777 .ret_type = RET_PTR_TO_BTF_ID_TRUSTED,
778 .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
781 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
783 return (unsigned long) task_pt_regs(task);
786 BTF_ID_LIST(bpf_task_pt_regs_ids)
787 BTF_ID(struct, pt_regs)
789 const struct bpf_func_proto bpf_task_pt_regs_proto = {
790 .func = bpf_task_pt_regs,
792 .arg1_type = ARG_PTR_TO_BTF_ID,
793 .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
794 .ret_type = RET_PTR_TO_BTF_ID,
795 .ret_btf_id = &bpf_task_pt_regs_ids[0],
798 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
800 struct bpf_array *array = container_of(map, struct bpf_array, map);
803 if (unlikely(idx >= array->map.max_entries))
806 cgrp = READ_ONCE(array->ptrs[idx]);
810 return task_under_cgroup_hierarchy(current, cgrp);
813 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
814 .func = bpf_current_task_under_cgroup,
816 .ret_type = RET_INTEGER,
817 .arg1_type = ARG_CONST_MAP_PTR,
818 .arg2_type = ARG_ANYTHING,
821 struct send_signal_irq_work {
822 struct irq_work irq_work;
823 struct task_struct *task;
828 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
830 static void do_bpf_send_signal(struct irq_work *entry)
832 struct send_signal_irq_work *work;
834 work = container_of(entry, struct send_signal_irq_work, irq_work);
835 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
836 put_task_struct(work->task);
839 static int bpf_send_signal_common(u32 sig, enum pid_type type)
841 struct send_signal_irq_work *work = NULL;
843 /* Similar to bpf_probe_write_user(), the task needs to be in a
844 * sound condition and kernel memory access must be permitted in
845 * order to send a signal to the current task.
848 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
850 if (unlikely(!nmi_uaccess_okay()))
852 /* Refuse to signal init (pid 1) to avoid a kernel panic. */
853 if (unlikely(is_global_init(current)))
856 if (irqs_disabled()) {
857 /* Do an early check on signal validity. Otherwise,
858 * the error is lost in deferred irq_work.
860 if (unlikely(!valid_signal(sig)))
863 work = this_cpu_ptr(&send_signal_work);
864 if (irq_work_is_busy(&work->irq_work))
867 /* Add the current task, which is the target of the signal,
868 * to the irq_work. The current task may change by the time the
869 * queued irq_work is executed.
871 work->task = get_task_struct(current);
874 irq_work_queue(&work->irq_work);
878 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
881 BPF_CALL_1(bpf_send_signal, u32, sig)
883 return bpf_send_signal_common(sig, PIDTYPE_TGID);
886 static const struct bpf_func_proto bpf_send_signal_proto = {
887 .func = bpf_send_signal,
889 .ret_type = RET_INTEGER,
890 .arg1_type = ARG_ANYTHING,
893 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
895 return bpf_send_signal_common(sig, PIDTYPE_PID);
898 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
899 .func = bpf_send_signal_thread,
901 .ret_type = RET_INTEGER,
902 .arg1_type = ARG_ANYTHING,
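/*
 * Editor's illustration (not part of this file): bpf_send_signal() signals
 * the whole thread group of the current task (PIDTYPE_TGID), while
 * bpf_send_signal_thread() signals only the current thread (PIDTYPE_PID).
 * A hedged sketch; "target_pid" is an illustrative variable and 10 is
 * SIGUSR1 on x86:
 *
 *	if (bpf_get_current_pid_tgid() >> 32 == target_pid)
 *		bpf_send_signal(10);
 */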
905 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
913 p = d_path(path, buf, sz);
918 memmove(buf, p, len);
924 BTF_SET_START(btf_allowlist_d_path)
925 #ifdef CONFIG_SECURITY
926 BTF_ID(func, security_file_permission)
927 BTF_ID(func, security_inode_getattr)
928 BTF_ID(func, security_file_open)
930 #ifdef CONFIG_SECURITY_PATH
931 BTF_ID(func, security_path_truncate)
933 BTF_ID(func, vfs_truncate)
934 BTF_ID(func, vfs_fallocate)
935 BTF_ID(func, dentry_open)
936 BTF_ID(func, vfs_getattr)
937 BTF_ID(func, filp_close)
938 BTF_SET_END(btf_allowlist_d_path)
940 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
942 if (prog->type == BPF_PROG_TYPE_TRACING &&
943 prog->expected_attach_type == BPF_TRACE_ITER)
946 if (prog->type == BPF_PROG_TYPE_LSM)
947 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
949 return btf_id_set_contains(&btf_allowlist_d_path,
950 prog->aux->attach_btf_id);
953 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
955 static const struct bpf_func_proto bpf_d_path_proto = {
958 .ret_type = RET_INTEGER,
959 .arg1_type = ARG_PTR_TO_BTF_ID,
960 .arg1_btf_id = &bpf_d_path_btf_ids[0],
961 .arg2_type = ARG_PTR_TO_MEM,
962 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
963 .allowed = bpf_d_path_allowed,
966 #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
967 BTF_F_PTR_RAW | BTF_F_ZERO)
969 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
970 u64 flags, const struct btf **btf,
973 const struct btf_type *t;
975 if (unlikely(flags & ~(BTF_F_ALL)))
978 if (btf_ptr_size != sizeof(struct btf_ptr))
981 *btf = bpf_get_btf_vmlinux();
983 if (IS_ERR_OR_NULL(*btf))
984 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
986 if (ptr->type_id > 0)
987 *btf_id = ptr->type_id;
992 t = btf_type_by_id(*btf, *btf_id);
993 if (*btf_id <= 0 || !t)
999 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1000 u32, btf_ptr_size, u64, flags)
1002 const struct btf *btf;
1006 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1010 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1014 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1015 .func = bpf_snprintf_btf,
1017 .ret_type = RET_INTEGER,
1018 .arg1_type = ARG_PTR_TO_MEM,
1019 .arg2_type = ARG_CONST_SIZE,
1020 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1021 .arg4_type = ARG_CONST_SIZE,
1022 .arg5_type = ARG_ANYTHING,
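/*
 * Editor's illustration (not part of this file): bpf_snprintf_btf() renders a
 * kernel object through its BTF description. struct btf_ptr carries the
 * object pointer and its vmlinux BTF type id, typically obtained with
 * libbpf's bpf_core_type_id_kernel(). Buffer size and flags are illustrative.
 *
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	struct btf_ptr p = {
 *		.ptr = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *	char out[256];
 *
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_COMPACT);
 */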
1025 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1027 /* This helper call is inlined by verifier. */
1028 return ((u64 *)ctx)[-2];
1031 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1032 .func = bpf_get_func_ip_tracing,
1034 .ret_type = RET_INTEGER,
1035 .arg1_type = ARG_PTR_TO_CTX,
1038 #ifdef CONFIG_X86_KERNEL_IBT
1039 static unsigned long get_entry_ip(unsigned long fentry_ip)
1043 /* Being extra safe in here in case entry ip is on the page-edge. */
1044 if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
1046 if (is_endbr(instr))
1047 fentry_ip -= ENDBR_INSN_SIZE;
1051 #define get_entry_ip(fentry_ip) fentry_ip
1054 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1056 struct kprobe *kp = kprobe_running();
1058 if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1061 return get_entry_ip((uintptr_t)kp->addr);
1064 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1065 .func = bpf_get_func_ip_kprobe,
1067 .ret_type = RET_INTEGER,
1068 .arg1_type = ARG_PTR_TO_CTX,
1071 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1073 return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1076 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1077 .func = bpf_get_func_ip_kprobe_multi,
1079 .ret_type = RET_INTEGER,
1080 .arg1_type = ARG_PTR_TO_CTX,
1083 BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1085 return bpf_kprobe_multi_cookie(current->bpf_ctx);
1088 static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1089 .func = bpf_get_attach_cookie_kprobe_multi,
1091 .ret_type = RET_INTEGER,
1092 .arg1_type = ARG_PTR_TO_CTX,
1095 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1097 struct bpf_trace_run_ctx *run_ctx;
1099 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1100 return run_ctx->bpf_cookie;
1103 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1104 .func = bpf_get_attach_cookie_trace,
1106 .ret_type = RET_INTEGER,
1107 .arg1_type = ARG_PTR_TO_CTX,
1110 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1112 return ctx->event->bpf_cookie;
1115 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1116 .func = bpf_get_attach_cookie_pe,
1118 .ret_type = RET_INTEGER,
1119 .arg1_type = ARG_PTR_TO_CTX,
1122 BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1124 struct bpf_trace_run_ctx *run_ctx;
1126 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1127 return run_ctx->bpf_cookie;
1130 static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1131 .func = bpf_get_attach_cookie_tracing,
1133 .ret_type = RET_INTEGER,
1134 .arg1_type = ARG_PTR_TO_CTX,
1137 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1142 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1143 u32 entry_cnt = size / br_entry_size;
1145 entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1147 if (unlikely(flags))
1153 return entry_cnt * br_entry_size;
1157 static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1158 .func = bpf_get_branch_snapshot,
1160 .ret_type = RET_INTEGER,
1161 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1162 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1165 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1167 /* This helper call is inlined by verifier. */
1168 u64 nr_args = ((u64 *)ctx)[-1];
1170 if ((u64) n >= nr_args)
1172 *value = ((u64 *)ctx)[n];
1176 static const struct bpf_func_proto bpf_get_func_arg_proto = {
1177 .func = get_func_arg,
1178 .ret_type = RET_INTEGER,
1179 .arg1_type = ARG_PTR_TO_CTX,
1180 .arg2_type = ARG_ANYTHING,
1181 .arg3_type = ARG_PTR_TO_LONG,
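/*
 * Editor's illustration (not part of this file): bpf_get_func_arg(),
 * bpf_get_func_ret() and bpf_get_func_arg_cnt() are only offered to
 * trampoline-based programs (fentry/fexit/fmod_ret), where the verifier
 * inlines them into direct ctx accesses. A hedged fexit sketch; the traced
 * function is illustrative:
 *
 *	SEC("fexit/do_sys_openat2")
 *	int probe_exit(u64 *ctx)
 *	{
 *		u64 arg0 = 0, nr_args;
 *
 *		nr_args = bpf_get_func_arg_cnt(ctx);
 *		if (nr_args > 0)
 *			bpf_get_func_arg(ctx, 0, &arg0);
 *		return 0;
 *	}
 */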
1184 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1186 /* This helper call is inlined by verifier. */
1187 u64 nr_args = ((u64 *)ctx)[-1];
1189 *value = ((u64 *)ctx)[nr_args];
1193 static const struct bpf_func_proto bpf_get_func_ret_proto = {
1194 .func = get_func_ret,
1195 .ret_type = RET_INTEGER,
1196 .arg1_type = ARG_PTR_TO_CTX,
1197 .arg2_type = ARG_PTR_TO_LONG,
1200 BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1202 /* This helper call is inlined by verifier. */
1203 return ((u64 *)ctx)[-1];
1206 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1207 .func = get_func_arg_cnt,
1208 .ret_type = RET_INTEGER,
1209 .arg1_type = ARG_PTR_TO_CTX,
1214 __diag_ignore_all("-Wmissing-prototypes",
1215 "kfuncs which will be used in BPF programs");
1218 * bpf_lookup_user_key - lookup a key by its serial
1219 * @serial: key handle serial number
1220 * @flags: lookup-specific flags
1222 * Search a key with a given *serial* and the provided *flags*.
1223 * If found, increment the reference count of the key by one, and
1224 * return it in the bpf_key structure.
1226 * The bpf_key structure must be passed to bpf_key_put() when done
1227 * with it, so that the key reference count is decremented and the
1228 * bpf_key structure is freed.
1230 * Permission checks are deferred to the time the key is used by
1231 * one of the available key-specific kfuncs.
1233 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1234 * special keyring (e.g. session keyring), if it doesn't yet exist.
1235 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1236 * for the key construction, and to retrieve uninstantiated keys (keys
1237 * without data attached to them).
1239 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1240 * NULL pointer otherwise.
1242 struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1245 struct bpf_key *bkey;
1247 if (flags & ~KEY_LOOKUP_ALL)
1251 * Permission check is deferred until the key is used, as the
1252 * intent of the caller is unknown here.
1254 key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1255 if (IS_ERR(key_ref))
1258 bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1260 key_put(key_ref_to_ptr(key_ref));
1264 bkey->key = key_ref_to_ptr(key_ref);
1265 bkey->has_ref = true;
1271 * bpf_lookup_system_key - lookup a key by a system-defined ID
1274 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1275 * The key pointer is marked as invalid, to prevent bpf_key_put() from
1276 * attempting to decrement the key reference count on that pointer. The key
1277 * pointer set in such a way is currently understood only by
1278 * verify_pkcs7_signature().
1280 * Set *id* to one of the values defined in include/linux/verification.h:
1281 * 0 for the primary keyring (immutable keyring of system keys);
1282 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1283 * (where keys can be added only if they are vouched for by existing keys
1284 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1285 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1286 * kernel image and, possibly, the initramfs signature).
1288 * Return: a bpf_key pointer with an invalid key pointer set from the
1289 * pre-determined ID on success, a NULL pointer otherwise
1291 struct bpf_key *bpf_lookup_system_key(u64 id)
1293 struct bpf_key *bkey;
1295 if (system_keyring_id_check(id) < 0)
1298 bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1302 bkey->key = (struct key *)(unsigned long)id;
1303 bkey->has_ref = false;
1309 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1310 * @bkey: bpf_key structure
1312 * Decrement the reference count of the key inside *bkey*, if the pointer
1313 * is valid, and free *bkey*.
1315 void bpf_key_put(struct bpf_key *bkey)
1323 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1325 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1326 * @data_ptr: data to verify
1327 * @sig_ptr: signature of the data
1328 * @trusted_keyring: keyring with keys trusted for signature verification
1330 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1331 * with keys in a keyring referenced by *trusted_keyring*.
1333 * Return: 0 on success, a negative value on error.
1335 int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
1336 struct bpf_dynptr_kern *sig_ptr,
1337 struct bpf_key *trusted_keyring)
1341 if (trusted_keyring->has_ref) {
1343 * Do the permission check deferred in bpf_lookup_user_key().
1344 * See bpf_lookup_user_key() for more details.
1346 * A call to key_task_permission() here would be redundant, as
1347 * it is already done by keyring_search() called by
1348 * find_asymmetric_key().
1350 ret = key_validate(trusted_keyring->key);
1355 return verify_pkcs7_signature(data_ptr->data,
1356 bpf_dynptr_get_size(data_ptr),
1358 bpf_dynptr_get_size(sig_ptr),
1359 trusted_keyring->key,
1360 VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1363 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1367 BTF_SET8_START(key_sig_kfunc_set)
1368 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1369 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1370 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1371 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1372 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1374 BTF_SET8_END(key_sig_kfunc_set)
1376 static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1377 .owner = THIS_MODULE,
1378 .set = &key_sig_kfunc_set,
1381 static int __init bpf_key_sig_kfuncs_init(void)
1383 return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1384 &bpf_key_sig_kfunc_set);
1387 late_initcall(bpf_key_sig_kfuncs_init);
1388 #endif /* CONFIG_KEYS */
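/*
 * Editor's illustration (not part of this file): the keyring kfuncs above are
 * meant for sleepable BPF LSM/tracing programs. A hedged sketch, assuming the
 * kfuncs are declared as extern ksyms on the BPF side and that "data_ptr" and
 * "sig_ptr" are dynptrs prepared elsewhere; bpf_lookup_system_key(0) selects
 * the primary keyring, per the documentation above.
 *
 *	extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
 *	extern void bpf_key_put(struct bpf_key *key) __ksym;
 *	extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data,
 *					      struct bpf_dynptr *sig,
 *					      struct bpf_key *keyring) __ksym;
 *
 *	struct bpf_key *kr = bpf_lookup_system_key(0);
 *	int ret;
 *
 *	if (kr) {
 *		ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, kr);
 *		bpf_key_put(kr);
 *	}
 */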
1390 static const struct bpf_func_proto *
1391 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1394 case BPF_FUNC_map_lookup_elem:
1395 return &bpf_map_lookup_elem_proto;
1396 case BPF_FUNC_map_update_elem:
1397 return &bpf_map_update_elem_proto;
1398 case BPF_FUNC_map_delete_elem:
1399 return &bpf_map_delete_elem_proto;
1400 case BPF_FUNC_map_push_elem:
1401 return &bpf_map_push_elem_proto;
1402 case BPF_FUNC_map_pop_elem:
1403 return &bpf_map_pop_elem_proto;
1404 case BPF_FUNC_map_peek_elem:
1405 return &bpf_map_peek_elem_proto;
1406 case BPF_FUNC_map_lookup_percpu_elem:
1407 return &bpf_map_lookup_percpu_elem_proto;
1408 case BPF_FUNC_ktime_get_ns:
1409 return &bpf_ktime_get_ns_proto;
1410 case BPF_FUNC_ktime_get_boot_ns:
1411 return &bpf_ktime_get_boot_ns_proto;
1412 case BPF_FUNC_tail_call:
1413 return &bpf_tail_call_proto;
1414 case BPF_FUNC_get_current_pid_tgid:
1415 return &bpf_get_current_pid_tgid_proto;
1416 case BPF_FUNC_get_current_task:
1417 return &bpf_get_current_task_proto;
1418 case BPF_FUNC_get_current_task_btf:
1419 return &bpf_get_current_task_btf_proto;
1420 case BPF_FUNC_task_pt_regs:
1421 return &bpf_task_pt_regs_proto;
1422 case BPF_FUNC_get_current_uid_gid:
1423 return &bpf_get_current_uid_gid_proto;
1424 case BPF_FUNC_get_current_comm:
1425 return &bpf_get_current_comm_proto;
1426 case BPF_FUNC_trace_printk:
1427 return bpf_get_trace_printk_proto();
1428 case BPF_FUNC_get_smp_processor_id:
1429 return &bpf_get_smp_processor_id_proto;
1430 case BPF_FUNC_get_numa_node_id:
1431 return &bpf_get_numa_node_id_proto;
1432 case BPF_FUNC_perf_event_read:
1433 return &bpf_perf_event_read_proto;
1434 case BPF_FUNC_current_task_under_cgroup:
1435 return &bpf_current_task_under_cgroup_proto;
1436 case BPF_FUNC_get_prandom_u32:
1437 return &bpf_get_prandom_u32_proto;
1438 case BPF_FUNC_probe_write_user:
1439 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1440 NULL : bpf_get_probe_write_proto();
1441 case BPF_FUNC_probe_read_user:
1442 return &bpf_probe_read_user_proto;
1443 case BPF_FUNC_probe_read_kernel:
1444 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1445 NULL : &bpf_probe_read_kernel_proto;
1446 case BPF_FUNC_probe_read_user_str:
1447 return &bpf_probe_read_user_str_proto;
1448 case BPF_FUNC_probe_read_kernel_str:
1449 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1450 NULL : &bpf_probe_read_kernel_str_proto;
1451 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1452 case BPF_FUNC_probe_read:
1453 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1454 NULL : &bpf_probe_read_compat_proto;
1455 case BPF_FUNC_probe_read_str:
1456 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1457 NULL : &bpf_probe_read_compat_str_proto;
1459 #ifdef CONFIG_CGROUPS
1460 case BPF_FUNC_get_current_cgroup_id:
1461 return &bpf_get_current_cgroup_id_proto;
1462 case BPF_FUNC_get_current_ancestor_cgroup_id:
1463 return &bpf_get_current_ancestor_cgroup_id_proto;
1464 case BPF_FUNC_cgrp_storage_get:
1465 return &bpf_cgrp_storage_get_proto;
1466 case BPF_FUNC_cgrp_storage_delete:
1467 return &bpf_cgrp_storage_delete_proto;
1469 case BPF_FUNC_send_signal:
1470 return &bpf_send_signal_proto;
1471 case BPF_FUNC_send_signal_thread:
1472 return &bpf_send_signal_thread_proto;
1473 case BPF_FUNC_perf_event_read_value:
1474 return &bpf_perf_event_read_value_proto;
1475 case BPF_FUNC_get_ns_current_pid_tgid:
1476 return &bpf_get_ns_current_pid_tgid_proto;
1477 case BPF_FUNC_ringbuf_output:
1478 return &bpf_ringbuf_output_proto;
1479 case BPF_FUNC_ringbuf_reserve:
1480 return &bpf_ringbuf_reserve_proto;
1481 case BPF_FUNC_ringbuf_submit:
1482 return &bpf_ringbuf_submit_proto;
1483 case BPF_FUNC_ringbuf_discard:
1484 return &bpf_ringbuf_discard_proto;
1485 case BPF_FUNC_ringbuf_query:
1486 return &bpf_ringbuf_query_proto;
1487 case BPF_FUNC_jiffies64:
1488 return &bpf_jiffies64_proto;
1489 case BPF_FUNC_get_task_stack:
1490 return &bpf_get_task_stack_proto;
1491 case BPF_FUNC_copy_from_user:
1492 return &bpf_copy_from_user_proto;
1493 case BPF_FUNC_copy_from_user_task:
1494 return &bpf_copy_from_user_task_proto;
1495 case BPF_FUNC_snprintf_btf:
1496 return &bpf_snprintf_btf_proto;
1497 case BPF_FUNC_per_cpu_ptr:
1498 return &bpf_per_cpu_ptr_proto;
1499 case BPF_FUNC_this_cpu_ptr:
1500 return &bpf_this_cpu_ptr_proto;
1501 case BPF_FUNC_task_storage_get:
1502 if (bpf_prog_check_recur(prog))
1503 return &bpf_task_storage_get_recur_proto;
1504 return &bpf_task_storage_get_proto;
1505 case BPF_FUNC_task_storage_delete:
1506 if (bpf_prog_check_recur(prog))
1507 return &bpf_task_storage_delete_recur_proto;
1508 return &bpf_task_storage_delete_proto;
1509 case BPF_FUNC_for_each_map_elem:
1510 return &bpf_for_each_map_elem_proto;
1511 case BPF_FUNC_snprintf:
1512 return &bpf_snprintf_proto;
1513 case BPF_FUNC_get_func_ip:
1514 return &bpf_get_func_ip_proto_tracing;
1515 case BPF_FUNC_get_branch_snapshot:
1516 return &bpf_get_branch_snapshot_proto;
1517 case BPF_FUNC_find_vma:
1518 return &bpf_find_vma_proto;
1519 case BPF_FUNC_trace_vprintk:
1520 return bpf_get_trace_vprintk_proto();
1522 return bpf_base_func_proto(func_id);
1526 static const struct bpf_func_proto *
1527 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1530 case BPF_FUNC_perf_event_output:
1531 return &bpf_perf_event_output_proto;
1532 case BPF_FUNC_get_stackid:
1533 return &bpf_get_stackid_proto;
1534 case BPF_FUNC_get_stack:
1535 return &bpf_get_stack_proto;
1536 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1537 case BPF_FUNC_override_return:
1538 return &bpf_override_return_proto;
1540 case BPF_FUNC_get_func_ip:
1541 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1542 &bpf_get_func_ip_proto_kprobe_multi :
1543 &bpf_get_func_ip_proto_kprobe;
1544 case BPF_FUNC_get_attach_cookie:
1545 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1546 &bpf_get_attach_cookie_proto_kmulti :
1547 &bpf_get_attach_cookie_proto_trace;
1549 return bpf_tracing_func_proto(func_id, prog);
1553 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1554 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1555 const struct bpf_prog *prog,
1556 struct bpf_insn_access_aux *info)
1558 if (off < 0 || off >= sizeof(struct pt_regs))
1560 if (type != BPF_READ)
1562 if (off % size != 0)
1565 * Assertion for 32 bit to make sure last 8 byte access
1566 * (BPF_DW) to the last 4 byte member is disallowed.
1568 if (off + size > sizeof(struct pt_regs))
1574 const struct bpf_verifier_ops kprobe_verifier_ops = {
1575 .get_func_proto = kprobe_prog_func_proto,
1576 .is_valid_access = kprobe_prog_is_valid_access,
1579 const struct bpf_prog_ops kprobe_prog_ops = {
1582 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1583 u64, flags, void *, data, u64, size)
1585 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1588 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1589 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1590 * from there and call the same bpf_perf_event_output() helper inline.
1592 return ____bpf_perf_event_output(regs, map, flags, data, size);
1595 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1596 .func = bpf_perf_event_output_tp,
1598 .ret_type = RET_INTEGER,
1599 .arg1_type = ARG_PTR_TO_CTX,
1600 .arg2_type = ARG_CONST_MAP_PTR,
1601 .arg3_type = ARG_ANYTHING,
1602 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1603 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1606 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1609 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1612 * Same comment as in bpf_perf_event_output_tp(), only that this time
1613 * the other helper's function body cannot be inlined due to being
1614 * external, thus we need to call the raw helper function.
1616 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1620 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1621 .func = bpf_get_stackid_tp,
1623 .ret_type = RET_INTEGER,
1624 .arg1_type = ARG_PTR_TO_CTX,
1625 .arg2_type = ARG_CONST_MAP_PTR,
1626 .arg3_type = ARG_ANYTHING,
1629 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1632 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1634 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1635 (unsigned long) size, flags, 0);
1638 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1639 .func = bpf_get_stack_tp,
1641 .ret_type = RET_INTEGER,
1642 .arg1_type = ARG_PTR_TO_CTX,
1643 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1644 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1645 .arg4_type = ARG_ANYTHING,
1648 static const struct bpf_func_proto *
1649 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1652 case BPF_FUNC_perf_event_output:
1653 return &bpf_perf_event_output_proto_tp;
1654 case BPF_FUNC_get_stackid:
1655 return &bpf_get_stackid_proto_tp;
1656 case BPF_FUNC_get_stack:
1657 return &bpf_get_stack_proto_tp;
1658 case BPF_FUNC_get_attach_cookie:
1659 return &bpf_get_attach_cookie_proto_trace;
1661 return bpf_tracing_func_proto(func_id, prog);
1665 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1666 const struct bpf_prog *prog,
1667 struct bpf_insn_access_aux *info)
1669 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1671 if (type != BPF_READ)
1673 if (off % size != 0)
1676 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1680 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1681 .get_func_proto = tp_prog_func_proto,
1682 .is_valid_access = tp_prog_is_valid_access,
1685 const struct bpf_prog_ops tracepoint_prog_ops = {
1688 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1689 struct bpf_perf_event_value *, buf, u32, size)
1693 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1695 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1701 memset(buf, 0, size);
1705 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1706 .func = bpf_perf_prog_read_value,
1708 .ret_type = RET_INTEGER,
1709 .arg1_type = ARG_PTR_TO_CTX,
1710 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1711 .arg3_type = ARG_CONST_SIZE,
1714 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1715 void *, buf, u32, size, u64, flags)
1717 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1718 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1721 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1724 if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1727 if (unlikely(!br_stack))
1730 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1731 return br_stack->nr * br_entry_size;
1733 if (!buf || (size % br_entry_size != 0))
1736 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1737 memcpy(buf, br_stack->entries, to_copy);
1742 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1743 .func = bpf_read_branch_records,
1745 .ret_type = RET_INTEGER,
1746 .arg1_type = ARG_PTR_TO_CTX,
1747 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1748 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1749 .arg4_type = ARG_ANYTHING,
1752 static const struct bpf_func_proto *
1753 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1756 case BPF_FUNC_perf_event_output:
1757 return &bpf_perf_event_output_proto_tp;
1758 case BPF_FUNC_get_stackid:
1759 return &bpf_get_stackid_proto_pe;
1760 case BPF_FUNC_get_stack:
1761 return &bpf_get_stack_proto_pe;
1762 case BPF_FUNC_perf_prog_read_value:
1763 return &bpf_perf_prog_read_value_proto;
1764 case BPF_FUNC_read_branch_records:
1765 return &bpf_read_branch_records_proto;
1766 case BPF_FUNC_get_attach_cookie:
1767 return &bpf_get_attach_cookie_proto_pe;
1769 return bpf_tracing_func_proto(func_id, prog);
1774 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1775 * to avoid potential recursive reuse issue when/if tracepoints are added
1776 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1778 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1779 * in normal, irq, and nmi context.
1781 struct bpf_raw_tp_regs {
1782 struct pt_regs regs[3];
1784 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1785 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1786 static struct pt_regs *get_bpf_raw_tp_regs(void)
1788 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1789 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1791 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1792 this_cpu_dec(bpf_raw_tp_nest_level);
1793 return ERR_PTR(-EBUSY);
1796 return &tp_regs->regs[nest_level - 1];
1799 static void put_bpf_raw_tp_regs(void)
1801 this_cpu_dec(bpf_raw_tp_nest_level);
1804 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1805 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1807 struct pt_regs *regs = get_bpf_raw_tp_regs();
1811 return PTR_ERR(regs);
1813 perf_fetch_caller_regs(regs);
1814 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1816 put_bpf_raw_tp_regs();
1820 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1821 .func = bpf_perf_event_output_raw_tp,
1823 .ret_type = RET_INTEGER,
1824 .arg1_type = ARG_PTR_TO_CTX,
1825 .arg2_type = ARG_CONST_MAP_PTR,
1826 .arg3_type = ARG_ANYTHING,
1827 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1828 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1831 extern const struct bpf_func_proto bpf_skb_output_proto;
1832 extern const struct bpf_func_proto bpf_xdp_output_proto;
1833 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1835 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1836 struct bpf_map *, map, u64, flags)
1838 struct pt_regs *regs = get_bpf_raw_tp_regs();
1842 return PTR_ERR(regs);
1844 perf_fetch_caller_regs(regs);
1845 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1846 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1848 put_bpf_raw_tp_regs();
1852 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1853 .func = bpf_get_stackid_raw_tp,
1855 .ret_type = RET_INTEGER,
1856 .arg1_type = ARG_PTR_TO_CTX,
1857 .arg2_type = ARG_CONST_MAP_PTR,
1858 .arg3_type = ARG_ANYTHING,
1861 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1862 void *, buf, u32, size, u64, flags)
1864 struct pt_regs *regs = get_bpf_raw_tp_regs();
1868 return PTR_ERR(regs);
1870 perf_fetch_caller_regs(regs);
1871 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1872 (unsigned long) size, flags, 0);
1873 put_bpf_raw_tp_regs();
1877 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1878 .func = bpf_get_stack_raw_tp,
1880 .ret_type = RET_INTEGER,
1881 .arg1_type = ARG_PTR_TO_CTX,
1882 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1883 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1884 .arg4_type = ARG_ANYTHING,
1887 static const struct bpf_func_proto *
1888 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1891 case BPF_FUNC_perf_event_output:
1892 return &bpf_perf_event_output_proto_raw_tp;
1893 case BPF_FUNC_get_stackid:
1894 return &bpf_get_stackid_proto_raw_tp;
1895 case BPF_FUNC_get_stack:
1896 return &bpf_get_stack_proto_raw_tp;
1898 return bpf_tracing_func_proto(func_id, prog);
1902 const struct bpf_func_proto *
1903 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1905 const struct bpf_func_proto *fn;
1909 case BPF_FUNC_skb_output:
1910 return &bpf_skb_output_proto;
1911 case BPF_FUNC_xdp_output:
1912 return &bpf_xdp_output_proto;
1913 case BPF_FUNC_skc_to_tcp6_sock:
1914 return &bpf_skc_to_tcp6_sock_proto;
1915 case BPF_FUNC_skc_to_tcp_sock:
1916 return &bpf_skc_to_tcp_sock_proto;
1917 case BPF_FUNC_skc_to_tcp_timewait_sock:
1918 return &bpf_skc_to_tcp_timewait_sock_proto;
1919 case BPF_FUNC_skc_to_tcp_request_sock:
1920 return &bpf_skc_to_tcp_request_sock_proto;
1921 case BPF_FUNC_skc_to_udp6_sock:
1922 return &bpf_skc_to_udp6_sock_proto;
1923 case BPF_FUNC_skc_to_unix_sock:
1924 return &bpf_skc_to_unix_sock_proto;
1925 case BPF_FUNC_skc_to_mptcp_sock:
1926 return &bpf_skc_to_mptcp_sock_proto;
1927 case BPF_FUNC_sk_storage_get:
1928 return &bpf_sk_storage_get_tracing_proto;
1929 case BPF_FUNC_sk_storage_delete:
1930 return &bpf_sk_storage_delete_tracing_proto;
1931 case BPF_FUNC_sock_from_file:
1932 return &bpf_sock_from_file_proto;
1933 case BPF_FUNC_get_socket_cookie:
1934 return &bpf_get_socket_ptr_cookie_proto;
1935 case BPF_FUNC_xdp_get_buff_len:
1936 return &bpf_xdp_get_buff_len_trace_proto;
1938 case BPF_FUNC_seq_printf:
1939 return prog->expected_attach_type == BPF_TRACE_ITER ?
1940 &bpf_seq_printf_proto :
1942 case BPF_FUNC_seq_write:
1943 return prog->expected_attach_type == BPF_TRACE_ITER ?
1944 &bpf_seq_write_proto :
1946 case BPF_FUNC_seq_printf_btf:
1947 return prog->expected_attach_type == BPF_TRACE_ITER ?
1948 &bpf_seq_printf_btf_proto :
1950 case BPF_FUNC_d_path:
1951 return &bpf_d_path_proto;
1952 case BPF_FUNC_get_func_arg:
1953 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1954 case BPF_FUNC_get_func_ret:
1955 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1956 case BPF_FUNC_get_func_arg_cnt:
1957 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
1958 case BPF_FUNC_get_attach_cookie:
1959 return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
1961 fn = raw_tp_prog_func_proto(func_id, prog);
1962 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
1963 fn = bpf_iter_get_func_proto(func_id, prog);
1968 static bool raw_tp_prog_is_valid_access(int off, int size,
1969 enum bpf_access_type type,
1970 const struct bpf_prog *prog,
1971 struct bpf_insn_access_aux *info)
1973 return bpf_tracing_ctx_access(off, size, type);
1976 static bool tracing_prog_is_valid_access(int off, int size,
1977 enum bpf_access_type type,
1978 const struct bpf_prog *prog,
1979 struct bpf_insn_access_aux *info)
1981 return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
1984 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1985 const union bpf_attr *kattr,
1986 union bpf_attr __user *uattr)
1991 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1992 .get_func_proto = raw_tp_prog_func_proto,
1993 .is_valid_access = raw_tp_prog_is_valid_access,
1996 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1998 .test_run = bpf_prog_test_run_raw_tp,
2002 const struct bpf_verifier_ops tracing_verifier_ops = {
2003 .get_func_proto = tracing_prog_func_proto,
2004 .is_valid_access = tracing_prog_is_valid_access,
2007 const struct bpf_prog_ops tracing_prog_ops = {
2008 .test_run = bpf_prog_test_run_tracing,
2011 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2012 enum bpf_access_type type,
2013 const struct bpf_prog *prog,
2014 struct bpf_insn_access_aux *info)
2017 if (size != sizeof(u64) || type != BPF_READ)
2019 info->reg_type = PTR_TO_TP_BUFFER;
2021 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2024 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2025 .get_func_proto = raw_tp_prog_func_proto,
2026 .is_valid_access = raw_tp_writable_prog_is_valid_access,
2029 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2032 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2033 const struct bpf_prog *prog,
2034 struct bpf_insn_access_aux *info)
2036 const int size_u64 = sizeof(u64);
2038 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2040 if (type != BPF_READ)
2042 if (off % size != 0) {
2043 if (sizeof(unsigned long) != 4)
2047 if (off % size != 4)
2052 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2053 bpf_ctx_record_field_size(info, size_u64);
2054 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2057 case bpf_ctx_range(struct bpf_perf_event_data, addr):
2058 bpf_ctx_record_field_size(info, size_u64);
2059 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2063 if (size != sizeof(long))
2070 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2071 const struct bpf_insn *si,
2072 struct bpf_insn *insn_buf,
2073 struct bpf_prog *prog, u32 *target_size)
2075 struct bpf_insn *insn = insn_buf;
2078 case offsetof(struct bpf_perf_event_data, sample_period):
2079 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2080 data), si->dst_reg, si->src_reg,
2081 offsetof(struct bpf_perf_event_data_kern, data));
2082 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2083 bpf_target_off(struct perf_sample_data, period, 8,
2086 case offsetof(struct bpf_perf_event_data, addr):
2087 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2088 data), si->dst_reg, si->src_reg,
2089 offsetof(struct bpf_perf_event_data_kern, data));
2090 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2091 bpf_target_off(struct perf_sample_data, addr, 8,
2095 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2096 regs), si->dst_reg, si->src_reg,
2097 offsetof(struct bpf_perf_event_data_kern, regs));
2098 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2103 return insn - insn_buf;
2106 const struct bpf_verifier_ops perf_event_verifier_ops = {
2107 .get_func_proto = pe_prog_func_proto,
2108 .is_valid_access = pe_prog_is_valid_access,
2109 .convert_ctx_access = pe_prog_convert_ctx_access,
2112 const struct bpf_prog_ops perf_event_prog_ops = {
2115 static DEFINE_MUTEX(bpf_event_mutex);
2117 #define BPF_TRACE_MAX_PROGS 64
2119 int perf_event_attach_bpf_prog(struct perf_event *event,
2120 struct bpf_prog *prog,
2123 struct bpf_prog_array *old_array;
2124 struct bpf_prog_array *new_array;
2128 * Kprobe override only works if the kprobe is at function entry,
2129 * and only if the probed function is on the error-injection opt-in list.
2131 if (prog->kprobe_override &&
2132 (!trace_kprobe_on_func_entry(event->tp_event) ||
2133 !trace_kprobe_error_injectable(event->tp_event)))
2136 mutex_lock(&bpf_event_mutex);
2141 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2143 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2148 ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2152 /* set the new array to event->tp_event and set event->prog */
2154 event->bpf_cookie = bpf_cookie;
2155 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2156 bpf_prog_array_free_sleepable(old_array);
2159 mutex_unlock(&bpf_event_mutex);
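/*
 * Usage sketch (illustration only, not compiled here): the usual userspace
 * route into perf_event_attach_bpf_prog() is the perf ioctl on a tracepoint
 * or k/uprobe perf event. Error handling is omitted and the attr setup and
 * program load are assumed to have happened elsewhere.
 *
 *	int evt_fd  = perf_event_open(&attr, -1, 0, -1, 0);	// e.g. PERF_TYPE_TRACEPOINT
 *	int prog_fd = ...;					// loaded BPF program fd
 *
 *	ioctl(evt_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);		// ends up here
 *	ioctl(evt_fd, PERF_EVENT_IOC_ENABLE, 0);
 */
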
void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free_sleepable(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

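/*
 * Usage sketch (illustration only, not compiled here): the two-step query
 * pattern the ZERO_SIZE_PTR comment above refers to. Userspace first asks
 * with ids_len = 0 to learn prog_cnt, then retries with a buffer of that
 * size. Error handling and the evt_fd setup are omitted.
 *
 *	struct perf_event_query_bpf *q = calloc(1, sizeof(*q));
 *
 *	q->ids_len = 0;
 *	ioctl(evt_fd, PERF_EVENT_IOC_QUERY_BPF, q);	// fills q->prog_cnt only
 *
 *	q = realloc(q, sizeof(*q) + q->prog_cnt * sizeof(__u32));
 *	q->ids_len = q->prog_cnt;
 *	ioctl(evt_fd, PERF_EVENT_IOC_QUERY_BPF, q);	// fills q->ids[]
 */
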
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		goto out;
	}
	rcu_read_lock();
	(void) bpf_prog_run(prog, args);
	rcu_read_unlock();
out:
	this_cpu_dec(*(prog->active));
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
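
/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands roughly to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * i.e. the REPEAT()/SARG()/COPY() machinery only spells out the N u64
 * parameters and the copies into the on-stack args array that the raw
 * tracepoint program receives as its context.
 */
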
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that the program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */

#ifdef CONFIG_FPROBE
struct bpf_kprobe_multi_link {
	struct bpf_link link;
	struct fprobe fp;
	unsigned long *addrs;
	u64 *cookies;
	u32 cnt;
	u32 mods_cnt;
	struct module **mods;
};

struct bpf_kprobe_multi_run_ctx {
	struct bpf_run_ctx run_ctx;
	struct bpf_kprobe_multi_link *link;
	unsigned long entry_ip;
};

struct user_syms {
	const char **syms;
	char *buf;
};

static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
{
	unsigned long __user usymbol;
	const char **syms = NULL;
	char *buf = NULL, *p;
	int err = -ENOMEM;
	unsigned int i;

	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
	if (!syms)
		goto error;

	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
	if (!buf)
		goto error;

	for (p = buf, i = 0; i < cnt; i++) {
		if (__get_user(usymbol, usyms + i)) {
			err = -EFAULT;
			goto error;
		}
		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
		if (err == KSYM_NAME_LEN)
			err = -E2BIG;
		if (err < 0)
			goto error;
		syms[i] = p;
		p += err + 1;
	}

	us->syms = syms;
	us->buf = buf;
	return 0;

error:
	kvfree(syms);
	kvfree(buf);
	return err;
}

static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++)
		module_put(mods[i]);
}

static void free_user_syms(struct user_syms *us)
{
	kvfree(us->syms);
	kvfree(us->buf);
}

static void bpf_kprobe_multi_link_release(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	unregister_fprobe(&kmulti_link->fp);
	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
}

static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	kvfree(kmulti_link->addrs);
	kvfree(kmulti_link->cookies);
	kfree(kmulti_link->mods);
	kfree(kmulti_link);
}

static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
	.release = bpf_kprobe_multi_link_release,
	.dealloc = bpf_kprobe_multi_link_dealloc,
};

static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
{
	const struct bpf_kprobe_multi_link *link = priv;
	unsigned long *addr_a = a, *addr_b = b;
	u64 *cookie_a, *cookie_b;

	cookie_a = link->cookies + (addr_a - link->addrs);
	cookie_b = link->cookies + (addr_b - link->addrs);

	/* swap addr_a/addr_b and cookie_a/cookie_b values */
	swap(*addr_a, *addr_b);
	swap(*cookie_a, *cookie_b);
}

static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
{
	const unsigned long *addr_a = a, *addr_b = b;

	if (*addr_a == *addr_b)
		return 0;
	return *addr_a < *addr_b ? -1 : 1;
}

static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
{
	return bpf_kprobe_multi_addrs_cmp(a, b);
}

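/*
 * Note on the cmp/swap helpers above: sort_r() compares only addresses, but
 * bpf_kprobe_multi_cookie_swap() also moves the cookie sitting at the same
 * index whenever two addresses are exchanged, so addrs[i] and cookies[i]
 * stay paired. A minimal sketch of the idea (hypothetical standalone data,
 * not how the attach path builds the link):
 *
 *	unsigned long addrs[] = { 0x30, 0x10, 0x20 };
 *	u64 cookies[]         = {    3,    1,    2 };
 *	struct bpf_kprobe_multi_link link = {
 *		.addrs = addrs, .cookies = cookies,
 *	};
 *
 *	sort_r(addrs, 3, sizeof(*addrs), bpf_kprobe_multi_cookie_cmp,
 *	       bpf_kprobe_multi_cookie_swap, &link);
 *	// afterwards: addrs == { 0x10, 0x20, 0x30 }, cookies == { 1, 2, 3 }
 */
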
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;
	struct bpf_kprobe_multi_link *link;
	u64 *cookie, entry_ip;
	unsigned long *addr;

	if (WARN_ON_ONCE(!ctx))
		return 0;
	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	link = run_ctx->link;
	if (!link->cookies)
		return 0;
	entry_ip = run_ctx->entry_ip;
	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
		       bpf_kprobe_multi_addrs_cmp);
	if (!addr)
		return 0;
	cookie = link->cookies + (addr - link->addrs);
	return *cookie;
}

static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	return run_ctx->entry_ip;
}

static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
			   unsigned long entry_ip, struct pt_regs *regs)
{
	struct bpf_kprobe_multi_run_ctx run_ctx = {
		.link = link,
		.entry_ip = entry_ip,
	};
	struct bpf_run_ctx *old_run_ctx;
	int err;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		err = 0;
		goto out;
	}

	migrate_disable();
	rcu_read_lock();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();

out:
	__this_cpu_dec(bpf_prog_active);
	return err;
}

static void
kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
			  struct pt_regs *regs)
{
	struct bpf_kprobe_multi_link *link;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
}

static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

struct multi_symbols_sort {
	const char **funcs;
	u64 *cookies;
};

static void symbols_swap_r(void *a, void *b, int size, const void *priv)
{
	const struct multi_symbols_sort *data = priv;
	const char **name_a = a, **name_b = b;

	swap(*name_a, *name_b);

	/* If cookies are provided, also swap the related cookies. */
	if (data->cookies) {
		u64 *cookie_a, *cookie_b;

		cookie_a = data->cookies + (name_a - data->funcs);
		cookie_b = data->cookies + (name_b - data->funcs);
		swap(*cookie_a, *cookie_b);
	}
}

struct module_addr_args {
	unsigned long *addrs;
	u32 addrs_cnt;
	struct module **mods;
	int mods_cnt;
	int mods_cap;
};

static int module_callback(void *data, const char *name,
			   struct module *mod, unsigned long addr)
{
	struct module_addr_args *args = data;
	struct module **mods;

	/* We iterate over all module symbols and for each we:
	 * - search for it in the provided addresses array
	 * - if found, check whether we already have the module pointer stored
	 *   (modules are iterated sequentially, so checking just the last
	 *   stored module pointer is enough)
	 * - take a module reference and store it
	 */
	if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr),
		     bpf_kprobe_multi_addrs_cmp))
		return 0;

	if (args->mods && args->mods[args->mods_cnt - 1] == mod)
		return 0;

	if (args->mods_cnt == args->mods_cap) {
		args->mods_cap = max(16, args->mods_cap * 3 / 2);
		mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL);
		if (!mods)
			return -ENOMEM;
		args->mods = mods;
	}

	if (!try_module_get(mod))
		return -EINVAL;

	args->mods[args->mods_cnt] = mod;
	args->mods_cnt++;
	return 0;
}

static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
{
	struct module_addr_args args = {
		.addrs     = addrs,
		.addrs_cnt = addrs_cnt,
	};
	int err;

	/* We return either err < 0 in case of error, ... */
	err = module_kallsyms_on_each_symbol(module_callback, &args);
	if (err) {
		kprobe_multi_put_modules(args.mods, args.mods_cnt);
		kfree(args.mods);
		return err;
	}

	/* or the number of modules found if everything is ok. */
	*mods = args.mods;
	return args.mods_cnt;
}

int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_kprobe_multi_link *link = NULL;
	struct bpf_link_primer link_primer;
	void __user *ucookies;
	unsigned long *addrs;
	u32 flags, cnt, size;
	void __user *uaddrs;
	u64 *cookies = NULL;
	void __user *usyms;
	int err;

	/* no support for 32-bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
		return -EINVAL;

	flags = attr->link_create.kprobe_multi.flags;
	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
		return -EINVAL;

	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
	if (!!uaddrs == !!usyms)
		return -EINVAL;

	cnt = attr->link_create.kprobe_multi.cnt;
	if (!cnt)
		return -EINVAL;

	size = cnt * sizeof(*addrs);
	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
	if (ucookies) {
		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
		if (!cookies) {
			err = -ENOMEM;
			goto error;
		}
		if (copy_from_user(cookies, ucookies, size)) {
			err = -EFAULT;
			goto error;
		}
	}

	if (uaddrs) {
		if (copy_from_user(addrs, uaddrs, size)) {
			err = -EFAULT;
			goto error;
		}
	} else {
		struct multi_symbols_sort data = {
			.cookies = cookies,
		};
		struct user_syms us;

		err = copy_user_syms(&us, usyms, cnt);
		if (err)
			goto error;

		if (cookies)
			data.funcs = us.syms;

		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
		       symbols_swap_r, &data);

		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
		free_user_syms(&us);
		if (err)
			goto error;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		err = -ENOMEM;
		goto error;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
		      &bpf_kprobe_multi_link_lops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error;

	if (flags & BPF_F_KPROBE_MULTI_RETURN)
		link->fp.exit_handler = kprobe_multi_link_handler;
	else
		link->fp.entry_handler = kprobe_multi_link_handler;

	link->addrs = addrs;
	link->cookies = cookies;
	link->cnt = cnt;

	if (cookies) {
		/*
		 * Sorting addresses will trigger sorting cookies as well
		 * (check bpf_kprobe_multi_cookie_swap). This way we can
		 * find the cookie based on the address in the
		 * bpf_get_attach_cookie helper.
		 */
		sort_r(addrs, cnt, sizeof(*addrs),
		       bpf_kprobe_multi_cookie_cmp,
		       bpf_kprobe_multi_cookie_swap,
		       link);
	} else {
		/*
		 * We need to sort the addrs array even if there are no cookies
		 * provided, to allow the bsearch in get_modules_for_addrs.
		 */
		sort(addrs, cnt, sizeof(*addrs),
		     bpf_kprobe_multi_addrs_cmp, NULL);
	}

	err = get_modules_for_addrs(&link->mods, addrs, cnt);
	if (err < 0) {
		bpf_link_cleanup(&link_primer);
		return err;
	}
	link->mods_cnt = err;

	err = register_fprobe_ips(&link->fp, addrs, cnt);
	if (err) {
		kprobe_multi_put_modules(link->mods, link->mods_cnt);
		bpf_link_cleanup(&link_primer);
		return err;
	}

	return bpf_link_settle(&link_primer);

error:
	kfree(link);
	kvfree(addrs);
	kvfree(cookies);
	return err;
}

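/*
 * Usage sketch (illustration only, not compiled here): the bpf_attr fields
 * consumed above, as userspace would fill them for BPF_LINK_CREATE. Either
 * syms or addrs must be set, never both; cookies is optional and, when
 * present, must have cnt entries. The symbol names, cookie values and the
 * raw syscall wrapper are hypothetical examples.
 *
 *	const char *syms[] = { "do_sys_open", "ksys_read" };
 *	__u64 cookies[]    = { 1, 2 };
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.attach_type = BPF_TRACE_KPROBE_MULTI;
 *	attr.link_create.kprobe_multi.cnt     = 2;
 *	attr.link_create.kprobe_multi.syms    = (__u64)(unsigned long)syms;
 *	attr.link_create.kprobe_multi.cookies = (__u64)(unsigned long)cookies;
 *	attr.link_create.kprobe_multi.flags   = 0;	// or BPF_F_KPROBE_MULTI_RETURN
 *
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */
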
#else /* !CONFIG_FPROBE */
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}

static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)