// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"
#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send a kprobe event into the ring buffer,
		 * so return zero here.
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under the RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, we skip the prog_array at the risk of missing
	 * events attached in between that check and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY(call->prog_array, ctx, bpf_prog_run);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
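/*
 * Example (illustrative, assuming libbpf conventions; not part of the
 * kernel sources): a minimal kprobe program exercising the return
 * contract documented above:
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		return 1;	// 1: store the event, 0: filter it out
 *	}
 */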
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif
static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
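/*
 * Example (illustrative): from a kprobe program, a user-space pointer held
 * in a register can be copied out with this helper, e.g.:
 *
 *	char name[32];
 *	bpf_probe_read_user(name, sizeof(name), (void *)PT_REGS_PARM1(ctx));
 *
 * On failure the destination is zeroed, see bpf_probe_read_user_common().
 */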
static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If the user takes
	 * `dst` and keys a hash map with it, then semantically identical
	 * strings can occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the return
	 * code altogether don't copy garbage; otherwise the length of the
	 * string is returned, which can be used for bpf_perf_event_output()
	 * et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
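/*
 * Example (illustrative): the returned length (including the NUL) can be
 * fed straight into bpf_perf_event_output() as described above, e.g.:
 *
 *	len = bpf_probe_read_kernel_str(buf, sizeof(buf), name_ptr);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      buf, len);
 */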
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context, which is safe for the helper to
	 * run in. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */
	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
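/*
 * Note that bpf_probe_write_user is additionally gated by
 * security_locked_down(LOCKDOWN_BPF_WRITE_USER) in bpf_tracing_func_proto()
 * below, so both CAP_SYS_ADMIN and an unlocked kernel are required.
 */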
static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	u32 *bin_args;
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
				  MAX_TRACE_PRINTK_VARARGS);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
};

static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events; by loading a program
	 * that calls bpf_trace_printk() the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}
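/*
 * Example (illustrative): output from bpf_trace_printk() goes through the
 * bpf_trace/bpf_trace_printk event enabled above and is typically read via
 * tracefs:
 *
 *	char fmt[] = "pid %d\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), bpf_get_current_pid_tgid() >> 32);
 *
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 */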
BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
	   u32, data_len)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func		= bpf_trace_vprintk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}
BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, bin_args);

	bpf_bprintf_cleanup();

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
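/*
 * Example (illustrative): from a BPF_TRACE_ITER program this helper is
 * usually reached through libbpf's BPF_SEQ_PRINTF() macro, which packs the
 * u64 varargs into the data/data_len pair (8 bytes per argument):
 *
 *	BPF_SEQ_PRINTF(ctx->meta->seq, "pid: %d\n", task->pid);
 */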
BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}
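/*
 * flags encodes the target index in its low 32 bits (BPF_F_INDEX_MASK);
 * BPF_F_CURRENT_CPU selects the current CPU's slot. E.g.
 * bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU) reads this CPU's
 * counter from a BPF_MAP_TYPE_PERF_EVENT_ARRAY.
 */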
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}
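/*
 * The checks above mean the map slot must hold a perf event opened
 * (illustrative user-space sketch) as:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_BPF_OUTPUT,
 *	};
 *
 * and opened on the same CPU the program runs on (event->oncpu == cpu).
 */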
/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output.
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}
BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func		= bpf_task_pt_regs,
	.gpl_only	= true,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
};
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, the task needs to be
	 * in a sound state and kernel memory access must be
	 * permitted in order to send a signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of the
		 * signal, to the irq_work. The current task may change
		 * when queued irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
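/*
 * bpf_send_signal(sig) targets the current process (PIDTYPE_TGID) while
 * bpf_send_signal_thread(sig) targets the current thread (PIDTYPE_PID);
 * with IRQs disabled, delivery is deferred to the irq_work above.
 */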
BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};
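/*
 * Example (illustrative): an fentry program attached to one of the
 * allowlisted functions above could resolve a path like this, assuming
 * libbpf's BPF_PROG() convenience macro:
 *
 *	SEC("fentry/filp_close")
 *	int BPF_PROG(trace_close, struct file *file)
 *	{
 *		char buf[64];
 *		bpf_d_path(&file->f_path, buf, sizeof(buf));
 *		return 0;
 *	}
 */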
#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
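/*
 * Example (illustrative): pretty-print a kernel struct via its vmlinux BTF
 * type id, which a program would obtain up front (e.g. with libbpf's
 * bpf_core_type_id_kernel()):
 *
 *	struct btf_ptr p = { .ptr = task, .type_id = task_btf_id };
 *	bpf_snprintf_btf(str, sizeof(str), &p, sizeof(p), 0);
 */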
BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func		= bpf_get_func_ip_tracing,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct kprobe *kp = kprobe_running();

	return kp ? (uintptr_t)kp->addr : 0;
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func		= bpf_get_func_ip_kprobe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func		= bpf_get_func_ip_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func		= bpf_get_attach_cookie_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func		= bpf_get_attach_cookie_trace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func		= bpf_get_attach_cookie_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func		= bpf_get_branch_snapshot,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func		= get_func_arg,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func		= get_func_ret,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func		= get_func_arg_cnt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
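/*
 * These three helpers rely on the trampoline laying out the context as
 * [arg0 .. argN-1][retval], with the argument count at ctx[-1] and the
 * traced function's IP at ctx[-2]. E.g. get_func_arg(ctx, 1, &v) fetches
 * the traced function's second argument.
 */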
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : bpf_get_probe_write_proto();
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
	case BPF_FUNC_copy_from_user_task:
		return prog->aux->sleepable ? &bpf_copy_from_user_task_proto : NULL;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_task_storage_get:
		return &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		return &bpf_task_storage_delete_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_tracing;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_find_vma:
		return &bpf_find_vma_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	case BPF_FUNC_get_func_ip:
		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
			&bpf_get_func_ip_proto_kprobe_multi :
			&bpf_get_func_ip_proto_kprobe;
	case BPF_FUNC_get_attach_cookie:
		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
			&bpf_get_attach_cookie_proto_kmulti :
			&bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure the last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to the perf tracepoint buffer where the first 8 bytes
	 * are hidden from the bpf program and contain a pointer to
	 * 'struct pt_regs'. Fetch it from there and call the same
	 * bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call the raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};
BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_pe;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}
/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid a potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;
extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}
const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;

	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
	case BPF_FUNC_skc_to_unix_sock:
		return &bpf_skc_to_unix_sock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_tracing_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_tracing_proto;
	case BPF_FUNC_sock_from_file:
		return &bpf_sock_from_file_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_ptr_cookie_proto;
	case BPF_FUNC_xdp_get_buff_len:
		return &bpf_xdp_get_buff_len_trace_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_seq_printf_btf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_btf_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	case BPF_FUNC_get_func_arg:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
	case BPF_FUNC_get_func_ret:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
	case BPF_FUNC_get_func_arg_cnt:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
	default:
		fn = raw_tp_prog_func_proto(func_id, prog);
		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
			fn = bpf_iter_get_func_proto(func_id, prog);
		return fn;
	}
}
static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_ctx_access(off, size, type);
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};
static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog,
			       u64 bpf_cookie)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the probe is on the function entry,
	 * and only if the function is on the error-injection opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) bpf_prog_run(prog, args);
	rcu_read_unlock();
}
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
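/*
 * For reference, BPF_TRACE_DEFN_x(2) expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */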
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * Check that the program doesn't access arguments beyond what's
	 * available in this tracepoint.
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);
#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */
#ifdef CONFIG_FPROBE
struct bpf_kprobe_multi_link {
	struct bpf_link link;
	struct fprobe fp;
	unsigned long *addrs;
	u64 *cookies;
	u32 cnt;
};

struct bpf_kprobe_multi_run_ctx {
	struct bpf_run_ctx run_ctx;
	struct bpf_kprobe_multi_link *link;
	unsigned long entry_ip;
};

static void bpf_kprobe_multi_link_release(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	unregister_fprobe(&kmulti_link->fp);
}

static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	kvfree(kmulti_link->addrs);
	kvfree(kmulti_link->cookies);
	kfree(kmulti_link);
}

static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
	.release = bpf_kprobe_multi_link_release,
	.dealloc = bpf_kprobe_multi_link_dealloc,
};
static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
{
	const struct bpf_kprobe_multi_link *link = priv;
	unsigned long *addr_a = a, *addr_b = b;
	u64 *cookie_a, *cookie_b;
	unsigned long tmp1;
	u64 tmp2;

	cookie_a = link->cookies + (addr_a - link->addrs);
	cookie_b = link->cookies + (addr_b - link->addrs);

	/* swap addr_a/addr_b and cookie_a/cookie_b values */
	tmp1 = *addr_a; *addr_a = *addr_b; *addr_b = tmp1;
	tmp2 = *cookie_a; *cookie_a = *cookie_b; *cookie_b = tmp2;
}

static int __bpf_kprobe_multi_cookie_cmp(const void *a, const void *b)
{
	const unsigned long *addr_a = a, *addr_b = b;

	if (*addr_a == *addr_b)
		return 0;
	return *addr_a < *addr_b ? -1 : 1;
}

static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
{
	return __bpf_kprobe_multi_cookie_cmp(a, b);
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;
	struct bpf_kprobe_multi_link *link;
	u64 *cookie, entry_ip;
	unsigned long *addr;

	if (WARN_ON_ONCE(!ctx))
		return 0;
	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	link = run_ctx->link;
	if (!link->cookies)
		return 0;
	entry_ip = run_ctx->entry_ip;
	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
		       __bpf_kprobe_multi_cookie_cmp);
	if (!addr)
		return 0;
	cookie = link->cookies + (addr - link->addrs);
	return *cookie;
}

static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	return run_ctx->entry_ip;
}
static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
			   unsigned long entry_ip, struct pt_regs *regs)
{
	struct bpf_kprobe_multi_run_ctx run_ctx = {
		.link = link,
		.entry_ip = entry_ip,
	};
	struct bpf_run_ctx *old_run_ctx;
	int err;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		err = 0;
		goto out;
	}

	migrate_disable();
	rcu_read_lock();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();

 out:
	__this_cpu_dec(bpf_prog_active);
	return err;
}

static void
kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
			  struct pt_regs *regs)
{
	struct bpf_kprobe_multi_link *link;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
	kprobe_multi_link_prog_run(link, entry_ip, regs);
}
static int
kprobe_multi_resolve_syms(const void __user *usyms, u32 cnt,
			  unsigned long *addrs)
{
	unsigned long addr, size;
	const char __user **syms;
	int err = -ENOMEM;
	unsigned int i;
	char *func;

	size = cnt * sizeof(*syms);
	syms = kvzalloc(size, GFP_KERNEL);
	if (!syms)
		return -ENOMEM;

	func = kmalloc(KSYM_NAME_LEN, GFP_KERNEL);
	if (!func)
		goto error;

	if (copy_from_user(syms, usyms, size)) {
		err = -EFAULT;
		goto error;
	}

	for (i = 0; i < cnt; i++) {
		err = strncpy_from_user(func, syms[i], KSYM_NAME_LEN);
		if (err == KSYM_NAME_LEN)
			err = -E2BIG;
		if (err < 0)
			goto error;
		err = -EINVAL;
		addr = kallsyms_lookup_name(func);
		if (!addr)
			goto error;
		if (!kallsyms_lookup_size_offset(addr, &size, NULL))
			goto error;
		addr = ftrace_location_range(addr, addr + size - 1);
		if (!addr)
			goto error;
		addrs[i] = addr;
	}

	err = 0;
error:
	kvfree(syms);
	kfree(func);
	return err;
}
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_kprobe_multi_link *link = NULL;
	struct bpf_link_primer link_primer;
	void __user *ucookies;
	unsigned long *addrs;
	u32 flags, cnt, size;
	void __user *uaddrs;
	u64 *cookies = NULL;
	void __user *usyms;
	int err;

	/* no support for 32bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
		return -EINVAL;

	flags = attr->link_create.kprobe_multi.flags;
	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
		return -EINVAL;

	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
	if (!!uaddrs == !!usyms)
		return -EINVAL;

	cnt = attr->link_create.kprobe_multi.cnt;
	if (!cnt)
		return -EINVAL;

	size = cnt * sizeof(*addrs);
	addrs = kvmalloc(size, GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	if (uaddrs) {
		if (copy_from_user(addrs, uaddrs, size)) {
			err = -EFAULT;
			goto error;
		}
	} else {
		err = kprobe_multi_resolve_syms(usyms, cnt, addrs);
		if (err)
			goto error;
	}

	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
	if (ucookies) {
		cookies = kvmalloc(size, GFP_KERNEL);
		if (!cookies) {
			err = -ENOMEM;
			goto error;
		}
		if (copy_from_user(cookies, ucookies, size)) {
			err = -EFAULT;
			goto error;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		err = -ENOMEM;
		goto error;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
		      &bpf_kprobe_multi_link_lops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error;

	if (flags & BPF_F_KPROBE_MULTI_RETURN)
		link->fp.exit_handler = kprobe_multi_link_handler;
	else
		link->fp.entry_handler = kprobe_multi_link_handler;

	link->addrs = addrs;
	link->cookies = cookies;
	link->cnt = cnt;

	if (cookies) {
		/*
		 * Sorting addresses will trigger sorting cookies as well
		 * (check bpf_kprobe_multi_cookie_swap). This way we can
		 * find the cookie based on the address in the
		 * bpf_get_attach_cookie() helper.
		 */
		sort_r(addrs, cnt, sizeof(*addrs),
		       bpf_kprobe_multi_cookie_cmp,
		       bpf_kprobe_multi_cookie_swap,
		       link);
	}

	err = register_fprobe_ips(&link->fp, addrs, cnt);
	if (err) {
		bpf_link_cleanup(&link_primer);
		return err;
	}

	return bpf_link_settle(&link_primer);

error:
	kfree(link);
	kvfree(addrs);
	kvfree(cookies);
	return err;
}
#else /* !CONFIG_FPROBE */
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif