1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/btf.h>
11 #include <linux/filter.h>
12 #include <linux/uaccess.h>
13 #include <linux/ctype.h>
14 #include <linux/kprobes.h>
15 #include <linux/spinlock.h>
16 #include <linux/syscalls.h>
17 #include <linux/error-injection.h>
18 #include <linux/btf_ids.h>
19 #include <linux/bpf_lsm.h>
21 #include <net/bpf_sk_storage.h>
23 #include <uapi/linux/bpf.h>
24 #include <uapi/linux/btf.h>
28 #include "trace_probe.h"
31 #define CREATE_TRACE_POINTS
32 #include "bpf_trace.h"
34 #define bpf_event_rcu_dereference(p) \
35 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
38 struct bpf_trace_module {
39 struct module *module;
40 struct list_head list;
43 static LIST_HEAD(bpf_trace_modules);
44 static DEFINE_MUTEX(bpf_module_mutex);
46 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
48 struct bpf_raw_event_map *btp, *ret = NULL;
49 struct bpf_trace_module *btm;
52 mutex_lock(&bpf_module_mutex);
53 list_for_each_entry(btm, &bpf_trace_modules, list) {
54 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
55 btp = &btm->module->bpf_raw_events[i];
56 if (!strcmp(btp->tp->name, name)) {
57 if (try_module_get(btm->module))
64 mutex_unlock(&bpf_module_mutex);
68 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
72 #endif /* CONFIG_MODULES */
74 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
75 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
77 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
78 u64 flags, const struct btf **btf,
82 * trace_call_bpf - invoke BPF program
83 * @call: tracepoint event
84 * @ctx: opaque context pointer
86 * kprobe handlers execute BPF programs via this helper.
87 * Can be used from static tracepoints in the future.
89 * Return: BPF programs always return an integer which is interpreted by the kprobe handler as:
91 * 0 - return from kprobe (event is filtered out)
92 * 1 - store kprobe event into ring buffer
93 * Other values are reserved and currently alias to 1
95 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
101 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
103 * since some bpf program is already running on this cpu,
104 * don't call into another bpf program (same or different)
105 * and don't send kprobe event into ring-buffer,
106 * so return zero here
113 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
114 * to all call sites, we did a bpf_prog_array_valid() there to check
115 * whether call->prog_array is empty or not, which is
116 * a heuristic to speed up execution.
118 * If bpf_prog_array_valid() fetched prog_array was
119 * non-NULL, we go into trace_call_bpf() and do the actual
120 * proper rcu_dereference() under RCU lock.
121 * If it turns out that prog_array is NULL, we bail out.
122 * Conversely, if the pointer fetched by bpf_prog_array_valid() was
123 * NULL, the prog_array is skipped, accepting the risk of missing
124 * events if it was updated between that check and the
125 * rcu_dereference() here.
127 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
130 __this_cpu_dec(bpf_prog_active);
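/*
 * Illustrative sketch (not part of this file's code): the return convention
 * documented above lets a kprobe program filter events. A hypothetical
 * libbpf-style program (interesting() is a made-up predicate):
 *
 *	SEC("kprobe/kfree_skb")
 *	int drop_uninteresting(struct pt_regs *ctx)
 *	{
 *		if (!interesting(ctx))
 *			return 0;	// filtered out, nothing recorded
 *		return 1;		// store kprobe event into ring buffer
 *	}
 */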
135 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
136 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
138 regs_set_return_value(regs, rc);
139 override_function_with_return(regs);
143 static const struct bpf_func_proto bpf_override_return_proto = {
144 .func = bpf_override_return,
146 .ret_type = RET_INTEGER,
147 .arg1_type = ARG_PTR_TO_CTX,
148 .arg2_type = ARG_ANYTHING,
152 static __always_inline int
153 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
157 ret = copy_from_user_nofault(dst, unsafe_ptr, size);
158 if (unlikely(ret < 0))
159 memset(dst, 0, size);
163 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
164 const void __user *, unsafe_ptr)
166 return bpf_probe_read_user_common(dst, size, unsafe_ptr);
169 const struct bpf_func_proto bpf_probe_read_user_proto = {
170 .func = bpf_probe_read_user,
172 .ret_type = RET_INTEGER,
173 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
174 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
175 .arg3_type = ARG_ANYTHING,
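/*
 * Illustrative sketch (not part of this file's code): a typical BPF-side
 * call to the helper defined above, assuming a libbpf-style program using
 * <bpf/bpf_helpers.h> and <bpf/bpf_tracing.h>; the attach point is only an
 * example.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int BPF_KPROBE(probe_openat2, int dfd, const char *filename)
 *	{
 *		char buf[64];
 *
 *		// On failure the destination is zero-filled, matching
 *		// bpf_probe_read_user_common() above.
 *		bpf_probe_read_user(buf, sizeof(buf), filename);
 *		return 0;
 *	}
 */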
178 static __always_inline int
179 bpf_probe_read_user_str_common(void *dst, u32 size,
180 const void __user *unsafe_ptr)
185 * NB: We rely on strncpy_from_user() not copying junk past the NUL
186 * terminator into `dst`.
188 * strncpy_from_user() does long-sized strides in the fast path. If the
189 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
190 * then there could be junk after the NUL in `dst`. If the user takes `dst`
191 * and keys a hash map with it, then semantically identical strings can
192 * occupy multiple entries in the map.
194 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
195 if (unlikely(ret < 0))
196 memset(dst, 0, size);
200 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
201 const void __user *, unsafe_ptr)
203 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
206 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
207 .func = bpf_probe_read_user_str,
209 .ret_type = RET_INTEGER,
210 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
211 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
212 .arg3_type = ARG_ANYTHING,
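/*
 * Illustrative sketch (not part of this file's code): reading a
 * NUL-terminated user string. Per the comment in
 * bpf_probe_read_user_str_common(), no junk is copied past the NUL, so the
 * result can safely key a hash map. The map and variable names below are
 * hypothetical BPF-side code:
 *
 *	char name[64];
 *	long len;
 *
 *	// On success len is the string length including the trailing NUL;
 *	// on failure the buffer is zero-filled and len is negative.
 *	len = bpf_probe_read_user_str(name, sizeof(name), user_ptr);
 *	if (len > 0)
 *		bpf_map_update_elem(&counts, name, &one, BPF_ANY);
 */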
215 static __always_inline int
216 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
218 int ret = security_locked_down(LOCKDOWN_BPF_READ);
220 if (unlikely(ret < 0))
222 ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
223 if (unlikely(ret < 0))
227 memset(dst, 0, size);
231 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
232 const void *, unsafe_ptr)
234 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
237 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
238 .func = bpf_probe_read_kernel,
240 .ret_type = RET_INTEGER,
241 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
242 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
243 .arg3_type = ARG_ANYTHING,
246 static __always_inline int
247 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
249 int ret = security_locked_down(LOCKDOWN_BPF_READ);
251 if (unlikely(ret < 0))
255 * The strncpy_from_kernel_nofault() call will likely not fill the
256 * entire buffer, but that's okay in this circumstance as we're probing
257 * arbitrary memory anyway, similar to bpf_probe_read_*(), and might
258 * as well probe the stack. Thus, memory is explicitly cleared only in
259 * the error case, so that callers who ignore the return code
260 * altogether don't copy garbage; otherwise the string length is
261 * returned and can be used for bpf_perf_event_output() et al.
263 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
264 if (unlikely(ret < 0))
269 memset(dst, 0, size);
273 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
274 const void *, unsafe_ptr)
276 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
279 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
280 .func = bpf_probe_read_kernel_str,
282 .ret_type = RET_INTEGER,
283 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
284 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
285 .arg3_type = ARG_ANYTHING,
288 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
289 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
290 const void *, unsafe_ptr)
292 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
293 return bpf_probe_read_user_common(dst, size,
294 (__force void __user *)unsafe_ptr);
296 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
299 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
300 .func = bpf_probe_read_compat,
302 .ret_type = RET_INTEGER,
303 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
304 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
305 .arg3_type = ARG_ANYTHING,
308 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
309 const void *, unsafe_ptr)
311 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
312 return bpf_probe_read_user_str_common(dst, size,
313 (__force void __user *)unsafe_ptr);
315 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
318 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
319 .func = bpf_probe_read_compat_str,
321 .ret_type = RET_INTEGER,
322 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
323 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
324 .arg3_type = ARG_ANYTHING,
326 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
328 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
332 * Ensure we're in user context which is safe for the helper to
333 * run. This helper has no business in a kthread.
335 * access_ok() should prevent writing to non-user memory, but in
336 * some situations (nommu, temporary switch, etc) access_ok() does
337 * not provide enough validation, hence the check on KERNEL_DS.
339 * nmi_uaccess_okay() ensures the probe is not run in an interim
340 * state, when the task or mm are switched. This is specifically
341 * required to prevent the use of temporary mm.
344 if (unlikely(in_interrupt() ||
345 current->flags & (PF_KTHREAD | PF_EXITING)))
347 if (unlikely(uaccess_kernel()))
349 if (unlikely(!nmi_uaccess_okay()))
352 return copy_to_user_nofault(unsafe_ptr, src, size);
355 static const struct bpf_func_proto bpf_probe_write_user_proto = {
356 .func = bpf_probe_write_user,
358 .ret_type = RET_INTEGER,
359 .arg1_type = ARG_ANYTHING,
360 .arg2_type = ARG_PTR_TO_MEM,
361 .arg3_type = ARG_CONST_SIZE,
364 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
366 if (!capable(CAP_SYS_ADMIN))
369 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
370 current->comm, task_pid_nr(current));
372 return &bpf_probe_write_user_proto;
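/*
 * Illustrative sketch (not part of this file's code): bpf_probe_write_user()
 * is only handed out to CAP_SYS_ADMIN loaders (see above) and emits a
 * ratelimited warning, since it can corrupt user memory. A hypothetical
 * libbpf-style use from a kprobe:
 *
 *	SEC("kprobe/some_syscall")
 *	int patch_user_buf(struct pt_regs *ctx)
 *	{
 *		void *uptr = (void *)PT_REGS_PARM1(ctx);
 *		u32 val = 0;
 *
 *		// Fails in IRQ context, in kthreads, or whenever user
 *		// memory cannot safely be accessed (see checks above).
 *		bpf_probe_write_user(uptr, &val, sizeof(val));
 *		return 0;
 *	}
 */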
375 static DEFINE_RAW_SPINLOCK(trace_printk_lock);
377 #define MAX_TRACE_PRINTK_VARARGS 3
378 #define BPF_TRACE_PRINTK_SIZE 1024
380 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
381 u64, arg2, u64, arg3)
383 u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
385 static char buf[BPF_TRACE_PRINTK_SIZE];
389 ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
390 MAX_TRACE_PRINTK_VARARGS);
394 raw_spin_lock_irqsave(&trace_printk_lock, flags);
395 ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
397 trace_bpf_trace_printk(buf);
398 raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
400 bpf_bprintf_cleanup();
405 static const struct bpf_func_proto bpf_trace_printk_proto = {
406 .func = bpf_trace_printk,
408 .ret_type = RET_INTEGER,
409 .arg1_type = ARG_PTR_TO_MEM,
410 .arg2_type = ARG_CONST_SIZE,
413 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
416 * This program might be calling bpf_trace_printk,
417 * so enable the associated bpf_trace/bpf_trace_printk event.
418 * Repeat this each time, as it is possible a user has
419 * disabled bpf_trace_printk events. By loading a program that
420 * calls bpf_trace_printk(), however, the user has expressed
421 * the intent to see such events.
423 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
424 pr_warn_ratelimited("could not enable bpf_trace_printk events");
426 return &bpf_trace_printk_proto;
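/*
 * Illustrative sketch (not part of this file's code): bpf_trace_printk()
 * accepts at most MAX_TRACE_PRINTK_VARARGS (3) extra arguments and emits
 * the formatted string via the bpf_trace/bpf_trace_printk event enabled
 * above. A hypothetical BPF-side call:
 *
 *	char fmt[] = "pid %d opened fd %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, fd);
 */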
429 #define MAX_SEQ_PRINTF_VARARGS 12
431 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
432 const void *, data, u32, data_len)
437 if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 ||
440 num_args = data_len / 8;
442 err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
446 seq_bprintf(m, fmt, bin_args);
448 bpf_bprintf_cleanup();
450 return seq_has_overflowed(m) ? -EOVERFLOW : 0;
453 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
455 static const struct bpf_func_proto bpf_seq_printf_proto = {
456 .func = bpf_seq_printf,
458 .ret_type = RET_INTEGER,
459 .arg1_type = ARG_PTR_TO_BTF_ID,
460 .arg1_btf_id = &btf_seq_file_ids[0],
461 .arg2_type = ARG_PTR_TO_MEM,
462 .arg3_type = ARG_CONST_SIZE,
463 .arg4_type = ARG_PTR_TO_MEM_OR_NULL,
464 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
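/*
 * Illustrative sketch (not part of this file's code): bpf_seq_printf() is
 * handed out to BPF_TRACE_ITER programs (see tracing_prog_func_proto()
 * below). data_len must be a multiple of 8 and at most
 * MAX_SEQ_PRINTF_VARARGS * 8, one u64 slot per conversion. A hypothetical
 * task-iterator snippet:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *m = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		const char fmt[] = "%8d %s\n";
 *		u64 data[2];
 *
 *		if (!task)
 *			return 0;
 *		data[0] = task->pid;
 *		data[1] = (u64)task->comm;
 *		bpf_seq_printf(m, fmt, sizeof(fmt), data, sizeof(data));
 *		return 0;
 *	}
 */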
467 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
469 return seq_write(m, data, len) ? -EOVERFLOW : 0;
472 static const struct bpf_func_proto bpf_seq_write_proto = {
473 .func = bpf_seq_write,
475 .ret_type = RET_INTEGER,
476 .arg1_type = ARG_PTR_TO_BTF_ID,
477 .arg1_btf_id = &btf_seq_file_ids[0],
478 .arg2_type = ARG_PTR_TO_MEM,
479 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
482 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
483 u32, btf_ptr_size, u64, flags)
485 const struct btf *btf;
489 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
493 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
496 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
497 .func = bpf_seq_printf_btf,
499 .ret_type = RET_INTEGER,
500 .arg1_type = ARG_PTR_TO_BTF_ID,
501 .arg1_btf_id = &btf_seq_file_ids[0],
502 .arg2_type = ARG_PTR_TO_MEM,
503 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
504 .arg4_type = ARG_ANYTHING,
507 static __always_inline int
508 get_map_perf_counter(struct bpf_map *map, u64 flags,
509 u64 *value, u64 *enabled, u64 *running)
511 struct bpf_array *array = container_of(map, struct bpf_array, map);
512 unsigned int cpu = smp_processor_id();
513 u64 index = flags & BPF_F_INDEX_MASK;
514 struct bpf_event_entry *ee;
516 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
518 if (index == BPF_F_CURRENT_CPU)
520 if (unlikely(index >= array->map.max_entries))
523 ee = READ_ONCE(array->ptrs[index]);
527 return perf_event_read_local(ee->event, value, enabled, running);
530 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
535 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
537 * This API is ugly since counter values in the [-22..-2] range
538 * cannot be distinguished from errors, but that's UAPI.
545 static const struct bpf_func_proto bpf_perf_event_read_proto = {
546 .func = bpf_perf_event_read,
548 .ret_type = RET_INTEGER,
549 .arg1_type = ARG_CONST_MAP_PTR,
550 .arg2_type = ARG_ANYTHING,
553 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
554 struct bpf_perf_event_value *, buf, u32, size)
558 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
560 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
566 memset(buf, 0, size);
570 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
571 .func = bpf_perf_event_read_value,
573 .ret_type = RET_INTEGER,
574 .arg1_type = ARG_CONST_MAP_PTR,
575 .arg2_type = ARG_ANYTHING,
576 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
577 .arg4_type = ARG_CONST_SIZE,
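/*
 * Illustrative sketch (not part of this file's code): unlike
 * bpf_perf_event_read(), this variant keeps the error code separate from
 * the counter value and also reports enabled/running times. Hypothetical
 * BPF-side use with a BPF_MAP_TYPE_PERF_EVENT_ARRAY map named "counters":
 *
 *	struct bpf_perf_event_value v = {};
 *	int err;
 *
 *	// size must be exactly sizeof(struct bpf_perf_event_value),
 *	// otherwise -EINVAL is returned and the buffer is cleared.
 *	err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *					&v, sizeof(v));
 *	if (!err)
 *		use(v.counter, v.enabled, v.running);	// made-up consumer
 */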
580 static __always_inline u64
581 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
582 u64 flags, struct perf_sample_data *sd)
584 struct bpf_array *array = container_of(map, struct bpf_array, map);
585 unsigned int cpu = smp_processor_id();
586 u64 index = flags & BPF_F_INDEX_MASK;
587 struct bpf_event_entry *ee;
588 struct perf_event *event;
590 if (index == BPF_F_CURRENT_CPU)
592 if (unlikely(index >= array->map.max_entries))
595 ee = READ_ONCE(array->ptrs[index]);
600 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
601 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
604 if (unlikely(event->oncpu != cpu))
607 return perf_event_output(event, sd, regs);
611 * Support executing tracepoints in normal, irq, and nmi context, each of
612 * which may call bpf_perf_event_output().
614 struct bpf_trace_sample_data {
615 struct perf_sample_data sds[3];
618 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
619 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
620 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
621 u64, flags, void *, data, u64, size)
623 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
624 int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
625 struct perf_raw_record raw = {
631 struct perf_sample_data *sd;
634 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
639 sd = &sds->sds[nest_level - 1];
641 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
646 perf_sample_data_init(sd, 0, 0);
649 err = __bpf_perf_event_output(regs, map, flags, sd);
652 this_cpu_dec(bpf_trace_nest_level);
656 static const struct bpf_func_proto bpf_perf_event_output_proto = {
657 .func = bpf_perf_event_output,
659 .ret_type = RET_INTEGER,
660 .arg1_type = ARG_PTR_TO_CTX,
661 .arg2_type = ARG_CONST_MAP_PTR,
662 .arg3_type = ARG_ANYTHING,
663 .arg4_type = ARG_PTR_TO_MEM,
664 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
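/*
 * Illustrative sketch (not part of this file's code): emitting a raw sample
 * to user space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY. BPF_F_CURRENT_CPU
 * selects the per-CPU event slot (see __bpf_perf_event_output() above).
 * The map and struct names are hypothetical:
 *
 *	struct event { u32 pid; u64 ts; } e = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *		.ts  = bpf_ktime_get_ns(),
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */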
667 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
668 struct bpf_nested_pt_regs {
669 struct pt_regs regs[3];
671 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
672 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
674 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
675 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
677 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
678 struct perf_raw_frag frag = {
683 struct perf_raw_record raw = {
686 .next = ctx_size ? &frag : NULL,
692 struct perf_sample_data *sd;
693 struct pt_regs *regs;
696 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
700 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
701 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
703 perf_fetch_caller_regs(regs);
704 perf_sample_data_init(sd, 0, 0);
707 ret = __bpf_perf_event_output(regs, map, flags, sd);
709 this_cpu_dec(bpf_event_output_nest_level);
713 BPF_CALL_0(bpf_get_current_task)
715 return (long) current;
718 const struct bpf_func_proto bpf_get_current_task_proto = {
719 .func = bpf_get_current_task,
721 .ret_type = RET_INTEGER,
724 BPF_CALL_0(bpf_get_current_task_btf)
726 return (unsigned long) current;
729 BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)
731 static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
732 .func = bpf_get_current_task_btf,
734 .ret_type = RET_PTR_TO_BTF_ID,
735 .ret_btf_id = &bpf_get_current_btf_ids[0],
738 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
740 struct bpf_array *array = container_of(map, struct bpf_array, map);
743 if (unlikely(idx >= array->map.max_entries))
746 cgrp = READ_ONCE(array->ptrs[idx]);
750 return task_under_cgroup_hierarchy(current, cgrp);
753 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
754 .func = bpf_current_task_under_cgroup,
756 .ret_type = RET_INTEGER,
757 .arg1_type = ARG_CONST_MAP_PTR,
758 .arg2_type = ARG_ANYTHING,
761 struct send_signal_irq_work {
762 struct irq_work irq_work;
763 struct task_struct *task;
768 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
770 static void do_bpf_send_signal(struct irq_work *entry)
772 struct send_signal_irq_work *work;
774 work = container_of(entry, struct send_signal_irq_work, irq_work);
775 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
778 static int bpf_send_signal_common(u32 sig, enum pid_type type)
780 struct send_signal_irq_work *work = NULL;
782 /* Similar to bpf_probe_write_user(), the task needs to be in a
783 * sound condition and kernel memory access must be permitted in
784 * order to send a signal to the current task.
787 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
789 if (unlikely(uaccess_kernel()))
791 if (unlikely(!nmi_uaccess_okay()))
794 if (irqs_disabled()) {
795 /* Do an early check on signal validity. Otherwise,
796 * the error is lost in deferred irq_work.
798 if (unlikely(!valid_signal(sig)))
801 work = this_cpu_ptr(&send_signal_work);
802 if (irq_work_is_busy(&work->irq_work))
805 /* Add the current task, which is the target of the signal,
806 * to the irq_work. The current task may change by the time the
807 * queued irq_work gets executed.
809 work->task = current;
812 irq_work_queue(&work->irq_work);
816 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
819 BPF_CALL_1(bpf_send_signal, u32, sig)
821 return bpf_send_signal_common(sig, PIDTYPE_TGID);
824 static const struct bpf_func_proto bpf_send_signal_proto = {
825 .func = bpf_send_signal,
827 .ret_type = RET_INTEGER,
828 .arg1_type = ARG_ANYTHING,
831 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
833 return bpf_send_signal_common(sig, PIDTYPE_PID);
836 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
837 .func = bpf_send_signal_thread,
839 .ret_type = RET_INTEGER,
840 .arg1_type = ARG_ANYTHING,
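/*
 * Illustrative sketch (not part of this file's code): both helpers above
 * target the current task; bpf_send_signal() delivers to the thread group
 * (PIDTYPE_TGID) while bpf_send_signal_thread() signals only the calling
 * thread (PIDTYPE_PID). Hypothetical BPF-side use:
 *
 *	if (suspicious)
 *		bpf_send_signal(9);	// SIGKILL the whole thread group
 */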
843 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
851 p = d_path(path, buf, sz);
856 memmove(buf, p, len);
862 BTF_SET_START(btf_allowlist_d_path)
863 #ifdef CONFIG_SECURITY
864 BTF_ID(func, security_file_permission)
865 BTF_ID(func, security_inode_getattr)
866 BTF_ID(func, security_file_open)
868 #ifdef CONFIG_SECURITY_PATH
869 BTF_ID(func, security_path_truncate)
871 BTF_ID(func, vfs_truncate)
872 BTF_ID(func, vfs_fallocate)
873 BTF_ID(func, dentry_open)
874 BTF_ID(func, vfs_getattr)
875 BTF_ID(func, filp_close)
876 BTF_SET_END(btf_allowlist_d_path)
878 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
880 if (prog->type == BPF_PROG_TYPE_TRACING &&
881 prog->expected_attach_type == BPF_TRACE_ITER)
884 if (prog->type == BPF_PROG_TYPE_LSM)
885 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
887 return btf_id_set_contains(&btf_allowlist_d_path,
888 prog->aux->attach_btf_id);
891 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
893 static const struct bpf_func_proto bpf_d_path_proto = {
896 .ret_type = RET_INTEGER,
897 .arg1_type = ARG_PTR_TO_BTF_ID,
898 .arg1_btf_id = &bpf_d_path_btf_ids[0],
899 .arg2_type = ARG_PTR_TO_MEM,
900 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
901 .allowed = bpf_d_path_allowed,
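/*
 * Illustrative sketch (not part of this file's code): bpf_d_path() is only
 * available to BPF_TRACE_ITER programs, sleepable LSM hooks, and the
 * attach points in btf_allowlist_d_path above. A hypothetical fentry
 * program on vfs_truncate:
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(on_truncate, const struct path *path)
 *	{
 *		char buf[256];
 *
 *		if (bpf_d_path((struct path *)path, buf, sizeof(buf)) > 0)
 *			; // buf now holds the resolved path
 *		return 0;
 *	}
 */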
904 #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
905 BTF_F_PTR_RAW | BTF_F_ZERO)
907 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
908 u64 flags, const struct btf **btf,
911 const struct btf_type *t;
913 if (unlikely(flags & ~(BTF_F_ALL)))
916 if (btf_ptr_size != sizeof(struct btf_ptr))
919 *btf = bpf_get_btf_vmlinux();
921 if (IS_ERR_OR_NULL(*btf))
922 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
924 if (ptr->type_id > 0)
925 *btf_id = ptr->type_id;
930 t = btf_type_by_id(*btf, *btf_id);
931 if (*btf_id <= 0 || !t)
937 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
938 u32, btf_ptr_size, u64, flags)
940 const struct btf *btf;
944 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
948 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
952 const struct bpf_func_proto bpf_snprintf_btf_proto = {
953 .func = bpf_snprintf_btf,
955 .ret_type = RET_INTEGER,
956 .arg1_type = ARG_PTR_TO_MEM,
957 .arg2_type = ARG_CONST_SIZE,
958 .arg3_type = ARG_PTR_TO_MEM,
959 .arg4_type = ARG_CONST_SIZE,
960 .arg5_type = ARG_ANYTHING,
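/*
 * Illustrative sketch (not part of this file's code): pretty-printing a
 * kernel object into a buffer via its BTF description. The type id would
 * typically come from bpf_core_type_id_kernel() on the BPF side; names
 * below are hypothetical:
 *
 *	struct btf_ptr p = {
 *		.ptr	 = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *	char out[512];
 *
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p),
 *			 BTF_F_COMPACT | BTF_F_NONAME);
 */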
963 const struct bpf_func_proto *
964 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
967 case BPF_FUNC_map_lookup_elem:
968 return &bpf_map_lookup_elem_proto;
969 case BPF_FUNC_map_update_elem:
970 return &bpf_map_update_elem_proto;
971 case BPF_FUNC_map_delete_elem:
972 return &bpf_map_delete_elem_proto;
973 case BPF_FUNC_map_push_elem:
974 return &bpf_map_push_elem_proto;
975 case BPF_FUNC_map_pop_elem:
976 return &bpf_map_pop_elem_proto;
977 case BPF_FUNC_map_peek_elem:
978 return &bpf_map_peek_elem_proto;
979 case BPF_FUNC_ktime_get_ns:
980 return &bpf_ktime_get_ns_proto;
981 case BPF_FUNC_ktime_get_boot_ns:
982 return &bpf_ktime_get_boot_ns_proto;
983 case BPF_FUNC_ktime_get_coarse_ns:
984 return &bpf_ktime_get_coarse_ns_proto;
985 case BPF_FUNC_tail_call:
986 return &bpf_tail_call_proto;
987 case BPF_FUNC_get_current_pid_tgid:
988 return &bpf_get_current_pid_tgid_proto;
989 case BPF_FUNC_get_current_task:
990 return &bpf_get_current_task_proto;
991 case BPF_FUNC_get_current_task_btf:
992 return &bpf_get_current_task_btf_proto;
993 case BPF_FUNC_get_current_uid_gid:
994 return &bpf_get_current_uid_gid_proto;
995 case BPF_FUNC_get_current_comm:
996 return &bpf_get_current_comm_proto;
997 case BPF_FUNC_trace_printk:
998 return bpf_get_trace_printk_proto();
999 case BPF_FUNC_get_smp_processor_id:
1000 return &bpf_get_smp_processor_id_proto;
1001 case BPF_FUNC_get_numa_node_id:
1002 return &bpf_get_numa_node_id_proto;
1003 case BPF_FUNC_perf_event_read:
1004 return &bpf_perf_event_read_proto;
1005 case BPF_FUNC_probe_write_user:
1006 return bpf_get_probe_write_proto();
1007 case BPF_FUNC_current_task_under_cgroup:
1008 return &bpf_current_task_under_cgroup_proto;
1009 case BPF_FUNC_get_prandom_u32:
1010 return &bpf_get_prandom_u32_proto;
1011 case BPF_FUNC_probe_read_user:
1012 return &bpf_probe_read_user_proto;
1013 case BPF_FUNC_probe_read_kernel:
1014 return &bpf_probe_read_kernel_proto;
1015 case BPF_FUNC_probe_read_user_str:
1016 return &bpf_probe_read_user_str_proto;
1017 case BPF_FUNC_probe_read_kernel_str:
1018 return &bpf_probe_read_kernel_str_proto;
1019 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1020 case BPF_FUNC_probe_read:
1021 return &bpf_probe_read_compat_proto;
1022 case BPF_FUNC_probe_read_str:
1023 return &bpf_probe_read_compat_str_proto;
1025 #ifdef CONFIG_CGROUPS
1026 case BPF_FUNC_get_current_cgroup_id:
1027 return &bpf_get_current_cgroup_id_proto;
1029 case BPF_FUNC_send_signal:
1030 return &bpf_send_signal_proto;
1031 case BPF_FUNC_send_signal_thread:
1032 return &bpf_send_signal_thread_proto;
1033 case BPF_FUNC_perf_event_read_value:
1034 return &bpf_perf_event_read_value_proto;
1035 case BPF_FUNC_get_ns_current_pid_tgid:
1036 return &bpf_get_ns_current_pid_tgid_proto;
1037 case BPF_FUNC_ringbuf_output:
1038 return &bpf_ringbuf_output_proto;
1039 case BPF_FUNC_ringbuf_reserve:
1040 return &bpf_ringbuf_reserve_proto;
1041 case BPF_FUNC_ringbuf_submit:
1042 return &bpf_ringbuf_submit_proto;
1043 case BPF_FUNC_ringbuf_discard:
1044 return &bpf_ringbuf_discard_proto;
1045 case BPF_FUNC_ringbuf_query:
1046 return &bpf_ringbuf_query_proto;
1047 case BPF_FUNC_jiffies64:
1048 return &bpf_jiffies64_proto;
1049 case BPF_FUNC_get_task_stack:
1050 return &bpf_get_task_stack_proto;
1051 case BPF_FUNC_copy_from_user:
1052 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
1053 case BPF_FUNC_snprintf_btf:
1054 return &bpf_snprintf_btf_proto;
1055 case BPF_FUNC_per_cpu_ptr:
1056 return &bpf_per_cpu_ptr_proto;
1057 case BPF_FUNC_this_cpu_ptr:
1058 return &bpf_this_cpu_ptr_proto;
1059 case BPF_FUNC_task_storage_get:
1060 return &bpf_task_storage_get_proto;
1061 case BPF_FUNC_task_storage_delete:
1062 return &bpf_task_storage_delete_proto;
1063 case BPF_FUNC_for_each_map_elem:
1064 return &bpf_for_each_map_elem_proto;
1065 case BPF_FUNC_snprintf:
1066 return &bpf_snprintf_proto;
1072 static const struct bpf_func_proto *
1073 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1076 case BPF_FUNC_perf_event_output:
1077 return &bpf_perf_event_output_proto;
1078 case BPF_FUNC_get_stackid:
1079 return &bpf_get_stackid_proto;
1080 case BPF_FUNC_get_stack:
1081 return &bpf_get_stack_proto;
1082 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1083 case BPF_FUNC_override_return:
1084 return &bpf_override_return_proto;
1087 return bpf_tracing_func_proto(func_id, prog);
1091 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1092 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1093 const struct bpf_prog *prog,
1094 struct bpf_insn_access_aux *info)
1096 if (off < 0 || off >= sizeof(struct pt_regs))
1098 if (type != BPF_READ)
1100 if (off % size != 0)
1103 * Assertion for 32-bit to make sure that a trailing 8-byte access
1104 * (BPF_DW) to the last 4-byte member is disallowed.
1106 if (off + size > sizeof(struct pt_regs))
1112 const struct bpf_verifier_ops kprobe_verifier_ops = {
1113 .get_func_proto = kprobe_prog_func_proto,
1114 .is_valid_access = kprobe_prog_is_valid_access,
1117 const struct bpf_prog_ops kprobe_prog_ops = {
1120 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1121 u64, flags, void *, data, u64, size)
1123 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1126 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1127 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1128 * from there and call the same bpf_perf_event_output() helper inline.
1130 return ____bpf_perf_event_output(regs, map, flags, data, size);
1133 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1134 .func = bpf_perf_event_output_tp,
1136 .ret_type = RET_INTEGER,
1137 .arg1_type = ARG_PTR_TO_CTX,
1138 .arg2_type = ARG_CONST_MAP_PTR,
1139 .arg3_type = ARG_ANYTHING,
1140 .arg4_type = ARG_PTR_TO_MEM,
1141 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1144 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1147 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1150 * Same comment as in bpf_perf_event_output_tp(), only that this time
1151 * the other helper's function body cannot be inlined due to being
1152 * external, thus we need to call the raw helper function directly.
1154 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1158 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1159 .func = bpf_get_stackid_tp,
1161 .ret_type = RET_INTEGER,
1162 .arg1_type = ARG_PTR_TO_CTX,
1163 .arg2_type = ARG_CONST_MAP_PTR,
1164 .arg3_type = ARG_ANYTHING,
1167 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1170 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1172 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1173 (unsigned long) size, flags, 0);
1176 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1177 .func = bpf_get_stack_tp,
1179 .ret_type = RET_INTEGER,
1180 .arg1_type = ARG_PTR_TO_CTX,
1181 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1182 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1183 .arg4_type = ARG_ANYTHING,
1186 static const struct bpf_func_proto *
1187 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1190 case BPF_FUNC_perf_event_output:
1191 return &bpf_perf_event_output_proto_tp;
1192 case BPF_FUNC_get_stackid:
1193 return &bpf_get_stackid_proto_tp;
1194 case BPF_FUNC_get_stack:
1195 return &bpf_get_stack_proto_tp;
1197 return bpf_tracing_func_proto(func_id, prog);
1201 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1202 const struct bpf_prog *prog,
1203 struct bpf_insn_access_aux *info)
1205 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1207 if (type != BPF_READ)
1209 if (off % size != 0)
1212 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1216 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1217 .get_func_proto = tp_prog_func_proto,
1218 .is_valid_access = tp_prog_is_valid_access,
1221 const struct bpf_prog_ops tracepoint_prog_ops = {
1224 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1225 struct bpf_perf_event_value *, buf, u32, size)
1229 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1231 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1237 memset(buf, 0, size);
1241 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1242 .func = bpf_perf_prog_read_value,
1244 .ret_type = RET_INTEGER,
1245 .arg1_type = ARG_PTR_TO_CTX,
1246 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1247 .arg3_type = ARG_CONST_SIZE,
1250 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1251 void *, buf, u32, size, u64, flags)
1256 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1257 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1260 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1263 if (unlikely(!br_stack))
1266 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1267 return br_stack->nr * br_entry_size;
1269 if (!buf || (size % br_entry_size != 0))
1272 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1273 memcpy(buf, br_stack->entries, to_copy);
1279 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1280 .func = bpf_read_branch_records,
1282 .ret_type = RET_INTEGER,
1283 .arg1_type = ARG_PTR_TO_CTX,
1284 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1285 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1286 .arg4_type = ARG_ANYTHING,
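/*
 * Illustrative sketch (not part of this file's code): a perf_event program
 * can first query the branch stack size with BPF_F_GET_BRANCH_RECORDS_SIZE
 * and then copy the records; size must be a multiple of
 * sizeof(struct perf_branch_entry). Hypothetical BPF-side use:
 *
 *	struct perf_branch_entry entries[16];
 *	long sz, copied;
 *
 *	sz = bpf_read_branch_records(ctx, NULL, 0,
 *				     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	copied = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
 *	// copied is the number of bytes written to entries[], or -ENOENT
 *	// if the event has no branch stack.
 */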
1289 static const struct bpf_func_proto *
1290 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1293 case BPF_FUNC_perf_event_output:
1294 return &bpf_perf_event_output_proto_tp;
1295 case BPF_FUNC_get_stackid:
1296 return &bpf_get_stackid_proto_pe;
1297 case BPF_FUNC_get_stack:
1298 return &bpf_get_stack_proto_pe;
1299 case BPF_FUNC_perf_prog_read_value:
1300 return &bpf_perf_prog_read_value_proto;
1301 case BPF_FUNC_read_branch_records:
1302 return &bpf_read_branch_records_proto;
1304 return bpf_tracing_func_proto(func_id, prog);
1309 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1310 * to avoid potential recursive reuse issue when/if tracepoints are added
1311 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1313 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1314 * in normal, irq, and nmi context.
1316 struct bpf_raw_tp_regs {
1317 struct pt_regs regs[3];
1319 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1320 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1321 static struct pt_regs *get_bpf_raw_tp_regs(void)
1323 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1324 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1326 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1327 this_cpu_dec(bpf_raw_tp_nest_level);
1328 return ERR_PTR(-EBUSY);
1331 return &tp_regs->regs[nest_level - 1];
1334 static void put_bpf_raw_tp_regs(void)
1336 this_cpu_dec(bpf_raw_tp_nest_level);
1339 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1340 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1342 struct pt_regs *regs = get_bpf_raw_tp_regs();
1346 return PTR_ERR(regs);
1348 perf_fetch_caller_regs(regs);
1349 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1351 put_bpf_raw_tp_regs();
1355 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1356 .func = bpf_perf_event_output_raw_tp,
1358 .ret_type = RET_INTEGER,
1359 .arg1_type = ARG_PTR_TO_CTX,
1360 .arg2_type = ARG_CONST_MAP_PTR,
1361 .arg3_type = ARG_ANYTHING,
1362 .arg4_type = ARG_PTR_TO_MEM,
1363 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1366 extern const struct bpf_func_proto bpf_skb_output_proto;
1367 extern const struct bpf_func_proto bpf_xdp_output_proto;
1369 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1370 struct bpf_map *, map, u64, flags)
1372 struct pt_regs *regs = get_bpf_raw_tp_regs();
1376 return PTR_ERR(regs);
1378 perf_fetch_caller_regs(regs);
1379 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1380 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1382 put_bpf_raw_tp_regs();
1386 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1387 .func = bpf_get_stackid_raw_tp,
1389 .ret_type = RET_INTEGER,
1390 .arg1_type = ARG_PTR_TO_CTX,
1391 .arg2_type = ARG_CONST_MAP_PTR,
1392 .arg3_type = ARG_ANYTHING,
1395 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1396 void *, buf, u32, size, u64, flags)
1398 struct pt_regs *regs = get_bpf_raw_tp_regs();
1402 return PTR_ERR(regs);
1404 perf_fetch_caller_regs(regs);
1405 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1406 (unsigned long) size, flags, 0);
1407 put_bpf_raw_tp_regs();
1411 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1412 .func = bpf_get_stack_raw_tp,
1414 .ret_type = RET_INTEGER,
1415 .arg1_type = ARG_PTR_TO_CTX,
1416 .arg2_type = ARG_PTR_TO_MEM,
1417 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1418 .arg4_type = ARG_ANYTHING,
1421 static const struct bpf_func_proto *
1422 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1425 case BPF_FUNC_perf_event_output:
1426 return &bpf_perf_event_output_proto_raw_tp;
1427 case BPF_FUNC_get_stackid:
1428 return &bpf_get_stackid_proto_raw_tp;
1429 case BPF_FUNC_get_stack:
1430 return &bpf_get_stack_proto_raw_tp;
1432 return bpf_tracing_func_proto(func_id, prog);
1436 const struct bpf_func_proto *
1437 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1441 case BPF_FUNC_skb_output:
1442 return &bpf_skb_output_proto;
1443 case BPF_FUNC_xdp_output:
1444 return &bpf_xdp_output_proto;
1445 case BPF_FUNC_skc_to_tcp6_sock:
1446 return &bpf_skc_to_tcp6_sock_proto;
1447 case BPF_FUNC_skc_to_tcp_sock:
1448 return &bpf_skc_to_tcp_sock_proto;
1449 case BPF_FUNC_skc_to_tcp_timewait_sock:
1450 return &bpf_skc_to_tcp_timewait_sock_proto;
1451 case BPF_FUNC_skc_to_tcp_request_sock:
1452 return &bpf_skc_to_tcp_request_sock_proto;
1453 case BPF_FUNC_skc_to_udp6_sock:
1454 return &bpf_skc_to_udp6_sock_proto;
1455 case BPF_FUNC_sk_storage_get:
1456 return &bpf_sk_storage_get_tracing_proto;
1457 case BPF_FUNC_sk_storage_delete:
1458 return &bpf_sk_storage_delete_tracing_proto;
1459 case BPF_FUNC_sock_from_file:
1460 return &bpf_sock_from_file_proto;
1461 case BPF_FUNC_get_socket_cookie:
1462 return &bpf_get_socket_ptr_cookie_proto;
1464 case BPF_FUNC_seq_printf:
1465 return prog->expected_attach_type == BPF_TRACE_ITER ?
1466 &bpf_seq_printf_proto :
1468 case BPF_FUNC_seq_write:
1469 return prog->expected_attach_type == BPF_TRACE_ITER ?
1470 &bpf_seq_write_proto :
1472 case BPF_FUNC_seq_printf_btf:
1473 return prog->expected_attach_type == BPF_TRACE_ITER ?
1474 &bpf_seq_printf_btf_proto :
1476 case BPF_FUNC_d_path:
1477 return &bpf_d_path_proto;
1479 return raw_tp_prog_func_proto(func_id, prog);
1483 static bool raw_tp_prog_is_valid_access(int off, int size,
1484 enum bpf_access_type type,
1485 const struct bpf_prog *prog,
1486 struct bpf_insn_access_aux *info)
1488 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1490 if (type != BPF_READ)
1492 if (off % size != 0)
1497 static bool tracing_prog_is_valid_access(int off, int size,
1498 enum bpf_access_type type,
1499 const struct bpf_prog *prog,
1500 struct bpf_insn_access_aux *info)
1502 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1504 if (type != BPF_READ)
1506 if (off % size != 0)
1508 return btf_ctx_access(off, size, type, prog, info);
1511 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1512 const union bpf_attr *kattr,
1513 union bpf_attr __user *uattr)
1518 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1519 .get_func_proto = raw_tp_prog_func_proto,
1520 .is_valid_access = raw_tp_prog_is_valid_access,
1523 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1525 .test_run = bpf_prog_test_run_raw_tp,
1529 const struct bpf_verifier_ops tracing_verifier_ops = {
1530 .get_func_proto = tracing_prog_func_proto,
1531 .is_valid_access = tracing_prog_is_valid_access,
1534 const struct bpf_prog_ops tracing_prog_ops = {
1535 .test_run = bpf_prog_test_run_tracing,
1538 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1539 enum bpf_access_type type,
1540 const struct bpf_prog *prog,
1541 struct bpf_insn_access_aux *info)
1544 if (size != sizeof(u64) || type != BPF_READ)
1546 info->reg_type = PTR_TO_TP_BUFFER;
1548 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1551 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1552 .get_func_proto = raw_tp_prog_func_proto,
1553 .is_valid_access = raw_tp_writable_prog_is_valid_access,
1556 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1559 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1560 const struct bpf_prog *prog,
1561 struct bpf_insn_access_aux *info)
1563 const int size_u64 = sizeof(u64);
1565 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1567 if (type != BPF_READ)
1569 if (off % size != 0) {
1570 if (sizeof(unsigned long) != 4)
1574 if (off % size != 4)
1579 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1580 bpf_ctx_record_field_size(info, size_u64);
1581 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1584 case bpf_ctx_range(struct bpf_perf_event_data, addr):
1585 bpf_ctx_record_field_size(info, size_u64);
1586 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1590 if (size != sizeof(long))
1597 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1598 const struct bpf_insn *si,
1599 struct bpf_insn *insn_buf,
1600 struct bpf_prog *prog, u32 *target_size)
1602 struct bpf_insn *insn = insn_buf;
1605 case offsetof(struct bpf_perf_event_data, sample_period):
1606 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1607 data), si->dst_reg, si->src_reg,
1608 offsetof(struct bpf_perf_event_data_kern, data));
1609 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1610 bpf_target_off(struct perf_sample_data, period, 8,
1613 case offsetof(struct bpf_perf_event_data, addr):
1614 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1615 data), si->dst_reg, si->src_reg,
1616 offsetof(struct bpf_perf_event_data_kern, data));
1617 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1618 bpf_target_off(struct perf_sample_data, addr, 8,
1622 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1623 regs), si->dst_reg, si->src_reg,
1624 offsetof(struct bpf_perf_event_data_kern, regs));
1625 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1630 return insn - insn_buf;
1633 const struct bpf_verifier_ops perf_event_verifier_ops = {
1634 .get_func_proto = pe_prog_func_proto,
1635 .is_valid_access = pe_prog_is_valid_access,
1636 .convert_ctx_access = pe_prog_convert_ctx_access,
1639 const struct bpf_prog_ops perf_event_prog_ops = {
1642 static DEFINE_MUTEX(bpf_event_mutex);
1644 #define BPF_TRACE_MAX_PROGS 64
1646 int perf_event_attach_bpf_prog(struct perf_event *event,
1647 struct bpf_prog *prog)
1649 struct bpf_prog_array *old_array;
1650 struct bpf_prog_array *new_array;
1654 * Kprobe override only works if the kprobe is on the function entry,
1655 * and only if the function is on the error-injection opt-in list.
1657 if (prog->kprobe_override &&
1658 (!trace_kprobe_on_func_entry(event->tp_event) ||
1659 !trace_kprobe_error_injectable(event->tp_event)))
1662 mutex_lock(&bpf_event_mutex);
1667 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1669 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1674 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1678 /* set the new array to event->tp_event and set event->prog */
1680 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1681 bpf_prog_array_free(old_array);
1684 mutex_unlock(&bpf_event_mutex);
1688 void perf_event_detach_bpf_prog(struct perf_event *event)
1690 struct bpf_prog_array *old_array;
1691 struct bpf_prog_array *new_array;
1694 mutex_lock(&bpf_event_mutex);
1699 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1700 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1704 bpf_prog_array_delete_safe(old_array, event->prog);
1706 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1707 bpf_prog_array_free(old_array);
1710 bpf_prog_put(event->prog);
1714 mutex_unlock(&bpf_event_mutex);
1717 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1719 struct perf_event_query_bpf __user *uquery = info;
1720 struct perf_event_query_bpf query = {};
1721 struct bpf_prog_array *progs;
1722 u32 *ids, prog_cnt, ids_len;
1725 if (!perfmon_capable())
1727 if (event->attr.type != PERF_TYPE_TRACEPOINT)
1729 if (copy_from_user(&query, uquery, sizeof(query)))
1732 ids_len = query.ids_len;
1733 if (ids_len > BPF_TRACE_MAX_PROGS)
1735 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1739 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1740 * is required when user only wants to check for uquery->prog_cnt.
1741 * There is no need to check for it since the case is handled
1742 * gracefully in bpf_prog_array_copy_info.
1745 mutex_lock(&bpf_event_mutex);
1746 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1747 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1748 mutex_unlock(&bpf_event_mutex);
1750 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1751 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1758 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1759 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1761 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1763 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1765 for (; btp < __stop__bpf_raw_tp; btp++) {
1766 if (!strcmp(btp->tp->name, name))
1770 return bpf_get_raw_tracepoint_module(name);
1773 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1778 mod = __module_address((unsigned long)btp);
1783 static __always_inline
1784 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1788 (void) BPF_PROG_RUN(prog, args);
1792 #define UNPACK(...) __VA_ARGS__
1793 #define REPEAT_1(FN, DL, X, ...) FN(X)
1794 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1795 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1796 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1797 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1798 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1799 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1800 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1801 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1802 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1803 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1804 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1805 #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
1807 #define SARG(X) u64 arg##X
1808 #define COPY(X) args[X] = arg##X
1810 #define __DL_COM (,)
1811 #define __DL_SEM (;)
1813 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1815 #define BPF_TRACE_DEFN_x(x) \
1816 void bpf_trace_run##x(struct bpf_prog *prog, \
1817 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
1820 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
1821 __bpf_trace_run(prog, args); \
1823 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1824 BPF_TRACE_DEFN_x(1);
1825 BPF_TRACE_DEFN_x(2);
1826 BPF_TRACE_DEFN_x(3);
1827 BPF_TRACE_DEFN_x(4);
1828 BPF_TRACE_DEFN_x(5);
1829 BPF_TRACE_DEFN_x(6);
1830 BPF_TRACE_DEFN_x(7);
1831 BPF_TRACE_DEFN_x(8);
1832 BPF_TRACE_DEFN_x(9);
1833 BPF_TRACE_DEFN_x(10);
1834 BPF_TRACE_DEFN_x(11);
1835 BPF_TRACE_DEFN_x(12);
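/*
 * For reference, roughly what the macro above expands to for x = 2, with
 * the REPEAT()/SARG()/COPY() machinery spelled out:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */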
1837 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1839 struct tracepoint *tp = btp->tp;
1842 * check that program doesn't access arguments beyond what's
1843 * available in this tracepoint
1845 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1848 if (prog->aux->max_tp_access > btp->writable_size)
1851 return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1854 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1856 return __bpf_probe_register(btp, prog);
1859 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1861 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1864 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1865 u32 *fd_type, const char **buf,
1866 u64 *probe_offset, u64 *probe_addr)
1868 bool is_tracepoint, is_syscall_tp;
1869 struct bpf_prog *prog;
1876 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1877 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1880 *prog_id = prog->aux->id;
1881 flags = event->tp_event->flags;
1882 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1883 is_syscall_tp = is_syscall_trace_event(event->tp_event);
1885 if (is_tracepoint || is_syscall_tp) {
1886 *buf = is_tracepoint ? event->tp_event->tp->name
1887 : event->tp_event->name;
1888 *fd_type = BPF_FD_TYPE_TRACEPOINT;
1889 *probe_offset = 0x0;
1894 #ifdef CONFIG_KPROBE_EVENTS
1895 if (flags & TRACE_EVENT_FL_KPROBE)
1896 err = bpf_get_kprobe_info(event, fd_type, buf,
1897 probe_offset, probe_addr,
1898 event->attr.type == PERF_TYPE_TRACEPOINT);
1900 #ifdef CONFIG_UPROBE_EVENTS
1901 if (flags & TRACE_EVENT_FL_UPROBE)
1902 err = bpf_get_uprobe_info(event, fd_type, buf,
1904 event->attr.type == PERF_TYPE_TRACEPOINT);
1911 static int __init send_signal_irq_work_init(void)
1914 struct send_signal_irq_work *work;
1916 for_each_possible_cpu(cpu) {
1917 work = per_cpu_ptr(&send_signal_work, cpu);
1918 init_irq_work(&work->irq_work, do_bpf_send_signal);
1923 subsys_initcall(send_signal_irq_work_init);
1925 #ifdef CONFIG_MODULES
1926 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1929 struct bpf_trace_module *btm, *tmp;
1930 struct module *mod = module;
1933 if (mod->num_bpf_raw_events == 0 ||
1934 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1937 mutex_lock(&bpf_module_mutex);
1940 case MODULE_STATE_COMING:
1941 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1943 btm->module = module;
1944 list_add(&btm->list, &bpf_trace_modules);
1949 case MODULE_STATE_GOING:
1950 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1951 if (btm->module == module) {
1952 list_del(&btm->list);
1960 mutex_unlock(&bpf_module_mutex);
1963 return notifier_from_errno(ret);
1966 static struct notifier_block bpf_module_nb = {
1967 .notifier_call = bpf_event_notify,
1970 static int __init bpf_event_init(void)
1972 register_module_notifier(&bpf_module_nb);
1976 fs_initcall(bpf_event_init);
1977 #endif /* CONFIG_MODULES */