// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
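/*
 * Layout of a sample: the entry header is followed by one vaddr slot for an
 * entry probe (the probed instruction pointer) or two for a return probe
 * (the probed function address and the address it returned to), and the
 * fetched argument data starts right after, which is what
 * DATAOF_TRACE_ENTRY() computes.
 */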
struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};
/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};
static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/*
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	 (sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
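/*
 * The u32 at *dest is a "data_loc" word as used elsewhere in ftrace: the
 * upper 16 bits carry the string length and the lower 16 bits the offset of
 * the string data relative to the entry. This is an illustrative summary of
 * the make_data_loc()/get_loc_len()/get_loc_data() helpers; see
 * trace_probe.h for the authoritative definitions.
 */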
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
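/*
 * For example, if a probe was placed at file offset 0x620 of an executable
 * and the breakpoint fired at virtual address 0x55d307a00620, base_addr
 * works out to 0x55d307a00000, so an "@0x1000" fetch argument would read
 * from 0x55d307a01000. (Illustrative addresses only.)
 */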
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}
static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
			 (int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
			 (int)(sizeof(void *) * 2), tu->offset,
			 tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}

static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	       (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	       trace_uprobe_match_command_head(tu, argc, argv);
}
static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}

/*
 * Allocate a new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);

	return 0;
}
static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_uprobe, tp);
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent);

	return ret;
}
/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), then we allow the same
 * uprobe with a new reference counter as long as the new one does not
 * conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}
/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *  - Remove uprobe: -:[GRP/]EVENT
 */
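/*
 * For example (paths, offsets and argument choices are illustrative; see
 * Documentation/trace/uprobetracer.rst for the full syntax):
 *
 *   echo 'p:zfree_entry /bin/zsh:0x46420 %ip %ax' >> uprobe_events
 *   echo 'r:zfree_exit /bin/zsh:0x46420 $retval'  >> uprobe_events
 *   echo '-:zfree_exit'                           >> uprobe_events
 */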
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
						 is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}
static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}
/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
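/*
 * A registered probe is listed back in roughly the same syntax it was
 * created with, e.g. (illustrative values):
 *
 *   p:uprobes/zfree_entry /bin/zsh:0x0000000000046420 arg1=%ax
 *   r:uprobes/zfree_exit /bin/zsh:0x0000000000046420(0x1020) arg1=$retval
 */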
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}
static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}
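/*
 * Each line of the uprobe_profile file pairs the probed file and the event
 * name with the accumulated hit count, e.g. (illustrative values):
 *
 *   /bin/zsh zfree_entry                                          127
 */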
static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
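/*
 * Rendered trace lines therefore look like (illustrative addresses and
 * argument values):
 *
 *   zfree_entry: (0x446420) arg1=0x79
 *   zfree_exit: (0x401126 <- 0x446420) arg1=0x0
 */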
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
			      enum uprobe_filter_ctx ctx,
			      struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_uprobe *tu;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		if (!tu->inode)
			continue;

		WARN_ON(!uprobe_filter_is_empty(&tu->filter));

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}
static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}
static int uprobe_perf_multi_call(struct trace_event_call *call,
				  struct perf_event *event,
		int (*op)(struct trace_uprobe *tu, struct perf_event *event))
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = op(tu, event);
		if (ret)
			break;
	}

	return ret;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
			       enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = event->tp_event->data;
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_multi_call(event, data, uprobe_perf_open);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_multi_call(event, data, uprobe_perf_close);

#endif
	default:
		return 0;
	}
}
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Local trace_uprobe events are not added to dyn_event, so they are
	 * never searched by find_probe_event(). Therefore, there is no
	 * concern of a duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);