/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>

#include "trace_output.h"
#define MAX_TRACE_ARGS		128
#define MAX_ARGSTR_LEN		63
#define MAX_EVENT_NAME_LEN	64
#define KPROBE_EVENT_SYSTEM	"kprobes"

/* Reserved field names */
#define FIELD_STRING_IP		"__probe_ip"
#define FIELD_STRING_NARGS	"__probe_nargs"
#define FIELD_STRING_RETIP	"__probe_ret_ip"
#define FIELD_STRING_FUNC	"__probe_func"
const char *reserved_field_names[] = {
	"common_type",
	"common_flags",
	"common_preempt_count",
	"common_pid",
	"common_tgid",
	"common_lock_depth",
	FIELD_STRING_IP,
	FIELD_STRING_NARGS,
	FIELD_STRING_RETIP,
	FIELD_STRING_FUNC,
};
/* A fetch method and the data bound to it (register offset, address, etc.) */
struct fetch_func {
	unsigned long (*func)(struct pt_regs *, void *);
	void *data;
};
static __kprobes unsigned long call_fetch(struct fetch_func *f,
					  struct pt_regs *regs)
{
	return f->func(regs, f->data);
}
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
					      void *offset)
{
	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}

static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
					   void *num)
{
	return regs_get_kernel_stack_nth(regs,
					 (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
	unsigned long retval;

	if (probe_kernel_address(addr, retval))
		return 0;
	return retval;
}

static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
	return regs_return_value(regs);
}

static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
						   void *dummy)
{
	return kernel_stack_pointer(regs);
}
/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;
	long offset;
	unsigned long addr;
};

static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
	if (sc->addr)
		sc->addr += sc->offset;
	return sc->addr;
}
static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}
static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;
	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;

	update_symbol_cache(sc);
	return sc;
}
static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
	struct symbol_cache *sc = data;

	if (sc->addr)
		return fetch_memory(regs, (void *)sc->addr);
	else
		return 0;
}
/* Special indirect memory access interface */
struct indirect_fetch_data {
	struct fetch_func orig;
	long offset;
};
static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
	struct indirect_fetch_data *ind = data;
	unsigned long addr;

	addr = call_fetch(&ind->orig, regs);
	if (addr) {
		addr += ind->offset;
		return fetch_memory(regs, (void *)addr);
	} else
		return 0;
}
static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
	if (data->orig.func == fetch_indirect)
		free_indirect_fetch_data(data->orig.data);
	else if (data->orig.func == fetch_symbol)
		free_symbol_cache(data->orig.data);
	kfree(data);
}
/**
 * Kprobe event core functions
 */

struct probe_arg {
	struct fetch_func fetch;
	const char *name;
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE	1
#define TP_FLAG_PROFILE	2
struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;	/* hit count, shown via kprobe_profile */
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_call	call;
	struct trace_event		event;
	unsigned int		nr_args;
	struct probe_arg	args[];
};
#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
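/*
 * Example (illustrative): args[] is a flexible array member, so a probe
 * and its argument slots come from a single allocation, e.g.
 *   SIZEOF_TRACE_PROBE(2) == offsetof(struct trace_probe, args)
 *			      + 2 * sizeof(struct probe_arg)
 */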
static __kprobes int probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}
static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
	int ret = -EINVAL;

	if (ff->func == fetch_argument)
		ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_register) {
		const char *name;
		name = regs_query_register_name((unsigned int)((long)ff->data));
		ret = snprintf(buf, n, "%%%s", name);
	} else if (ff->func == fetch_stack)
		ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_memory)
		ret = snprintf(buf, n, "@0x%p", ff->data);
	else if (ff->func == fetch_symbol) {
		struct symbol_cache *sc = ff->data;
		if (sc->offset)
			ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
				       sc->offset);
		else
			ret = snprintf(buf, n, "@%s", sc->symbol);
	} else if (ff->func == fetch_retvalue)
		ret = snprintf(buf, n, "$retval");
	else if (ff->func == fetch_stack_address)
		ret = snprintf(buf, n, "$stack");
	else if (ff->func == fetch_indirect) {
		struct indirect_fetch_data *id = ff->data;
		size_t l = 0;
		ret = snprintf(buf, n, "%+ld(", id->offset);
		if (ret >= n)
			goto end;
		l += ret;
		ret = probe_arg_string(buf + l, n - l, &id->orig);
		if (ret < 0)
			goto end;
		l += ret;
		ret = snprintf(buf + l, n - l, ")");
		ret += l;
	}
end:
	if (ret >= n)
		return -ENOSPC;
	return ret;
}
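/*
 * Example (illustrative): probe_arg_string() regenerates the textual spec
 * an argument was parsed from, so "%ax" prints back as "%ax", "$stack3"
 * as "$stack3", and a nested "+4(%ax)" as "+4(%ax)". This round-trip is
 * what lets the kprobe_events file re-list probe definitions verbatim.
 */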
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, int is_return)
{
	struct trace_probe *tp;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event)
		goto error;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group)
		goto error;
	tp->call.system = kstrdup(group, GFP_KERNEL);
	if (!tp->call.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(-ENOMEM);
}
static void free_probe_arg(struct probe_arg *arg)
{
	if (arg->fetch.func == fetch_symbol)
		free_symbol_cache(arg->fetch.data);
	else if (arg->fetch.func == fetch_indirect)
		free_indirect_fetch_data(arg->fetch.data);
	kfree(arg->name);
}
static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		free_probe_arg(&tp->args[i]);

	kfree(tp->call.system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}
static struct trace_probe *find_probe_event(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.system, group) == 0)
			return tp;
	return NULL;
}
/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static void unregister_trace_probe(struct trace_probe *tp)
{
	if (probe_is_return(tp))
		unregister_kretprobe(&tp->rp);
	else
		unregister_kprobe(&tp->rp.kp);
	list_del(&tp->list);
	unregister_probe_event(tp);
}
/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* register as an event */
	old_tp = find_probe_event(tp->call.name, tp->call.system);
	if (old_tp) {
		/* delete old event */
		unregister_trace_probe(old_tp);
		free_trace_probe(old_tp);
	}
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
	if (probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret) {
		pr_warning("Could not insert probe(%d)\n", ret);
		if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
		unregister_probe_event(tp);
	} else
		list_add_tail(&tp->list, &probe_list);
end:
	mutex_unlock(&probe_lock);
	return ret;
}
/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because strict_strtol doesn't accept '+' */
		ret = strict_strtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
		*tmp = '\0';
	} else
		*offset = 0;

	return 0;
}
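/*
 * Example (illustrative): given "do_fork+0x10", *offset becomes 0x10 and
 * the '+' is overwritten with '\0', truncating the buffer to the bare
 * symbol "do_fork" for the subsequent kallsyms lookup.
 */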
#define PARAM_MAX_ARGS	16
#define PARAM_MAX_STACK	(THREAD_SIZE / sizeof(unsigned long))
static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;

	if (strcmp(arg, "retval") == 0) {
		if (is_return) {
			ff->func = fetch_retvalue;
			ff->data = NULL;
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "stack", 5) == 0) {
		if (arg[5] == '\0') {
			ff->func = fetch_stack_address;
			ff->data = NULL;
		} else if (isdigit(arg[5])) {
			ret = strict_strtoul(arg + 5, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				ff->func = fetch_stack;
				ff->data = (void *)param;
			}
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
		ret = strict_strtoul(arg + 3, 10, &param);
		if (ret || param > PARAM_MAX_ARGS)
			ret = -EINVAL;
		else {
			ff->func = fetch_argument;
			ff->data = (void *)param;
		}
	} else
		ret = -EINVAL;
	return ret;
}
/* Recursive argument parser */
static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;
	long offset;
	char *tmp;

	switch (arg[0]) {
	case '$':
		ret = parse_probe_vars(arg + 1, ff, is_return);
		break;
	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			ff->func = fetch_register;
			ff->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			ret = strict_strtoul(arg + 1, 0, &param);
			if (ret)
				break;
			ff->func = fetch_memory;
			ff->data = (void *)param;
		} else {
			ret = split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;
			ff->data = alloc_symbol_cache(arg + 1, offset);
			if (ff->data)
				ff->func = fetch_symbol;
			else
				ret = -EINVAL;
		}
		break;
	case '+':	/* indirect memory */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp) {
			ret = -EINVAL;
			break;
		}
		*tmp = '\0';
		ret = strict_strtol(arg + 1, 0, &offset);
		if (ret)
			break;
		if (arg[0] == '-')
			offset = -offset;
		arg = tmp + 1;
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct indirect_fetch_data *id;
			*tmp = '\0';
			id = kzalloc(sizeof(struct indirect_fetch_data),
				     GFP_KERNEL);
			if (!id)
				return -ENOMEM;
			id->offset = offset;
			ret = __parse_probe_arg(arg, &id->orig, is_return);
			if (ret)
				kfree(id);
			else {
				ff->func = fetch_indirect;
				ff->data = (void *)id;
			}
		} else
			ret = -EINVAL;
		break;
	default:
		/* TODO: support custom handler */
		ret = -EINVAL;
	}
	return ret;
}
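/*
 * Example (illustrative): "+4(+8(%sp))" parses recursively. The outer
 * indirect_fetch_data holds offset 4 and its orig fetch is another
 * fetch_indirect (offset 8) wrapping a fetch_register of %sp, i.e. the
 * fetched value is *(*(sp + 8) + 4).
 */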
/* String length checking wrapper */
static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	if (strlen(arg) > MAX_ARGSTR_LEN) {
		pr_info("Argument is too long: %s\n", arg);
		return -ENOSPC;
	}
	return __parse_probe_arg(arg, ff, is_return);
}
/* Return 1 if name is reserved or already used by another argument */
static int conflict_field_name(const char *name,
			       struct probe_arg *args, int narg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
		if (strcmp(reserved_field_names[i], name) == 0)
			return 1;
	for (i = 0; i < narg; i++)
		if (strcmp(args[i].name, name) == 0)
			return 1;
	return 0;
}
static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $argN	: fetch the Nth function argument. (N:0-)
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch the Nth entry of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Indirect memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 */
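	/*
	 * Usage example (illustrative; paths assume debugfs is mounted at
	 * /sys/kernel/debug):
	 *
	 *   echo 'p:myprobe do_fork $arg0 flags=%ax' \
	 *	> /sys/kernel/debug/tracing/kprobe_events
	 *   echo 'r:myretprobe do_fork $retval' \
	 *	>> /sys/kernel/debug/tracing/kprobe_events
	 *   echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
	 */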
	struct trace_probe *tp;
	int i, ret = 0;
	int is_return = 0, is_delete = 0;
	char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = 0;
	else if (argv[0][0] == 'r')
		is_return = 1;
	else if (argv[0][0] == '-')
		is_delete = 1;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;
	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		tp = find_probe_event(event, group);
		if (!tp) {
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		unregister_trace_probe(tp);
		free_trace_probe(tp);
		return 0;
	}
	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;
	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
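	/*
	 * Example (illustrative): probing "do_fork" with no explicit name
	 * auto-generates the event name "p@do_fork+0" (or "r@..." for a
	 * return probe) from the format strings above.
	 */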
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}
	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg)
			*arg++ = '\0';
		else
			arg = argv[i];

		if (conflict_field_name(argv[i], tp->args, i)) {
			pr_info("Argument%d name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument%d name '%s'.\n",
				i, argv[i]);
			ret = -ENOMEM;
			goto error;
		}

		/* Parse fetch argument */
		ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
		if (ret) {
			pr_info("Parse error at argument%d. (%d)\n", i, ret);
			kfree(tp->args[i].name);
			goto error;
		}

		tp->nr_args++;
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;
error:
	free_trace_probe(tp);
	return ret;
}
static void cleanup_all_probes(void)
{
	struct trace_probe *tp;

	mutex_lock(&probe_lock);
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}
	mutex_unlock(&probe_lock);
}
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i, ret;
	char buf[MAX_ARGSTR_LEN + 1];

	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
	else
		seq_printf(m, " %s", probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++) {
		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
		if (ret < 0) {
			pr_warning("Argument%d decoding error(%d).\n", i, ret);
			return 0;
		}
		seq_printf(m, " %s=%s", tp->args[i].name, buf);
	}
	seq_printf(m, "\n");

	return 0;
}
static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};
static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}
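/*
 * Note (illustrative): because O_TRUNC triggers cleanup_all_probes(), a
 * plain
 *   echo > /sys/kernel/debug/tracing/kprobe_events
 * removes every registered probe, while appending with '>>' keeps the
 * existing ones.
 */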
static int command_trace_probe(const char *buf)
{
	char **argv;
	int argc = 0, ret = 0;

	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = create_trace_probe(argc, argv);

	argv_free(argv);
	return ret;
}
#define WRITE_BUFSIZE 128

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char *kbuf, *tmp;
	int ret;
	size_t done;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = done = 0;
	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;
		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');
		if (tmp) {
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			pr_warning("Line length is too long: "
				   "Should be less than %d.", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');
		if (tmp)
			*tmp = '\0';

		ret = command_trace_probe(kbuf);
		if (ret)
			goto out;
	}
	ret = done;
out:
	kfree(kbuf);
	return ret;
}
static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}
static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
/* Kprobe handler */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}
/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);

	return 0;
}
/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
	struct kprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
	struct kretprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kretprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static int probe_event_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_TRACE;
	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}
static void probe_event_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_TRACE;
	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}
static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
	INIT_LIST_HEAD(&event_call->fields);

	return 0;
}
#define DEFINE_FIELD(type, item, name, is_signed)			\
	do {								\
		ret = trace_define_field(event_call, #type, name,	\
					 offsetof(typeof(field), item),	\
					 sizeof(field.item), is_signed, \
					 FILTER_OTHER);			\
		if (ret)						\
			return ret;					\
	} while (0)
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}
static int __probe_event_show_format(struct trace_seq *s,
				     struct trace_probe *tp, const char *fmt,
				     const char *arg)
{
	int i;

	/* Show format */
	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
			return 0;

	if (!trace_seq_printf(s, "\", %s", arg))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
			return 0;

	return trace_seq_puts(s, "\n");
}
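/*
 * Example (illustrative): for a kprobe with arguments named "a" and "b"
 * the emitted line in the event's format file looks like
 *   print fmt: "(%lx) a=%lx b=%lx", REC->__probe_ip, REC->a, REC->b
 */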
#define SHOW_FIELD(type, item, name)					\
	do {								\
		ret = trace_seq_printf(s, "\tfield: " #type " %s;\t"	\
				"offset:%u;\tsize:%u;\n", name,		\
				(unsigned int)offsetof(typeof(field), item),\
				(unsigned int)sizeof(type));		\
		if (!ret)						\
			return 0;					\
	} while (0)
static int kprobe_event_show_format(struct ftrace_event_call *call,
				    struct trace_seq *s)
{
	struct kprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx)",
					 "REC->" FIELD_STRING_IP);
}
static int kretprobe_event_show_format(struct ftrace_event_call *call,
				       struct trace_seq *s)
{
	struct kretprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
	SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
					 "REC->" FIELD_STRING_FUNC
					 ", REC->" FIELD_STRING_RETIP);
}
#ifdef CONFIG_EVENT_PROFILE

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
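	/*
	 * Worked example (illustrative): perf prepends a u32 size header to
	 * raw samples and requires u64 alignment of the total, so for
	 * __size == 24 we get size = ALIGN(24 + 4, 8) - 4 = 28; the padding
	 * bytes are zeroed below so they cannot leak to userspace.
	 */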
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}
/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kretprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}
static int probe_profile_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_PROFILE;

	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}
static void probe_profile_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_PROFILE;

	if (!(tp->flags & TP_FLAG_TRACE)) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}
#endif	/* CONFIG_EVENT_PROFILE */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_profile_func(ri, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
	if (!call->id)
		return -ENODEV;
	call->enabled = 0;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		unregister_ftrace_event(&tp->event);
	}
	return ret;
}
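/*
 * Note (illustrative): once trace_add_event_call() succeeds, the probe
 * shows up like any other trace event, e.g. under
 * /sys/kernel/debug/tracing/events/kprobes/<EVENT>/ with enable, format
 * and filter files.
 */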
static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
}
/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret;
	int (*target)(int, int, int, int, int, int);

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
				  "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function entry\n");

	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function return\n");

	ret = target(1, 2, 3, 4, 5, 6);

	cleanup_all_probes();

	pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif	/* CONFIG_FTRACE_STARTUP_TEST */