tracing/probes: Implement 'memory' fetch method for uprobes
1 /*
2  * Kprobes-based tracing events
3  *
4  * Created by Masami Hiramatsu <mhiramat@redhat.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  */
19
20 #include <linux/module.h>
21 #include <linux/uaccess.h>
22
23 #include "trace_probe.h"
24
25 #define KPROBE_EVENT_SYSTEM "kprobes"
26
27 /**
28  * Kprobe event core functions
29  */
30 struct trace_kprobe {
31         struct list_head        list;
32         struct kretprobe        rp;     /* Use rp.kp for kprobe use */
33         unsigned long           nhit;
34         const char              *symbol;        /* symbol name */
35         struct trace_probe      tp;
36 };
37
38 struct event_file_link {
39         struct ftrace_event_file        *file;
40         struct list_head                list;
41 };
42
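/*
 * The embedded trace_probe ends with a flexible args[] array and is the last
 * member of trace_kprobe, so a probe carrying n arguments is sized as the
 * offset of tp.args plus n probe_arg slots.
 */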
43 #define SIZEOF_TRACE_KPROBE(n)                          \
44         (offsetof(struct trace_kprobe, tp.args) +       \
45         (sizeof(struct probe_arg) * (n)))
46
47
48 static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk)
49 {
50         return tk->rp.handler != NULL;
51 }
52
53 static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk)
54 {
55         return tk->symbol ? tk->symbol : "unknown";
56 }
57
58 static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
59 {
60         return tk->rp.kp.offset;
61 }
62
63 static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk)
64 {
65         return !!(kprobe_gone(&tk->rp.kp));
66 }
67
68 static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
69                                                  struct module *mod)
70 {
71         int len = strlen(mod->name);
72         const char *name = trace_kprobe_symbol(tk);
73         return strncmp(mod->name, name, len) == 0 && name[len] == ':';
74 }
75
76 static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
77 {
78         return !!strchr(trace_kprobe_symbol(tk), ':');
79 }
80
81 static int register_kprobe_event(struct trace_kprobe *tk);
82 static int unregister_kprobe_event(struct trace_kprobe *tk);
83
84 static DEFINE_MUTEX(probe_lock);
85 static LIST_HEAD(probe_list);
86
87 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
88 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
89                                 struct pt_regs *regs);
90
91 /* Memory fetching by symbol */
92 struct symbol_cache {
93         char            *symbol;
94         long            offset;
95         unsigned long   addr;
96 };
97
98 unsigned long update_symbol_cache(struct symbol_cache *sc)
99 {
100         sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
101
102         if (sc->addr)
103                 sc->addr += sc->offset;
104
105         return sc->addr;
106 }
107
108 void free_symbol_cache(struct symbol_cache *sc)
109 {
110         kfree(sc->symbol);
111         kfree(sc);
112 }
113
114 struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
115 {
116         struct symbol_cache *sc;
117
118         if (!sym || strlen(sym) == 0)
119                 return NULL;
120
121         sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
122         if (!sc)
123                 return NULL;
124
125         sc->symbol = kstrdup(sym, GFP_KERNEL);
126         if (!sc->symbol) {
127                 kfree(sc);
128                 return NULL;
129         }
130         sc->offset = offset;
131         update_symbol_cache(sc);
132
133         return sc;
134 }
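
/*
 * The symbol_cache above backs the "@SYM[+|-offs]" fetch argument: the symbol
 * is resolved through kallsyms_lookup_name() in update_symbol_cache(), and the
 * fetch_symbol_* helpers defined below simply read memory at the cached
 * sc->addr on each probe hit (or store 0 if the symbol was not found).
 */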
135
136 /*
137  * Kprobes-specific fetch functions
138  */
139 #define DEFINE_FETCH_stack(type)                                        \
140 static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
141                                           void *offset, void *dest)     \
142 {                                                                       \
143         *(type *)dest = (type)regs_get_kernel_stack_nth(regs,           \
144                                 (unsigned int)((unsigned long)offset)); \
145 }
146 DEFINE_BASIC_FETCH_FUNCS(stack)
147 /* No string on the stack entry */
148 #define fetch_stack_string      NULL
149 #define fetch_stack_string_size NULL
150
151 #define DEFINE_FETCH_memory(type)                                       \
152 static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
153                                           void *addr, void *dest)       \
154 {                                                                       \
155         type retval;                                                    \
156         if (probe_kernel_address(addr, retval))                         \
157                 *(type *)dest = 0;                                      \
158         else                                                            \
159                 *(type *)dest = retval;                                 \
160 }
161 DEFINE_BASIC_FETCH_FUNCS(memory)
162 /*
163  * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
164  * length and relative data location.
165  */
166 static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
167                                                       void *addr, void *dest)
168 {
169         long ret;
170         int maxlen = get_rloc_len(*(u32 *)dest);
171         u8 *dst = get_rloc_data(dest);
172         u8 *src = addr;
173         mm_segment_t old_fs = get_fs();
174
175         if (!maxlen)
176                 return;
177
178         /*
179          * Try to get string again, since the string can be changed while
180          * probing.
181          */
182         set_fs(KERNEL_DS);
183         pagefault_disable();
184
185         do
186                 ret = __copy_from_user_inatomic(dst++, src++, 1);
187         while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
188
189         dst[-1] = '\0';
190         pagefault_enable();
191         set_fs(old_fs);
192
193         if (ret < 0) {  /* Failed to fetch string */
194                 ((u8 *)get_rloc_data(dest))[0] = '\0';
195                 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
196         } else {
197                 *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
198                                               get_rloc_offs(*(u32 *)dest));
199         }
200 }
201
202 /* Return the length of the string, including the terminating null byte */
203 static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
204                                                         void *addr, void *dest)
205 {
206         mm_segment_t old_fs;
207         int ret, len = 0;
208         u8 c;
209
210         old_fs = get_fs();
211         set_fs(KERNEL_DS);
212         pagefault_disable();
213
214         do {
215                 ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
216                 len++;
217         } while (c && ret == 0 && len < MAX_STRING_SIZE);
218
219         pagefault_enable();
220         set_fs(old_fs);
221
222         if (ret < 0)    /* Failed to check the length */
223                 *(u32 *)dest = 0;
224         else
225                 *(u32 *)dest = len;
226 }
227
228 #define DEFINE_FETCH_symbol(type)                                       \
229 __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,      \
230                                           void *data, void *dest)       \
231 {                                                                       \
232         struct symbol_cache *sc = data;                                 \
233         if (sc->addr)                                                   \
234                 fetch_memory_##type(regs, (void *)sc->addr, dest);      \
235         else                                                            \
236                 *(type *)dest = 0;                                      \
237 }
238 DEFINE_BASIC_FETCH_FUNCS(symbol)
239 DEFINE_FETCH_symbol(string)
240 DEFINE_FETCH_symbol(string_size)
241
242 /* Fetch type information table */
243 const struct fetch_type kprobes_fetch_type_table[] = {
244         /* Special types */
245         [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
246                                         sizeof(u32), 1, "__data_loc char[]"),
247         [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
248                                         string_size, sizeof(u32), 0, "u32"),
249         /* Basic types */
250         ASSIGN_FETCH_TYPE(u8,  u8,  0),
251         ASSIGN_FETCH_TYPE(u16, u16, 0),
252         ASSIGN_FETCH_TYPE(u32, u32, 0),
253         ASSIGN_FETCH_TYPE(u64, u64, 0),
254         ASSIGN_FETCH_TYPE(s8,  u8,  1),
255         ASSIGN_FETCH_TYPE(s16, u16, 1),
256         ASSIGN_FETCH_TYPE(s32, u32, 1),
257         ASSIGN_FETCH_TYPE(s64, u64, 1),
258
259         ASSIGN_FETCH_TYPE_END
260 };
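
/*
 * Illustrative example (not from the original source; register names are
 * arch-specific): a probe argument written as "+0(%si):string" resolves to
 * the "string" entry above, so the string fetch and print helpers are used
 * instead of the default unsigned long handling.
 */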
261
262 /*
263  * Allocate new trace_probe and initialize it (including kprobes).
264  */
265 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
266                                              const char *event,
267                                              void *addr,
268                                              const char *symbol,
269                                              unsigned long offs,
270                                              int nargs, bool is_return)
271 {
272         struct trace_kprobe *tk;
273         int ret = -ENOMEM;
274
275         tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
276         if (!tk)
277                 return ERR_PTR(ret);
278
279         if (symbol) {
280                 tk->symbol = kstrdup(symbol, GFP_KERNEL);
281                 if (!tk->symbol)
282                         goto error;
283                 tk->rp.kp.symbol_name = tk->symbol;
284                 tk->rp.kp.offset = offs;
285         } else
286                 tk->rp.kp.addr = addr;
287
288         if (is_return)
289                 tk->rp.handler = kretprobe_dispatcher;
290         else
291                 tk->rp.kp.pre_handler = kprobe_dispatcher;
292
293         if (!event || !is_good_name(event)) {
294                 ret = -EINVAL;
295                 goto error;
296         }
297
298         tk->tp.call.class = &tk->tp.class;
299         tk->tp.call.name = kstrdup(event, GFP_KERNEL);
300         if (!tk->tp.call.name)
301                 goto error;
302
303         if (!group || !is_good_name(group)) {
304                 ret = -EINVAL;
305                 goto error;
306         }
307
308         tk->tp.class.system = kstrdup(group, GFP_KERNEL);
309         if (!tk->tp.class.system)
310                 goto error;
311
312         INIT_LIST_HEAD(&tk->list);
313         INIT_LIST_HEAD(&tk->tp.files);
314         return tk;
315 error:
316         kfree(tk->tp.call.name);
317         kfree(tk->symbol);
318         kfree(tk);
319         return ERR_PTR(ret);
320 }
321
322 static void free_trace_kprobe(struct trace_kprobe *tk)
323 {
324         int i;
325
326         for (i = 0; i < tk->tp.nr_args; i++)
327                 traceprobe_free_probe_arg(&tk->tp.args[i]);
328
329         kfree(tk->tp.call.class->system);
330         kfree(tk->tp.call.name);
331         kfree(tk->symbol);
332         kfree(tk);
333 }
334
335 static struct trace_kprobe *find_trace_kprobe(const char *event,
336                                               const char *group)
337 {
338         struct trace_kprobe *tk;
339
340         list_for_each_entry(tk, &probe_list, list)
341                 if (strcmp(tk->tp.call.name, event) == 0 &&
342                     strcmp(tk->tp.call.class->system, group) == 0)
343                         return tk;
344         return NULL;
345 }
346
347 /*
348  * Enable trace_probe.
349  * If file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
350  */
351 static int
352 enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
353 {
354         int ret = 0;
355
356         if (file) {
357                 struct event_file_link *link;
358
359                 link = kmalloc(sizeof(*link), GFP_KERNEL);
360                 if (!link) {
361                         ret = -ENOMEM;
362                         goto out;
363                 }
364
365                 link->file = file;
366                 list_add_tail_rcu(&link->list, &tk->tp.files);
367
368                 tk->tp.flags |= TP_FLAG_TRACE;
369         } else
370                 tk->tp.flags |= TP_FLAG_PROFILE;
371
372         if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
373                 if (trace_kprobe_is_return(tk))
374                         ret = enable_kretprobe(&tk->rp);
375                 else
376                         ret = enable_kprobe(&tk->rp.kp);
377         }
378  out:
379         return ret;
380 }
381
382 static struct event_file_link *
383 find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
384 {
385         struct event_file_link *link;
386
387         list_for_each_entry(link, &tp->files, list)
388                 if (link->file == file)
389                         return link;
390
391         return NULL;
392 }
393
394 /*
395  * Disable trace_probe.
396  * If file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
397  */
398 static int
399 disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
400 {
401         struct event_file_link *link = NULL;
402         int wait = 0;
403         int ret = 0;
404
405         if (file) {
406                 link = find_event_file_link(&tk->tp, file);
407                 if (!link) {
408                         ret = -EINVAL;
409                         goto out;
410                 }
411
412                 list_del_rcu(&link->list);
413                 wait = 1;
414                 if (!list_empty(&tk->tp.files))
415                         goto out;
416
417                 tk->tp.flags &= ~TP_FLAG_TRACE;
418         } else
419                 tk->tp.flags &= ~TP_FLAG_PROFILE;
420
421         if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
422                 if (trace_kprobe_is_return(tk))
423                         disable_kretprobe(&tk->rp);
424                 else
425                         disable_kprobe(&tk->rp.kp);
426                 wait = 1;
427         }
428  out:
429         if (wait) {
430                 /*
431                  * Synchronize with kprobe_trace_func/kretprobe_trace_func
432                  * to ensure disabled (all running handlers are finished).
433                  * This is not only for kfree(), but also the caller,
434                  * trace_remove_event_call() supposes it for releasing
435                  * event_call related objects, which will be accessed in
436                  * the kprobe_trace_func/kretprobe_trace_func.
437                  */
438                 synchronize_sched();
439                 kfree(link);    /* Ignored if link == NULL */
440         }
441
442         return ret;
443 }
444
445 /* Internal register function - just handle k*probes and flags */
446 static int __register_trace_kprobe(struct trace_kprobe *tk)
447 {
448         int i, ret;
449
450         if (trace_probe_is_registered(&tk->tp))
451                 return -EINVAL;
452
453         for (i = 0; i < tk->tp.nr_args; i++)
454                 traceprobe_update_arg(&tk->tp.args[i]);
455
456         /* Set/clear the disabled flag according to tp->flags */
457         if (trace_probe_is_enabled(&tk->tp))
458                 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
459         else
460                 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
461
462         if (trace_kprobe_is_return(tk))
463                 ret = register_kretprobe(&tk->rp);
464         else
465                 ret = register_kprobe(&tk->rp.kp);
466
467         if (ret == 0)
468                 tk->tp.flags |= TP_FLAG_REGISTERED;
469         else {
470                 pr_warning("Could not insert probe at %s+%lu: %d\n",
471                            trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
472                 if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
473                         pr_warning("This probe might be able to register after"
474                                    " target module is loaded. Continue.\n");
475                         ret = 0;
476                 } else if (ret == -EILSEQ) {
477                         pr_warning("Probing address(0x%p) is not an "
478                                    "instruction boundary.\n",
479                                    tk->rp.kp.addr);
480                         ret = -EINVAL;
481                 }
482         }
483
484         return ret;
485 }
486
487 /* Internal unregister function - just handle k*probes and flags */
488 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
489 {
490         if (trace_probe_is_registered(&tk->tp)) {
491                 if (trace_kprobe_is_return(tk))
492                         unregister_kretprobe(&tk->rp);
493                 else
494                         unregister_kprobe(&tk->rp.kp);
495                 tk->tp.flags &= ~TP_FLAG_REGISTERED;
496                 /* Cleanup kprobe for reuse */
497                 if (tk->rp.kp.symbol_name)
498                         tk->rp.kp.addr = NULL;
499         }
500 }
501
502 /* Unregister a trace_probe and probe_event: call with probe_lock held */
503 static int unregister_trace_kprobe(struct trace_kprobe *tk)
504 {
505         /* An enabled event cannot be unregistered */
506         if (trace_probe_is_enabled(&tk->tp))
507                 return -EBUSY;
508
509         /* Will fail if probe is being used by ftrace or perf */
510         if (unregister_kprobe_event(tk))
511                 return -EBUSY;
512
513         __unregister_trace_kprobe(tk);
514         list_del(&tk->list);
515
516         return 0;
517 }
518
519 /* Register a trace_probe and probe_event */
520 static int register_trace_kprobe(struct trace_kprobe *tk)
521 {
522         struct trace_kprobe *old_tk;
523         int ret;
524
525         mutex_lock(&probe_lock);
526
527         /* Delete old (same name) event if it exists */
528         old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system);
529         if (old_tk) {
530                 ret = unregister_trace_kprobe(old_tk);
531                 if (ret < 0)
532                         goto end;
533                 free_trace_kprobe(old_tk);
534         }
535
536         /* Register new event */
537         ret = register_kprobe_event(tk);
538         if (ret) {
539                 pr_warning("Failed to register probe event(%d)\n", ret);
540                 goto end;
541         }
542
543         /* Register k*probe */
544         ret = __register_trace_kprobe(tk);
545         if (ret < 0)
546                 unregister_kprobe_event(tk);
547         else
548                 list_add_tail(&tk->list, &probe_list);
549
550 end:
551         mutex_unlock(&probe_lock);
552         return ret;
553 }
554
555 /* Module notifier callback, checking events on the module */
556 static int trace_kprobe_module_callback(struct notifier_block *nb,
557                                        unsigned long val, void *data)
558 {
559         struct module *mod = data;
560         struct trace_kprobe *tk;
561         int ret;
562
563         if (val != MODULE_STATE_COMING)
564                 return NOTIFY_DONE;
565
566         /* Update probes on coming module */
567         mutex_lock(&probe_lock);
568         list_for_each_entry(tk, &probe_list, list) {
569                 if (trace_kprobe_within_module(tk, mod)) {
570                         /* Don't need to check busy - this should have gone. */
571                         __unregister_trace_kprobe(tk);
572                         ret = __register_trace_kprobe(tk);
573                         if (ret)
574                                 pr_warning("Failed to re-register probe %s on"
575                                            " %s: %d\n",
576                                            tk->tp.call.name, mod->name, ret);
577                 }
578         }
579         mutex_unlock(&probe_lock);
580
581         return NOTIFY_DONE;
582 }
583
584 static struct notifier_block trace_kprobe_module_nb = {
585         .notifier_call = trace_kprobe_module_callback,
586         .priority = 1   /* Invoked after kprobe module callback */
587 };
588
589 static int create_trace_kprobe(int argc, char **argv)
590 {
591         /*
592          * Argument syntax:
593          *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
594          *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
595          * Fetch args:
596          *  $retval     : fetch return value
597          *  $stack      : fetch stack address
598          *  $stackN     : fetch Nth of stack (N:0-)
599          *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
600          *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
601          *  %REG        : fetch register REG
602          * Dereferencing memory fetch:
603          *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
604          * Alias name of args:
605          *  NAME=FETCHARG : set NAME as alias of FETCHARG.
606          * Type of args:
607          *  FETCHARG:TYPE : use TYPE instead of unsigned long.
608          */
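        /*
         * Illustrative usage via the kprobe_events file created below (paths
         * and register names are examples only; registers are arch-specific):
         *   echo 'p:myprobe do_sys_open dfd=%ax mode=+4($stack)' \
         *        > /sys/kernel/debug/tracing/kprobe_events
         *   echo 'r:myretprobe do_sys_open $retval' \
         *        >> /sys/kernel/debug/tracing/kprobe_events
         *   echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
         */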
609         struct trace_kprobe *tk;
610         int i, ret = 0;
611         bool is_return = false, is_delete = false;
612         char *symbol = NULL, *event = NULL, *group = NULL;
613         char *arg;
614         unsigned long offset = 0;
615         void *addr = NULL;
616         char buf[MAX_EVENT_NAME_LEN];
617
618         /* argc must be >= 1 */
619         if (argv[0][0] == 'p')
620                 is_return = false;
621         else if (argv[0][0] == 'r')
622                 is_return = true;
623         else if (argv[0][0] == '-')
624                 is_delete = true;
625         else {
626                 pr_info("Probe definition must be started with 'p', 'r' or"
627                         " '-'.\n");
628                 return -EINVAL;
629         }
630
631         if (argv[0][1] == ':') {
632                 event = &argv[0][2];
633                 if (strchr(event, '/')) {
634                         group = event;
635                         event = strchr(group, '/') + 1;
636                         event[-1] = '\0';
637                         if (strlen(group) == 0) {
638                                 pr_info("Group name is not specified\n");
639                                 return -EINVAL;
640                         }
641                 }
642                 if (strlen(event) == 0) {
643                         pr_info("Event name is not specified\n");
644                         return -EINVAL;
645                 }
646         }
647         if (!group)
648                 group = KPROBE_EVENT_SYSTEM;
649
650         if (is_delete) {
651                 if (!event) {
652                         pr_info("Delete command needs an event name.\n");
653                         return -EINVAL;
654                 }
655                 mutex_lock(&probe_lock);
656                 tk = find_trace_kprobe(event, group);
657                 if (!tk) {
658                         mutex_unlock(&probe_lock);
659                         pr_info("Event %s/%s doesn't exist.\n", group, event);
660                         return -ENOENT;
661                 }
662                 /* delete an event */
663                 ret = unregister_trace_kprobe(tk);
664                 if (ret == 0)
665                         free_trace_kprobe(tk);
666                 mutex_unlock(&probe_lock);
667                 return ret;
668         }
669
670         if (argc < 2) {
671                 pr_info("Probe point is not specified.\n");
672                 return -EINVAL;
673         }
674         if (isdigit(argv[1][0])) {
675                 if (is_return) {
676                         pr_info("Return probe point must be a symbol.\n");
677                         return -EINVAL;
678                 }
679                 /* an address specified */
680                 ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
681                 if (ret) {
682                         pr_info("Failed to parse address.\n");
683                         return ret;
684                 }
685         } else {
686                 /* a symbol specified */
687                 symbol = argv[1];
688                 /* TODO: support .init module functions */
689                 ret = traceprobe_split_symbol_offset(symbol, &offset);
690                 if (ret) {
691                         pr_info("Failed to parse symbol.\n");
692                         return ret;
693                 }
694                 if (offset && is_return) {
695                         pr_info("Return probe must be used without offset.\n");
696                         return -EINVAL;
697                 }
698         }
699         argc -= 2; argv += 2;
700
701         /* setup a probe */
702         if (!event) {
703                 /* Make a new event name */
704                 if (symbol)
705                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
706                                  is_return ? 'r' : 'p', symbol, offset);
707                 else
708                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
709                                  is_return ? 'r' : 'p', addr);
710                 event = buf;
711         }
712         tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
713                                is_return);
714         if (IS_ERR(tk)) {
715                 pr_info("Failed to allocate trace_probe.(%d)\n",
716                         (int)PTR_ERR(tk));
717                 return PTR_ERR(tk);
718         }
719
720         /* parse arguments */
721         ret = 0;
722         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
723                 struct probe_arg *parg = &tk->tp.args[i];
724
725                 /* Increment count for freeing args in error case */
726                 tk->tp.nr_args++;
727
728                 /* Parse argument name */
729                 arg = strchr(argv[i], '=');
730                 if (arg) {
731                         *arg++ = '\0';
732                         parg->name = kstrdup(argv[i], GFP_KERNEL);
733                 } else {
734                         arg = argv[i];
735                         /* If argument name is omitted, set "argN" */
736                         snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
737                         parg->name = kstrdup(buf, GFP_KERNEL);
738                 }
739
740                 if (!parg->name) {
741                         pr_info("Failed to allocate argument[%d] name.\n", i);
742                         ret = -ENOMEM;
743                         goto error;
744                 }
745
746                 if (!is_good_name(parg->name)) {
747                         pr_info("Invalid argument[%d] name: %s\n",
748                                 i, parg->name);
749                         ret = -EINVAL;
750                         goto error;
751                 }
752
753                 if (traceprobe_conflict_field_name(parg->name,
754                                                         tk->tp.args, i)) {
755                         pr_info("Argument[%d] name '%s' conflicts with "
756                                 "another field.\n", i, argv[i]);
757                         ret = -EINVAL;
758                         goto error;
759                 }
760
761                 /* Parse fetch argument */
762                 ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
763                                                 is_return, true);
764                 if (ret) {
765                         pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
766                         goto error;
767                 }
768         }
769
770         ret = register_trace_kprobe(tk);
771         if (ret)
772                 goto error;
773         return 0;
774
775 error:
776         free_trace_kprobe(tk);
777         return ret;
778 }
779
780 static int release_all_trace_kprobes(void)
781 {
782         struct trace_kprobe *tk;
783         int ret = 0;
784
785         mutex_lock(&probe_lock);
786         /* Ensure no probe is in use. */
787         list_for_each_entry(tk, &probe_list, list)
788                 if (trace_probe_is_enabled(&tk->tp)) {
789                         ret = -EBUSY;
790                         goto end;
791                 }
792         /* TODO: Use batch unregistration */
793         while (!list_empty(&probe_list)) {
794                 tk = list_entry(probe_list.next, struct trace_kprobe, list);
795                 ret = unregister_trace_kprobe(tk);
796                 if (ret)
797                         goto end;
798                 free_trace_kprobe(tk);
799         }
800
801 end:
802         mutex_unlock(&probe_lock);
803
804         return ret;
805 }
806
807 /* Probes listing interfaces */
808 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
809 {
810         mutex_lock(&probe_lock);
811         return seq_list_start(&probe_list, *pos);
812 }
813
814 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
815 {
816         return seq_list_next(v, &probe_list, pos);
817 }
818
819 static void probes_seq_stop(struct seq_file *m, void *v)
820 {
821         mutex_unlock(&probe_lock);
822 }
823
824 static int probes_seq_show(struct seq_file *m, void *v)
825 {
826         struct trace_kprobe *tk = v;
827         int i;
828
829         seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
830         seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name);
831
832         if (!tk->symbol)
833                 seq_printf(m, " 0x%p", tk->rp.kp.addr);
834         else if (tk->rp.kp.offset)
835                 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
836                            tk->rp.kp.offset);
837         else
838                 seq_printf(m, " %s", trace_kprobe_symbol(tk));
839
840         for (i = 0; i < tk->tp.nr_args; i++)
841                 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
842         seq_printf(m, "\n");
843
844         return 0;
845 }
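
/*
 * The line emitted above mirrors the definition syntax accepted by
 * probes_write(), e.g. (illustrative, register name is arch-specific):
 *   p:kprobes/myprobe do_sys_open dfd=%ax mode=+4($stack)
 */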
846
847 static const struct seq_operations probes_seq_op = {
848         .start  = probes_seq_start,
849         .next   = probes_seq_next,
850         .stop   = probes_seq_stop,
851         .show   = probes_seq_show
852 };
853
854 static int probes_open(struct inode *inode, struct file *file)
855 {
856         int ret;
857
858         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
859                 ret = release_all_trace_kprobes();
860                 if (ret < 0)
861                         return ret;
862         }
863
864         return seq_open(file, &probes_seq_op);
865 }
866
867 static ssize_t probes_write(struct file *file, const char __user *buffer,
868                             size_t count, loff_t *ppos)
869 {
870         return traceprobe_probes_write(file, buffer, count, ppos,
871                         create_trace_kprobe);
872 }
873
874 static const struct file_operations kprobe_events_ops = {
875         .owner          = THIS_MODULE,
876         .open           = probes_open,
877         .read           = seq_read,
878         .llseek         = seq_lseek,
879         .release        = seq_release,
880         .write          = probes_write,
881 };
882
883 /* Probes profiling interfaces */
884 static int probes_profile_seq_show(struct seq_file *m, void *v)
885 {
886         struct trace_kprobe *tk = v;
887
888         seq_printf(m, "  %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit,
889                    tk->rp.kp.nmissed);
890
891         return 0;
892 }
893
894 static const struct seq_operations profile_seq_op = {
895         .start  = probes_seq_start,
896         .next   = probes_seq_next,
897         .stop   = probes_seq_stop,
898         .show   = probes_profile_seq_show
899 };
900
901 static int profile_open(struct inode *inode, struct file *file)
902 {
903         return seq_open(file, &profile_seq_op);
904 }
905
906 static const struct file_operations kprobe_profile_ops = {
907         .owner          = THIS_MODULE,
908         .open           = profile_open,
909         .read           = seq_read,
910         .llseek         = seq_lseek,
911         .release        = seq_release,
912 };
913
914 /* Kprobe handler */
915 static __kprobes void
916 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
917                     struct ftrace_event_file *ftrace_file)
918 {
919         struct kprobe_trace_entry_head *entry;
920         struct ring_buffer_event *event;
921         struct ring_buffer *buffer;
922         int size, dsize, pc;
923         unsigned long irq_flags;
924         struct ftrace_event_call *call = &tk->tp.call;
925
926         WARN_ON(call != ftrace_file->event_call);
927
928         if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
929                 return;
930
931         local_save_flags(irq_flags);
932         pc = preempt_count();
933
934         dsize = __get_data_size(&tk->tp, regs);
935         size = sizeof(*entry) + tk->tp.size + dsize;
936
937         event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
938                                                 call->event.type,
939                                                 size, irq_flags, pc);
940         if (!event)
941                 return;
942
943         entry = ring_buffer_event_data(event);
944         entry->ip = (unsigned long)tk->rp.kp.addr;
945         store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
946
947         if (!filter_check_discard(ftrace_file, entry, buffer, event))
948                 trace_buffer_unlock_commit_regs(buffer, event,
949                                                 irq_flags, pc, regs);
950 }
951
952 static __kprobes void
953 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
954 {
955         struct event_file_link *link;
956
957         list_for_each_entry_rcu(link, &tk->tp.files, list)
958                 __kprobe_trace_func(tk, regs, link->file);
959 }
960
961 /* Kretprobe handler */
962 static __kprobes void
963 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
964                        struct pt_regs *regs,
965                        struct ftrace_event_file *ftrace_file)
966 {
967         struct kretprobe_trace_entry_head *entry;
968         struct ring_buffer_event *event;
969         struct ring_buffer *buffer;
970         int size, pc, dsize;
971         unsigned long irq_flags;
972         struct ftrace_event_call *call = &tk->tp.call;
973
974         WARN_ON(call != ftrace_file->event_call);
975
976         if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
977                 return;
978
979         local_save_flags(irq_flags);
980         pc = preempt_count();
981
982         dsize = __get_data_size(&tk->tp, regs);
983         size = sizeof(*entry) + tk->tp.size + dsize;
984
985         event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
986                                                 call->event.type,
987                                                 size, irq_flags, pc);
988         if (!event)
989                 return;
990
991         entry = ring_buffer_event_data(event);
992         entry->func = (unsigned long)tk->rp.kp.addr;
993         entry->ret_ip = (unsigned long)ri->ret_addr;
994         store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
995
996         if (!filter_check_discard(ftrace_file, entry, buffer, event))
997                 trace_buffer_unlock_commit_regs(buffer, event,
998                                                 irq_flags, pc, regs);
999 }
1000
1001 static __kprobes void
1002 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1003                      struct pt_regs *regs)
1004 {
1005         struct event_file_link *link;
1006
1007         list_for_each_entry_rcu(link, &tk->tp.files, list)
1008                 __kretprobe_trace_func(tk, ri, regs, link->file);
1009 }
1010
1011 /* Event entry printers */
1012 static enum print_line_t
1013 print_kprobe_event(struct trace_iterator *iter, int flags,
1014                    struct trace_event *event)
1015 {
1016         struct kprobe_trace_entry_head *field;
1017         struct trace_seq *s = &iter->seq;
1018         struct trace_probe *tp;
1019         u8 *data;
1020         int i;
1021
1022         field = (struct kprobe_trace_entry_head *)iter->ent;
1023         tp = container_of(event, struct trace_probe, call.event);
1024
1025         if (!trace_seq_printf(s, "%s: (", tp->call.name))
1026                 goto partial;
1027
1028         if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1029                 goto partial;
1030
1031         if (!trace_seq_puts(s, ")"))
1032                 goto partial;
1033
1034         data = (u8 *)&field[1];
1035         for (i = 0; i < tp->nr_args; i++)
1036                 if (!tp->args[i].type->print(s, tp->args[i].name,
1037                                              data + tp->args[i].offset, field))
1038                         goto partial;
1039
1040         if (!trace_seq_puts(s, "\n"))
1041                 goto partial;
1042
1043         return TRACE_TYPE_HANDLED;
1044 partial:
1045         return TRACE_TYPE_PARTIAL_LINE;
1046 }
1047
1048 static enum print_line_t
1049 print_kretprobe_event(struct trace_iterator *iter, int flags,
1050                       struct trace_event *event)
1051 {
1052         struct kretprobe_trace_entry_head *field;
1053         struct trace_seq *s = &iter->seq;
1054         struct trace_probe *tp;
1055         u8 *data;
1056         int i;
1057
1058         field = (struct kretprobe_trace_entry_head *)iter->ent;
1059         tp = container_of(event, struct trace_probe, call.event);
1060
1061         if (!trace_seq_printf(s, "%s: (", tp->call.name))
1062                 goto partial;
1063
1064         if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1065                 goto partial;
1066
1067         if (!trace_seq_puts(s, " <- "))
1068                 goto partial;
1069
1070         if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1071                 goto partial;
1072
1073         if (!trace_seq_puts(s, ")"))
1074                 goto partial;
1075
1076         data = (u8 *)&field[1];
1077         for (i = 0; i < tp->nr_args; i++)
1078                 if (!tp->args[i].type->print(s, tp->args[i].name,
1079                                              data + tp->args[i].offset, field))
1080                         goto partial;
1081
1082         if (!trace_seq_puts(s, "\n"))
1083                 goto partial;
1084
1085         return TRACE_TYPE_HANDLED;
1086 partial:
1087         return TRACE_TYPE_PARTIAL_LINE;
1088 }
1089
1090
1091 static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1092 {
1093         int ret, i;
1094         struct kprobe_trace_entry_head field;
1095         struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1096
1097         DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1098         /* Set argument names as fields */
1099         for (i = 0; i < tk->tp.nr_args; i++) {
1100                 struct probe_arg *parg = &tk->tp.args[i];
1101
1102                 ret = trace_define_field(event_call, parg->type->fmttype,
1103                                          parg->name,
1104                                          sizeof(field) + parg->offset,
1105                                          parg->type->size,
1106                                          parg->type->is_signed,
1107                                          FILTER_OTHER);
1108                 if (ret)
1109                         return ret;
1110         }
1111         return 0;
1112 }
1113
1114 static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1115 {
1116         int ret, i;
1117         struct kretprobe_trace_entry_head field;
1118         struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1119
1120         DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1121         DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1122         /* Set argument names as fields */
1123         for (i = 0; i < tk->tp.nr_args; i++) {
1124                 struct probe_arg *parg = &tk->tp.args[i];
1125
1126                 ret = trace_define_field(event_call, parg->type->fmttype,
1127                                          parg->name,
1128                                          sizeof(field) + parg->offset,
1129                                          parg->type->size,
1130                                          parg->type->is_signed,
1131                                          FILTER_OTHER);
1132                 if (ret)
1133                         return ret;
1134         }
1135         return 0;
1136 }
1137
1138 #ifdef CONFIG_PERF_EVENTS
1139
1140 /* Kprobe profile handler */
1141 static __kprobes void
1142 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1143 {
1144         struct ftrace_event_call *call = &tk->tp.call;
1145         struct kprobe_trace_entry_head *entry;
1146         struct hlist_head *head;
1147         int size, __size, dsize;
1148         int rctx;
1149
1150         head = this_cpu_ptr(call->perf_events);
1151         if (hlist_empty(head))
1152                 return;
1153
1154         dsize = __get_data_size(&tk->tp, regs);
1155         __size = sizeof(*entry) + tk->tp.size + dsize;
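        /*
         * Sizing follows the usual perf trace idiom: round up so that the
         * record plus the u32 size header perf prepends to raw samples stays
         * u64-aligned, then drop the u32 since it is not part of the buffer
         * reserved here.
         */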
1156         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1157         size -= sizeof(u32);
1158
1159         entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1160         if (!entry)
1161                 return;
1162
1163         entry->ip = (unsigned long)tk->rp.kp.addr;
1164         memset(&entry[1], 0, dsize);
1165         store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1166         perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1167 }
1168
1169 /* Kretprobe profile handler */
1170 static __kprobes void
1171 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1172                     struct pt_regs *regs)
1173 {
1174         struct ftrace_event_call *call = &tk->tp.call;
1175         struct kretprobe_trace_entry_head *entry;
1176         struct hlist_head *head;
1177         int size, __size, dsize;
1178         int rctx;
1179
1180         head = this_cpu_ptr(call->perf_events);
1181         if (hlist_empty(head))
1182                 return;
1183
1184         dsize = __get_data_size(&tk->tp, regs);
1185         __size = sizeof(*entry) + tk->tp.size + dsize;
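        /* Same perf raw-sample alignment sizing as in kprobe_perf_func() above. */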
1186         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1187         size -= sizeof(u32);
1188
1189         entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1190         if (!entry)
1191                 return;
1192
1193         entry->func = (unsigned long)tk->rp.kp.addr;
1194         entry->ret_ip = (unsigned long)ri->ret_addr;
1195         store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1196         perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1197 }
1198 #endif  /* CONFIG_PERF_EVENTS */
1199
1200 /*
1201  * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1202  *
1203  * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
1204  * lockless, but we can't race with this __init function.
1205  */
1206 static __kprobes
1207 int kprobe_register(struct ftrace_event_call *event,
1208                     enum trace_reg type, void *data)
1209 {
1210         struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1211         struct ftrace_event_file *file = data;
1212
1213         switch (type) {
1214         case TRACE_REG_REGISTER:
1215                 return enable_trace_kprobe(tk, file);
1216         case TRACE_REG_UNREGISTER:
1217                 return disable_trace_kprobe(tk, file);
1218
1219 #ifdef CONFIG_PERF_EVENTS
1220         case TRACE_REG_PERF_REGISTER:
1221                 return enable_trace_kprobe(tk, NULL);
1222         case TRACE_REG_PERF_UNREGISTER:
1223                 return disable_trace_kprobe(tk, NULL);
1224         case TRACE_REG_PERF_OPEN:
1225         case TRACE_REG_PERF_CLOSE:
1226         case TRACE_REG_PERF_ADD:
1227         case TRACE_REG_PERF_DEL:
1228                 return 0;
1229 #endif
1230         }
1231         return 0;
1232 }
1233
1234 static __kprobes
1235 int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1236 {
1237         struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1238
1239         tk->nhit++;
1240
1241         if (tk->tp.flags & TP_FLAG_TRACE)
1242                 kprobe_trace_func(tk, regs);
1243 #ifdef CONFIG_PERF_EVENTS
1244         if (tk->tp.flags & TP_FLAG_PROFILE)
1245                 kprobe_perf_func(tk, regs);
1246 #endif
1247         return 0;       /* We don't tweak the kernel, so just return 0 */
1248 }
1249
1250 static __kprobes
1251 int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1252 {
1253         struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1254
1255         tk->nhit++;
1256
1257         if (tk->tp.flags & TP_FLAG_TRACE)
1258                 kretprobe_trace_func(tk, ri, regs);
1259 #ifdef CONFIG_PERF_EVENTS
1260         if (tk->tp.flags & TP_FLAG_PROFILE)
1261                 kretprobe_perf_func(tk, ri, regs);
1262 #endif
1263         return 0;       /* We don't tweak the kernel, so just return 0 */
1264 }
1265
1266 static struct trace_event_functions kretprobe_funcs = {
1267         .trace          = print_kretprobe_event
1268 };
1269
1270 static struct trace_event_functions kprobe_funcs = {
1271         .trace          = print_kprobe_event
1272 };
1273
1274 static int register_kprobe_event(struct trace_kprobe *tk)
1275 {
1276         struct ftrace_event_call *call = &tk->tp.call;
1277         int ret;
1278
1279         /* Initialize ftrace_event_call */
1280         INIT_LIST_HEAD(&call->class->fields);
1281         if (trace_kprobe_is_return(tk)) {
1282                 call->event.funcs = &kretprobe_funcs;
1283                 call->class->define_fields = kretprobe_event_define_fields;
1284         } else {
1285                 call->event.funcs = &kprobe_funcs;
1286                 call->class->define_fields = kprobe_event_define_fields;
1287         }
1288         if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
1289                 return -ENOMEM;
1290         ret = register_ftrace_event(&call->event);
1291         if (!ret) {
1292                 kfree(call->print_fmt);
1293                 return -ENODEV;
1294         }
1295         call->flags = 0;
1296         call->class->reg = kprobe_register;
1297         call->data = tk;
1298         ret = trace_add_event_call(call);
1299         if (ret) {
1300                 pr_info("Failed to register kprobe event: %s\n", call->name);
1301                 kfree(call->print_fmt);
1302                 unregister_ftrace_event(&call->event);
1303         }
1304         return ret;
1305 }
1306
1307 static int unregister_kprobe_event(struct trace_kprobe *tk)
1308 {
1309         int ret;
1310
1311         /* tp->event is unregistered in trace_remove_event_call() */
1312         ret = trace_remove_event_call(&tk->tp.call);
1313         if (!ret)
1314                 kfree(tk->tp.call.print_fmt);
1315         return ret;
1316 }
1317
1318 /* Make a debugfs interface for controlling probe points */
1319 static __init int init_kprobe_trace(void)
1320 {
1321         struct dentry *d_tracer;
1322         struct dentry *entry;
1323
1324         if (register_module_notifier(&trace_kprobe_module_nb))
1325                 return -EINVAL;
1326
1327         d_tracer = tracing_init_dentry();
1328         if (!d_tracer)
1329                 return 0;
1330
1331         entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1332                                     NULL, &kprobe_events_ops);
1333
1334         /* Event list interface */
1335         if (!entry)
1336                 pr_warning("Could not create debugfs "
1337                            "'kprobe_events' entry\n");
1338
1339         /* Profile interface */
1340         entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1341                                     NULL, &kprobe_profile_ops);
1342
1343         if (!entry)
1344                 pr_warning("Could not create debugfs "
1345                            "'kprobe_profile' entry\n");
1346         return 0;
1347 }
1348 fs_initcall(init_kprobe_trace);
1349
1350
1351 #ifdef CONFIG_FTRACE_STARTUP_TEST
1352
1353 /*
1354  * The "__used" keeps gcc from removing the function symbol
1355  * from the kallsyms table.
1356  */
1357 static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1358                                                int a4, int a5, int a6)
1359 {
1360         return a1 + a2 + a3 + a4 + a5 + a6;
1361 }
1362
1363 static struct ftrace_event_file *
1364 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1365 {
1366         struct ftrace_event_file *file;
1367
1368         list_for_each_entry(file, &tr->events, list)
1369                 if (file->event_call == &tk->tp.call)
1370                         return file;
1371
1372         return NULL;
1373 }
1374
1375 /*
1376  * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1377  * stage, we can do this lockless.
1378  */
1379 static __init int kprobe_trace_self_tests_init(void)
1380 {
1381         int ret, warn = 0;
1382         int (*target)(int, int, int, int, int, int);
1383         struct trace_kprobe *tk;
1384         struct ftrace_event_file *file;
1385
1386         target = kprobe_trace_selftest_target;
1387
1388         pr_info("Testing kprobe tracing: ");
1389
1390         ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
1391                                   "$stack $stack0 +0($stack)",
1392                                   create_trace_kprobe);
1393         if (WARN_ON_ONCE(ret)) {
1394                 pr_warn("error on probing function entry.\n");
1395                 warn++;
1396         } else {
1397                 /* Enable trace point */
1398                 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1399                 if (WARN_ON_ONCE(tk == NULL)) {
1400                         pr_warn("error on getting new probe.\n");
1401                         warn++;
1402                 } else {
1403                         file = find_trace_probe_file(tk, top_trace_array());
1404                         if (WARN_ON_ONCE(file == NULL)) {
1405                                 pr_warn("error on getting probe file.\n");
1406                                 warn++;
1407                         } else
1408                                 enable_trace_kprobe(tk, file);
1409                 }
1410         }
1411
1412         ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1413                                   "$retval", create_trace_kprobe);
1414         if (WARN_ON_ONCE(ret)) {
1415                 pr_warn("error on probing function return.\n");
1416                 warn++;
1417         } else {
1418                 /* Enable trace point */
1419                 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1420                 if (WARN_ON_ONCE(tk == NULL)) {
1421                         pr_warn("error on getting 2nd new probe.\n");
1422                         warn++;
1423                 } else {
1424                         file = find_trace_probe_file(tk, top_trace_array());
1425                         if (WARN_ON_ONCE(file == NULL)) {
1426                                 pr_warn("error on getting probe file.\n");
1427                                 warn++;
1428                         } else
1429                                 enable_trace_kprobe(tk, file);
1430                 }
1431         }
1432
1433         if (warn)
1434                 goto end;
1435
1436         ret = target(1, 2, 3, 4, 5, 6);
1437
1438         /* Disable trace points before removing it */
1439         tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1440         if (WARN_ON_ONCE(tk == NULL)) {
1441                 pr_warn("error on getting test probe.\n");
1442                 warn++;
1443         } else {
1444                 file = find_trace_probe_file(tk, top_trace_array());
1445                 if (WARN_ON_ONCE(file == NULL)) {
1446                         pr_warn("error on getting probe file.\n");
1447                         warn++;
1448                 } else
1449                         disable_trace_kprobe(tk, file);
1450         }
1451
1452         tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1453         if (WARN_ON_ONCE(tk == NULL)) {
1454                 pr_warn("error on getting 2nd test probe.\n");
1455                 warn++;
1456         } else {
1457                 file = find_trace_probe_file(tk, top_trace_array());
1458                 if (WARN_ON_ONCE(file == NULL)) {
1459                         pr_warn("error on getting probe file.\n");
1460                         warn++;
1461                 } else
1462                         disable_trace_kprobe(tk, file);
1463         }
1464
1465         ret = traceprobe_command("-:testprobe", create_trace_kprobe);
1466         if (WARN_ON_ONCE(ret)) {
1467                 pr_warn("error on deleting a probe.\n");
1468                 warn++;
1469         }
1470
1471         ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
1472         if (WARN_ON_ONCE(ret)) {
1473                 pr_warn("error on deleting a probe.\n");
1474                 warn++;
1475         }
1476
1477 end:
1478         release_all_trace_kprobes();
1479         if (warn)
1480                 pr_cont("NG: Some tests failed. Please check them.\n");
1481         else
1482                 pr_cont("OK\n");
1483         return 0;
1484 }
1485
1486 late_initcall(kprobe_trace_self_tests_init);
1487
1488 #endif