tracing: Use kmem_cache_alloc instead of kmalloc in trace_events.c
[platform/adaptation/renesas_rcar/renesas_kernel.git] / kernel / trace / trace_events.c
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the trace point.
7  *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20
21 #include <asm/setup.h>
22
23 #include "trace_output.h"
24
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27
28 DEFINE_MUTEX(event_mutex);
29
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35
36 LIST_HEAD(ftrace_events);
37 LIST_HEAD(ftrace_common_fields);
38
39 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40
41 static struct kmem_cache *field_cachep;
42 static struct kmem_cache *file_cachep;
43
44 /* Double loops, do not use break, only goto's work */
45 #define do_for_each_event_file(tr, file)                        \
46         list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
47                 list_for_each_entry(file, &tr->events, list)
48
49 #define do_for_each_event_file_safe(tr, file)                   \
50         list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
51                 struct ftrace_event_file *___n;                         \
52                 list_for_each_entry_safe(file, ___n, &tr->events, list)
53
54 #define while_for_each_event_file()             \
55         }
56
57 struct list_head *
58 trace_get_fields(struct ftrace_event_call *event_call)
59 {
60         if (!event_call->class->get_fields)
61                 return &event_call->class->fields;
62         return event_call->class->get_fields(event_call);
63 }
64
65 static int __trace_define_field(struct list_head *head, const char *type,
66                                 const char *name, int offset, int size,
67                                 int is_signed, int filter_type)
68 {
69         struct ftrace_event_field *field;
70
71         field = kmem_cache_alloc(field_cachep, GFP_TRACE);
72         if (!field)
73                 goto err;
74
75         field->name = kstrdup(name, GFP_KERNEL);
76         if (!field->name)
77                 goto err;
78
79         field->type = kstrdup(type, GFP_KERNEL);
80         if (!field->type)
81                 goto err;
82
83         if (filter_type == FILTER_OTHER)
84                 field->filter_type = filter_assign_type(type);
85         else
86                 field->filter_type = filter_type;
87
88         field->offset = offset;
89         field->size = size;
90         field->is_signed = is_signed;
91
92         list_add(&field->link, head);
93
94         return 0;
95
96 err:
97         if (field)
98                 kfree(field->name);
99         kmem_cache_free(field_cachep, field);
100
101         return -ENOMEM;
102 }
103
104 int trace_define_field(struct ftrace_event_call *call, const char *type,
105                        const char *name, int offset, int size, int is_signed,
106                        int filter_type)
107 {
108         struct list_head *head;
109
110         if (WARN_ON(!call->class))
111                 return 0;
112
113         head = trace_get_fields(call);
114         return __trace_define_field(head, type, name, offset, size,
115                                     is_signed, filter_type);
116 }
117 EXPORT_SYMBOL_GPL(trace_define_field);
118
119 #define __common_field(type, item)                                      \
120         ret = __trace_define_field(&ftrace_common_fields, #type,        \
121                                    "common_" #item,                     \
122                                    offsetof(typeof(ent), item),         \
123                                    sizeof(ent.item),                    \
124                                    is_signed_type(type), FILTER_OTHER); \
125         if (ret)                                                        \
126                 return ret;
127
128 static int trace_define_common_fields(void)
129 {
130         int ret;
131         struct trace_entry ent;
132
133         __common_field(unsigned short, type);
134         __common_field(unsigned char, flags);
135         __common_field(unsigned char, preempt_count);
136         __common_field(int, pid);
137
138         return ret;
139 }
140
141 void trace_destroy_fields(struct ftrace_event_call *call)
142 {
143         struct ftrace_event_field *field, *next;
144         struct list_head *head;
145
146         head = trace_get_fields(call);
147         list_for_each_entry_safe(field, next, head, link) {
148                 list_del(&field->link);
149                 kfree(field->type);
150                 kfree(field->name);
151                 kmem_cache_free(field_cachep, field);
152         }
153 }
154
155 int trace_event_raw_init(struct ftrace_event_call *call)
156 {
157         int id;
158
159         id = register_ftrace_event(&call->event);
160         if (!id)
161                 return -ENODEV;
162
163         return 0;
164 }
165 EXPORT_SYMBOL_GPL(trace_event_raw_init);
166
167 int ftrace_event_reg(struct ftrace_event_call *call,
168                      enum trace_reg type, void *data)
169 {
170         struct ftrace_event_file *file = data;
171
172         switch (type) {
173         case TRACE_REG_REGISTER:
174                 return tracepoint_probe_register(call->name,
175                                                  call->class->probe,
176                                                  file);
177         case TRACE_REG_UNREGISTER:
178                 tracepoint_probe_unregister(call->name,
179                                             call->class->probe,
180                                             file);
181                 return 0;
182
183 #ifdef CONFIG_PERF_EVENTS
184         case TRACE_REG_PERF_REGISTER:
185                 return tracepoint_probe_register(call->name,
186                                                  call->class->perf_probe,
187                                                  call);
188         case TRACE_REG_PERF_UNREGISTER:
189                 tracepoint_probe_unregister(call->name,
190                                             call->class->perf_probe,
191                                             call);
192                 return 0;
193         case TRACE_REG_PERF_OPEN:
194         case TRACE_REG_PERF_CLOSE:
195         case TRACE_REG_PERF_ADD:
196         case TRACE_REG_PERF_DEL:
197                 return 0;
198 #endif
199         }
200         return 0;
201 }
202 EXPORT_SYMBOL_GPL(ftrace_event_reg);
203
204 void trace_event_enable_cmd_record(bool enable)
205 {
206         struct ftrace_event_file *file;
207         struct trace_array *tr;
208
209         mutex_lock(&event_mutex);
210         do_for_each_event_file(tr, file) {
211
212                 if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
213                         continue;
214
215                 if (enable) {
216                         tracing_start_cmdline_record();
217                         file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
218                 } else {
219                         tracing_stop_cmdline_record();
220                         file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
221                 }
222         } while_for_each_event_file();
223         mutex_unlock(&event_mutex);
224 }
225
226 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
227                                        int enable)
228 {
229         struct ftrace_event_call *call = file->event_call;
230         int ret = 0;
231
232         switch (enable) {
233         case 0:
234                 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
235                         file->flags &= ~FTRACE_EVENT_FL_ENABLED;
236                         if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
237                                 tracing_stop_cmdline_record();
238                                 file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
239                         }
240                         call->class->reg(call, TRACE_REG_UNREGISTER, file);
241                 }
242                 break;
243         case 1:
244                 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
245                         if (trace_flags & TRACE_ITER_RECORD_CMD) {
246                                 tracing_start_cmdline_record();
247                                 file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
248                         }
249                         ret = call->class->reg(call, TRACE_REG_REGISTER, file);
250                         if (ret) {
251                                 tracing_stop_cmdline_record();
252                                 pr_info("event trace: Could not enable event "
253                                         "%s\n", call->name);
254                                 break;
255                         }
256                         file->flags |= FTRACE_EVENT_FL_ENABLED;
257                 }
258                 break;
259         }
260
261         return ret;
262 }
263
264 static void ftrace_clear_events(struct trace_array *tr)
265 {
266         struct ftrace_event_file *file;
267
268         mutex_lock(&event_mutex);
269         list_for_each_entry(file, &tr->events, list) {
270                 ftrace_event_enable_disable(file, 0);
271         }
272         mutex_unlock(&event_mutex);
273 }
274
275 static void __put_system(struct event_subsystem *system)
276 {
277         struct event_filter *filter = system->filter;
278
279         WARN_ON_ONCE(system->ref_count == 0);
280         if (--system->ref_count)
281                 return;
282
283         list_del(&system->list);
284
285         if (filter) {
286                 kfree(filter->filter_string);
287                 kfree(filter);
288         }
289         kfree(system->name);
290         kfree(system);
291 }
292
293 static void __get_system(struct event_subsystem *system)
294 {
295         WARN_ON_ONCE(system->ref_count == 0);
296         system->ref_count++;
297 }
298
299 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
300 {
301         WARN_ON_ONCE(dir->ref_count == 0);
302         dir->ref_count++;
303         __get_system(dir->subsystem);
304 }
305
306 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
307 {
308         WARN_ON_ONCE(dir->ref_count == 0);
309         /* If the subsystem is about to be freed, the dir must be too */
310         WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);
311
312         __put_system(dir->subsystem);
313         if (!--dir->ref_count)
314                 kfree(dir);
315 }
316
317 static void put_system(struct ftrace_subsystem_dir *dir)
318 {
319         mutex_lock(&event_mutex);
320         __put_system_dir(dir);
321         mutex_unlock(&event_mutex);
322 }
323
324 /*
325  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
326  */
327 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
328                                   const char *sub, const char *event, int set)
329 {
330         struct ftrace_event_file *file;
331         struct ftrace_event_call *call;
332         int ret = -EINVAL;
333
334         mutex_lock(&event_mutex);
335         list_for_each_entry(file, &tr->events, list) {
336
337                 call = file->event_call;
338
339                 if (!call->name || !call->class || !call->class->reg)
340                         continue;
341
342                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
343                         continue;
344
345                 if (match &&
346                     strcmp(match, call->name) != 0 &&
347                     strcmp(match, call->class->system) != 0)
348                         continue;
349
350                 if (sub && strcmp(sub, call->class->system) != 0)
351                         continue;
352
353                 if (event && strcmp(event, call->name) != 0)
354                         continue;
355
356                 ftrace_event_enable_disable(file, set);
357
358                 ret = 0;
359         }
360         mutex_unlock(&event_mutex);
361
362         return ret;
363 }
364
365 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
366 {
367         char *event = NULL, *sub = NULL, *match;
368
369         /*
370          * The buf format can be <subsystem>:<event-name>
371          *  *:<event-name> means any event by that name.
372          *  :<event-name> is the same.
373          *
374          *  <subsystem>:* means all events in that subsystem
375          *  <subsystem>: means the same.
376          *
377          *  <name> (no ':') means all events in a subsystem with
378          *  the name <name> or any event that matches <name>
379          */
380
381         match = strsep(&buf, ":");
382         if (buf) {
383                 sub = match;
384                 event = buf;
385                 match = NULL;
386
387                 if (!strlen(sub) || strcmp(sub, "*") == 0)
388                         sub = NULL;
389                 if (!strlen(event) || strcmp(event, "*") == 0)
390                         event = NULL;
391         }
392
393         return __ftrace_set_clr_event(tr, match, sub, event, set);
394 }
395
396 /**
397  * trace_set_clr_event - enable or disable an event
398  * @system: system name to match (NULL for any system)
399  * @event: event name to match (NULL for all events, within system)
400  * @set: 1 to enable, 0 to disable
401  *
402  * This is a way for other parts of the kernel to enable or disable
403  * event recording.
404  *
405  * Returns 0 on success, -EINVAL if the parameters do not match any
406  * registered events.
407  */
408 int trace_set_clr_event(const char *system, const char *event, int set)
409 {
410         struct trace_array *tr = top_trace_array();
411
412         return __ftrace_set_clr_event(tr, NULL, system, event, set);
413 }
414 EXPORT_SYMBOL_GPL(trace_set_clr_event);
415
416 /* 128 should be much more than enough */
417 #define EVENT_BUF_SIZE          127
418
419 static ssize_t
420 ftrace_event_write(struct file *file, const char __user *ubuf,
421                    size_t cnt, loff_t *ppos)
422 {
423         struct trace_parser parser;
424         struct seq_file *m = file->private_data;
425         struct trace_array *tr = m->private;
426         ssize_t read, ret;
427
428         if (!cnt)
429                 return 0;
430
431         ret = tracing_update_buffers();
432         if (ret < 0)
433                 return ret;
434
435         if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
436                 return -ENOMEM;
437
438         read = trace_get_user(&parser, ubuf, cnt, ppos);
439
440         if (read >= 0 && trace_parser_loaded((&parser))) {
441                 int set = 1;
442
443                 if (*parser.buffer == '!')
444                         set = 0;
445
446                 parser.buffer[parser.idx] = 0;
447
448                 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
449                 if (ret)
450                         goto out_put;
451         }
452
453         ret = read;
454
455  out_put:
456         trace_parser_put(&parser);
457
458         return ret;
459 }
460
461 static void *
462 t_next(struct seq_file *m, void *v, loff_t *pos)
463 {
464         struct ftrace_event_file *file = v;
465         struct ftrace_event_call *call;
466         struct trace_array *tr = m->private;
467
468         (*pos)++;
469
470         list_for_each_entry_continue(file, &tr->events, list) {
471                 call = file->event_call;
472                 /*
473                  * The ftrace subsystem is for showing formats only.
474                  * They can not be enabled or disabled via the event files.
475                  */
476                 if (call->class && call->class->reg)
477                         return file;
478         }
479
480         return NULL;
481 }
482
483 static void *t_start(struct seq_file *m, loff_t *pos)
484 {
485         struct ftrace_event_file *file;
486         struct trace_array *tr = m->private;
487         loff_t l;
488
489         mutex_lock(&event_mutex);
490
491         file = list_entry(&tr->events, struct ftrace_event_file, list);
492         for (l = 0; l <= *pos; ) {
493                 file = t_next(m, file, &l);
494                 if (!file)
495                         break;
496         }
497         return file;
498 }
499
500 static void *
501 s_next(struct seq_file *m, void *v, loff_t *pos)
502 {
503         struct ftrace_event_file *file = v;
504         struct trace_array *tr = m->private;
505
506         (*pos)++;
507
508         list_for_each_entry_continue(file, &tr->events, list) {
509                 if (file->flags & FTRACE_EVENT_FL_ENABLED)
510                         return file;
511         }
512
513         return NULL;
514 }
515
516 static void *s_start(struct seq_file *m, loff_t *pos)
517 {
518         struct ftrace_event_file *file;
519         struct trace_array *tr = m->private;
520         loff_t l;
521
522         mutex_lock(&event_mutex);
523
524         file = list_entry(&tr->events, struct ftrace_event_file, list);
525         for (l = 0; l <= *pos; ) {
526                 file = s_next(m, file, &l);
527                 if (!file)
528                         break;
529         }
530         return file;
531 }
532
533 static int t_show(struct seq_file *m, void *v)
534 {
535         struct ftrace_event_file *file = v;
536         struct ftrace_event_call *call = file->event_call;
537
538         if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
539                 seq_printf(m, "%s:", call->class->system);
540         seq_printf(m, "%s\n", call->name);
541
542         return 0;
543 }
544
545 static void t_stop(struct seq_file *m, void *p)
546 {
547         mutex_unlock(&event_mutex);
548 }
549
550 static ssize_t
551 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
552                   loff_t *ppos)
553 {
554         struct ftrace_event_file *file = filp->private_data;
555         char *buf;
556
557         if (file->flags & FTRACE_EVENT_FL_ENABLED)
558                 buf = "1\n";
559         else
560                 buf = "0\n";
561
562         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
563 }
564
565 static ssize_t
566 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
567                    loff_t *ppos)
568 {
569         struct ftrace_event_file *file = filp->private_data;
570         unsigned long val;
571         int ret;
572
573         if (!file)
574                 return -EINVAL;
575
576         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
577         if (ret)
578                 return ret;
579
580         ret = tracing_update_buffers();
581         if (ret < 0)
582                 return ret;
583
584         switch (val) {
585         case 0:
586         case 1:
587                 mutex_lock(&event_mutex);
588                 ret = ftrace_event_enable_disable(file, val);
589                 mutex_unlock(&event_mutex);
590                 break;
591
592         default:
593                 return -EINVAL;
594         }
595
596         *ppos += cnt;
597
598         return ret ? ret : cnt;
599 }
600
601 static ssize_t
602 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
603                    loff_t *ppos)
604 {
605         const char set_to_char[4] = { '?', '0', '1', 'X' };
606         struct ftrace_subsystem_dir *dir = filp->private_data;
607         struct event_subsystem *system = dir->subsystem;
608         struct ftrace_event_call *call;
609         struct ftrace_event_file *file;
610         struct trace_array *tr = dir->tr;
611         char buf[2];
612         int set = 0;
613         int ret;
614
615         mutex_lock(&event_mutex);
616         list_for_each_entry(file, &tr->events, list) {
617                 call = file->event_call;
618                 if (!call->name || !call->class || !call->class->reg)
619                         continue;
620
621                 if (system && strcmp(call->class->system, system->name) != 0)
622                         continue;
623
624                 /*
625                  * We need to find out if all the events are set
626                  * or if all events or cleared, or if we have
627                  * a mixture.
628                  */
629                 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
630
631                 /*
632                  * If we have a mixture, no need to look further.
633                  */
634                 if (set == 3)
635                         break;
636         }
637         mutex_unlock(&event_mutex);
638
639         buf[0] = set_to_char[set];
640         buf[1] = '\n';
641
642         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
643
644         return ret;
645 }
646
647 static ssize_t
648 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
649                     loff_t *ppos)
650 {
651         struct ftrace_subsystem_dir *dir = filp->private_data;
652         struct event_subsystem *system = dir->subsystem;
653         const char *name = NULL;
654         unsigned long val;
655         ssize_t ret;
656
657         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
658         if (ret)
659                 return ret;
660
661         ret = tracing_update_buffers();
662         if (ret < 0)
663                 return ret;
664
665         if (val != 0 && val != 1)
666                 return -EINVAL;
667
668         /*
669          * Opening of "enable" adds a ref count to system,
670          * so the name is safe to use.
671          */
672         if (system)
673                 name = system->name;
674
675         ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
676         if (ret)
677                 goto out;
678
679         ret = cnt;
680
681 out:
682         *ppos += cnt;
683
684         return ret;
685 }
686
687 enum {
688         FORMAT_HEADER           = 1,
689         FORMAT_FIELD_SEPERATOR  = 2,
690         FORMAT_PRINTFMT         = 3,
691 };
692
693 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
694 {
695         struct ftrace_event_call *call = m->private;
696         struct ftrace_event_field *field;
697         struct list_head *common_head = &ftrace_common_fields;
698         struct list_head *head = trace_get_fields(call);
699
700         (*pos)++;
701
702         switch ((unsigned long)v) {
703         case FORMAT_HEADER:
704                 if (unlikely(list_empty(common_head)))
705                         return NULL;
706
707                 field = list_entry(common_head->prev,
708                                    struct ftrace_event_field, link);
709                 return field;
710
711         case FORMAT_FIELD_SEPERATOR:
712                 if (unlikely(list_empty(head)))
713                         return NULL;
714
715                 field = list_entry(head->prev, struct ftrace_event_field, link);
716                 return field;
717
718         case FORMAT_PRINTFMT:
719                 /* all done */
720                 return NULL;
721         }
722
723         field = v;
724         if (field->link.prev == common_head)
725                 return (void *)FORMAT_FIELD_SEPERATOR;
726         else if (field->link.prev == head)
727                 return (void *)FORMAT_PRINTFMT;
728
729         field = list_entry(field->link.prev, struct ftrace_event_field, link);
730
731         return field;
732 }
733
734 static void *f_start(struct seq_file *m, loff_t *pos)
735 {
736         loff_t l = 0;
737         void *p;
738
739         /* Start by showing the header */
740         if (!*pos)
741                 return (void *)FORMAT_HEADER;
742
743         p = (void *)FORMAT_HEADER;
744         do {
745                 p = f_next(m, p, &l);
746         } while (p && l < *pos);
747
748         return p;
749 }
750
751 static int f_show(struct seq_file *m, void *v)
752 {
753         struct ftrace_event_call *call = m->private;
754         struct ftrace_event_field *field;
755         const char *array_descriptor;
756
757         switch ((unsigned long)v) {
758         case FORMAT_HEADER:
759                 seq_printf(m, "name: %s\n", call->name);
760                 seq_printf(m, "ID: %d\n", call->event.type);
761                 seq_printf(m, "format:\n");
762                 return 0;
763
764         case FORMAT_FIELD_SEPERATOR:
765                 seq_putc(m, '\n');
766                 return 0;
767
768         case FORMAT_PRINTFMT:
769                 seq_printf(m, "\nprint fmt: %s\n",
770                            call->print_fmt);
771                 return 0;
772         }
773
774         field = v;
775
776         /*
777          * Smartly shows the array type(except dynamic array).
778          * Normal:
779          *      field:TYPE VAR
780          * If TYPE := TYPE[LEN], it is shown:
781          *      field:TYPE VAR[LEN]
782          */
783         array_descriptor = strchr(field->type, '[');
784
785         if (!strncmp(field->type, "__data_loc", 10))
786                 array_descriptor = NULL;
787
788         if (!array_descriptor)
789                 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
790                            field->type, field->name, field->offset,
791                            field->size, !!field->is_signed);
792         else
793                 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
794                            (int)(array_descriptor - field->type),
795                            field->type, field->name,
796                            array_descriptor, field->offset,
797                            field->size, !!field->is_signed);
798
799         return 0;
800 }
801
802 static void f_stop(struct seq_file *m, void *p)
803 {
804 }
805
806 static const struct seq_operations trace_format_seq_ops = {
807         .start          = f_start,
808         .next           = f_next,
809         .stop           = f_stop,
810         .show           = f_show,
811 };
812
813 static int trace_format_open(struct inode *inode, struct file *file)
814 {
815         struct ftrace_event_call *call = inode->i_private;
816         struct seq_file *m;
817         int ret;
818
819         ret = seq_open(file, &trace_format_seq_ops);
820         if (ret < 0)
821                 return ret;
822
823         m = file->private_data;
824         m->private = call;
825
826         return 0;
827 }
828
829 static ssize_t
830 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
831 {
832         struct ftrace_event_call *call = filp->private_data;
833         struct trace_seq *s;
834         int r;
835
836         if (*ppos)
837                 return 0;
838
839         s = kmalloc(sizeof(*s), GFP_KERNEL);
840         if (!s)
841                 return -ENOMEM;
842
843         trace_seq_init(s);
844         trace_seq_printf(s, "%d\n", call->event.type);
845
846         r = simple_read_from_buffer(ubuf, cnt, ppos,
847                                     s->buffer, s->len);
848         kfree(s);
849         return r;
850 }
851
852 static ssize_t
853 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
854                   loff_t *ppos)
855 {
856         struct ftrace_event_call *call = filp->private_data;
857         struct trace_seq *s;
858         int r;
859
860         if (*ppos)
861                 return 0;
862
863         s = kmalloc(sizeof(*s), GFP_KERNEL);
864         if (!s)
865                 return -ENOMEM;
866
867         trace_seq_init(s);
868
869         print_event_filter(call, s);
870         r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
871
872         kfree(s);
873
874         return r;
875 }
876
877 static ssize_t
878 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
879                    loff_t *ppos)
880 {
881         struct ftrace_event_call *call = filp->private_data;
882         char *buf;
883         int err;
884
885         if (cnt >= PAGE_SIZE)
886                 return -EINVAL;
887
888         buf = (char *)__get_free_page(GFP_TEMPORARY);
889         if (!buf)
890                 return -ENOMEM;
891
892         if (copy_from_user(buf, ubuf, cnt)) {
893                 free_page((unsigned long) buf);
894                 return -EFAULT;
895         }
896         buf[cnt] = '\0';
897
898         err = apply_event_filter(call, buf);
899         free_page((unsigned long) buf);
900         if (err < 0)
901                 return err;
902
903         *ppos += cnt;
904
905         return cnt;
906 }
907
908 static LIST_HEAD(event_subsystems);
909
910 static int subsystem_open(struct inode *inode, struct file *filp)
911 {
912         struct event_subsystem *system = NULL;
913         struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
914         struct trace_array *tr;
915         int ret;
916
917         /* Make sure the system still exists */
918         mutex_lock(&event_mutex);
919         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
920                 list_for_each_entry(dir, &tr->systems, list) {
921                         if (dir == inode->i_private) {
922                                 /* Don't open systems with no events */
923                                 if (dir->nr_events) {
924                                         __get_system_dir(dir);
925                                         system = dir->subsystem;
926                                 }
927                                 goto exit_loop;
928                         }
929                 }
930         }
931  exit_loop:
932         mutex_unlock(&event_mutex);
933
934         if (!system)
935                 return -ENODEV;
936
937         /* Some versions of gcc think dir can be uninitialized here */
938         WARN_ON(!dir);
939
940         ret = tracing_open_generic(inode, filp);
941         if (ret < 0)
942                 put_system(dir);
943
944         return ret;
945 }
946
947 static int system_tr_open(struct inode *inode, struct file *filp)
948 {
949         struct ftrace_subsystem_dir *dir;
950         struct trace_array *tr = inode->i_private;
951         int ret;
952
953         /* Make a temporary dir that has no system but points to tr */
954         dir = kzalloc(sizeof(*dir), GFP_KERNEL);
955         if (!dir)
956                 return -ENOMEM;
957
958         dir->tr = tr;
959
960         ret = tracing_open_generic(inode, filp);
961         if (ret < 0)
962                 kfree(dir);
963
964         filp->private_data = dir;
965
966         return ret;
967 }
968
969 static int subsystem_release(struct inode *inode, struct file *file)
970 {
971         struct ftrace_subsystem_dir *dir = file->private_data;
972
973         /*
974          * If dir->subsystem is NULL, then this is a temporary
975          * descriptor that was made for a trace_array to enable
976          * all subsystems.
977          */
978         if (dir->subsystem)
979                 put_system(dir);
980         else
981                 kfree(dir);
982
983         return 0;
984 }
985
986 static ssize_t
987 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
988                       loff_t *ppos)
989 {
990         struct ftrace_subsystem_dir *dir = filp->private_data;
991         struct event_subsystem *system = dir->subsystem;
992         struct trace_seq *s;
993         int r;
994
995         if (*ppos)
996                 return 0;
997
998         s = kmalloc(sizeof(*s), GFP_KERNEL);
999         if (!s)
1000                 return -ENOMEM;
1001
1002         trace_seq_init(s);
1003
1004         print_subsystem_event_filter(system, s);
1005         r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1006
1007         kfree(s);
1008
1009         return r;
1010 }
1011
1012 static ssize_t
1013 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1014                        loff_t *ppos)
1015 {
1016         struct ftrace_subsystem_dir *dir = filp->private_data;
1017         char *buf;
1018         int err;
1019
1020         if (cnt >= PAGE_SIZE)
1021                 return -EINVAL;
1022
1023         buf = (char *)__get_free_page(GFP_TEMPORARY);
1024         if (!buf)
1025                 return -ENOMEM;
1026
1027         if (copy_from_user(buf, ubuf, cnt)) {
1028                 free_page((unsigned long) buf);
1029                 return -EFAULT;
1030         }
1031         buf[cnt] = '\0';
1032
1033         err = apply_subsystem_event_filter(dir, buf);
1034         free_page((unsigned long) buf);
1035         if (err < 0)
1036                 return err;
1037
1038         *ppos += cnt;
1039
1040         return cnt;
1041 }
1042
1043 static ssize_t
1044 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1045 {
1046         int (*func)(struct trace_seq *s) = filp->private_data;
1047         struct trace_seq *s;
1048         int r;
1049
1050         if (*ppos)
1051                 return 0;
1052
1053         s = kmalloc(sizeof(*s), GFP_KERNEL);
1054         if (!s)
1055                 return -ENOMEM;
1056
1057         trace_seq_init(s);
1058
1059         func(s);
1060         r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1061
1062         kfree(s);
1063
1064         return r;
1065 }
1066
1067 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1068 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1069
1070 static const struct seq_operations show_event_seq_ops = {
1071         .start = t_start,
1072         .next = t_next,
1073         .show = t_show,
1074         .stop = t_stop,
1075 };
1076
1077 static const struct seq_operations show_set_event_seq_ops = {
1078         .start = s_start,
1079         .next = s_next,
1080         .show = t_show,
1081         .stop = t_stop,
1082 };
1083
1084 static const struct file_operations ftrace_avail_fops = {
1085         .open = ftrace_event_avail_open,
1086         .read = seq_read,
1087         .llseek = seq_lseek,
1088         .release = seq_release,
1089 };
1090
1091 static const struct file_operations ftrace_set_event_fops = {
1092         .open = ftrace_event_set_open,
1093         .read = seq_read,
1094         .write = ftrace_event_write,
1095         .llseek = seq_lseek,
1096         .release = seq_release,
1097 };
1098
1099 static const struct file_operations ftrace_enable_fops = {
1100         .open = tracing_open_generic,
1101         .read = event_enable_read,
1102         .write = event_enable_write,
1103         .llseek = default_llseek,
1104 };
1105
1106 static const struct file_operations ftrace_event_format_fops = {
1107         .open = trace_format_open,
1108         .read = seq_read,
1109         .llseek = seq_lseek,
1110         .release = seq_release,
1111 };
1112
1113 static const struct file_operations ftrace_event_id_fops = {
1114         .open = tracing_open_generic,
1115         .read = event_id_read,
1116         .llseek = default_llseek,
1117 };
1118
1119 static const struct file_operations ftrace_event_filter_fops = {
1120         .open = tracing_open_generic,
1121         .read = event_filter_read,
1122         .write = event_filter_write,
1123         .llseek = default_llseek,
1124 };
1125
1126 static const struct file_operations ftrace_subsystem_filter_fops = {
1127         .open = subsystem_open,
1128         .read = subsystem_filter_read,
1129         .write = subsystem_filter_write,
1130         .llseek = default_llseek,
1131         .release = subsystem_release,
1132 };
1133
1134 static const struct file_operations ftrace_system_enable_fops = {
1135         .open = subsystem_open,
1136         .read = system_enable_read,
1137         .write = system_enable_write,
1138         .llseek = default_llseek,
1139         .release = subsystem_release,
1140 };
1141
1142 static const struct file_operations ftrace_tr_enable_fops = {
1143         .open = system_tr_open,
1144         .read = system_enable_read,
1145         .write = system_enable_write,
1146         .llseek = default_llseek,
1147         .release = subsystem_release,
1148 };
1149
1150 static const struct file_operations ftrace_show_header_fops = {
1151         .open = tracing_open_generic,
1152         .read = show_header,
1153         .llseek = default_llseek,
1154 };
1155
1156 static int
1157 ftrace_event_open(struct inode *inode, struct file *file,
1158                   const struct seq_operations *seq_ops)
1159 {
1160         struct seq_file *m;
1161         int ret;
1162
1163         ret = seq_open(file, seq_ops);
1164         if (ret < 0)
1165                 return ret;
1166         m = file->private_data;
1167         /* copy tr over to seq ops */
1168         m->private = inode->i_private;
1169
1170         return ret;
1171 }
1172
1173 static int
1174 ftrace_event_avail_open(struct inode *inode, struct file *file)
1175 {
1176         const struct seq_operations *seq_ops = &show_event_seq_ops;
1177
1178         return ftrace_event_open(inode, file, seq_ops);
1179 }
1180
1181 static int
1182 ftrace_event_set_open(struct inode *inode, struct file *file)
1183 {
1184         const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1185         struct trace_array *tr = inode->i_private;
1186
1187         if ((file->f_mode & FMODE_WRITE) &&
1188             (file->f_flags & O_TRUNC))
1189                 ftrace_clear_events(tr);
1190
1191         return ftrace_event_open(inode, file, seq_ops);
1192 }
1193
1194 static struct event_subsystem *
1195 create_new_subsystem(const char *name)
1196 {
1197         struct event_subsystem *system;
1198
1199         /* need to create new entry */
1200         system = kmalloc(sizeof(*system), GFP_KERNEL);
1201         if (!system)
1202                 return NULL;
1203
1204         system->ref_count = 1;
1205         system->name = kstrdup(name, GFP_KERNEL);
1206
1207         if (!system->name)
1208                 goto out_free;
1209
1210         system->filter = NULL;
1211
1212         system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1213         if (!system->filter)
1214                 goto out_free;
1215
1216         list_add(&system->list, &event_subsystems);
1217
1218         return system;
1219
1220  out_free:
1221         kfree(system->name);
1222         kfree(system);
1223         return NULL;
1224 }
1225
1226 static struct dentry *
1227 event_subsystem_dir(struct trace_array *tr, const char *name,
1228                     struct ftrace_event_file *file, struct dentry *parent)
1229 {
1230         struct ftrace_subsystem_dir *dir;
1231         struct event_subsystem *system;
1232         struct dentry *entry;
1233
1234         /* First see if we did not already create this dir */
1235         list_for_each_entry(dir, &tr->systems, list) {
1236                 system = dir->subsystem;
1237                 if (strcmp(system->name, name) == 0) {
1238                         dir->nr_events++;
1239                         file->system = dir;
1240                         return dir->entry;
1241                 }
1242         }
1243
1244         /* Now see if the system itself exists. */
1245         list_for_each_entry(system, &event_subsystems, list) {
1246                 if (strcmp(system->name, name) == 0)
1247                         break;
1248         }
1249         /* Reset system variable when not found */
1250         if (&system->list == &event_subsystems)
1251                 system = NULL;
1252
1253         dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1254         if (!dir)
1255                 goto out_fail;
1256
1257         if (!system) {
1258                 system = create_new_subsystem(name);
1259                 if (!system)
1260                         goto out_free;
1261         } else
1262                 __get_system(system);
1263
1264         dir->entry = debugfs_create_dir(name, parent);
1265         if (!dir->entry) {
1266                 pr_warning("Failed to create system directory %s\n", name);
1267                 __put_system(system);
1268                 goto out_free;
1269         }
1270
1271         dir->tr = tr;
1272         dir->ref_count = 1;
1273         dir->nr_events = 1;
1274         dir->subsystem = system;
1275         file->system = dir;
1276
1277         entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1278                                     &ftrace_subsystem_filter_fops);
1279         if (!entry) {
1280                 kfree(system->filter);
1281                 system->filter = NULL;
1282                 pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1283         }
1284
1285         trace_create_file("enable", 0644, dir->entry, dir,
1286                           &ftrace_system_enable_fops);
1287
1288         list_add(&dir->list, &tr->systems);
1289
1290         return dir->entry;
1291
1292  out_free:
1293         kfree(dir);
1294  out_fail:
1295         /* Only print this message if failed on memory allocation */
1296         if (!dir || !system)
1297                 pr_warning("No memory to create event subsystem %s\n",
1298                            name);
1299         return NULL;
1300 }
1301
1302 static int
1303 event_create_dir(struct dentry *parent,
1304                  struct ftrace_event_file *file,
1305                  const struct file_operations *id,
1306                  const struct file_operations *enable,
1307                  const struct file_operations *filter,
1308                  const struct file_operations *format)
1309 {
1310         struct ftrace_event_call *call = file->event_call;
1311         struct trace_array *tr = file->tr;
1312         struct list_head *head;
1313         struct dentry *d_events;
1314         int ret;
1315
1316         /*
1317          * If the trace point header did not define TRACE_SYSTEM
1318          * then the system would be called "TRACE_SYSTEM".
1319          */
1320         if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1321                 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1322                 if (!d_events)
1323                         return -ENOMEM;
1324         } else
1325                 d_events = parent;
1326
1327         file->dir = debugfs_create_dir(call->name, d_events);
1328         if (!file->dir) {
1329                 pr_warning("Could not create debugfs '%s' directory\n",
1330                            call->name);
1331                 return -1;
1332         }
1333
1334         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1335                 trace_create_file("enable", 0644, file->dir, file,
1336                                   enable);
1337
1338 #ifdef CONFIG_PERF_EVENTS
1339         if (call->event.type && call->class->reg)
1340                 trace_create_file("id", 0444, file->dir, call,
1341                                   id);
1342 #endif
1343
1344         /*
1345          * Other events may have the same class. Only update
1346          * the fields if they are not already defined.
1347          */
1348         head = trace_get_fields(call);
1349         if (list_empty(head)) {
1350                 ret = call->class->define_fields(call);
1351                 if (ret < 0) {
1352                         pr_warning("Could not initialize trace point"
1353                                    " events/%s\n", call->name);
1354                         return -1;
1355                 }
1356         }
1357         trace_create_file("filter", 0644, file->dir, call,
1358                           filter);
1359
1360         trace_create_file("format", 0444, file->dir, call,
1361                           format);
1362
1363         return 0;
1364 }
1365
1366 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
1367 {
1368         if (!dir)
1369                 return;
1370
1371         if (!--dir->nr_events) {
1372                 debugfs_remove_recursive(dir->entry);
1373                 list_del(&dir->list);
1374                 __put_system_dir(dir);
1375         }
1376 }
1377
1378 static void remove_event_from_tracers(struct ftrace_event_call *call)
1379 {
1380         struct ftrace_event_file *file;
1381         struct trace_array *tr;
1382
1383         do_for_each_event_file_safe(tr, file) {
1384
1385                 if (file->event_call != call)
1386                         continue;
1387
1388                 list_del(&file->list);
1389                 debugfs_remove_recursive(file->dir);
1390                 remove_subsystem(file->system);
1391                 kmem_cache_free(file_cachep, file);
1392
1393                 /*
1394                  * The do_for_each_event_file_safe() is
1395                  * a double loop. After finding the call for this
1396                  * trace_array, we use break to jump to the next
1397                  * trace_array.
1398                  */
1399                 break;
1400         } while_for_each_event_file();
1401 }
1402
1403 static void event_remove(struct ftrace_event_call *call)
1404 {
1405         struct trace_array *tr;
1406         struct ftrace_event_file *file;
1407
1408         do_for_each_event_file(tr, file) {
1409                 if (file->event_call != call)
1410                         continue;
1411                 ftrace_event_enable_disable(file, 0);
1412                 /*
1413                  * The do_for_each_event_file() is
1414                  * a double loop. After finding the call for this
1415                  * trace_array, we use break to jump to the next
1416                  * trace_array.
1417                  */
1418                 break;
1419         } while_for_each_event_file();
1420
1421         if (call->event.funcs)
1422                 __unregister_ftrace_event(&call->event);
1423         remove_event_from_tracers(call);
1424         list_del(&call->list);
1425 }
1426
1427 static int event_init(struct ftrace_event_call *call)
1428 {
1429         int ret = 0;
1430
1431         if (WARN_ON(!call->name))
1432                 return -EINVAL;
1433
1434         if (call->class->raw_init) {
1435                 ret = call->class->raw_init(call);
1436                 if (ret < 0 && ret != -ENOSYS)
1437                         pr_warn("Could not initialize trace events/%s\n",
1438                                 call->name);
1439         }
1440
1441         return ret;
1442 }
1443
1444 static int
1445 __register_event(struct ftrace_event_call *call, struct module *mod)
1446 {
1447         int ret;
1448
1449         ret = event_init(call);
1450         if (ret < 0)
1451                 return ret;
1452
1453         list_add(&call->list, &ftrace_events);
1454         call->mod = mod;
1455
1456         return 0;
1457 }
1458
1459 /* Add an event to a trace directory */
1460 static int
1461 __trace_add_new_event(struct ftrace_event_call *call,
1462                       struct trace_array *tr,
1463                       const struct file_operations *id,
1464                       const struct file_operations *enable,
1465                       const struct file_operations *filter,
1466                       const struct file_operations *format)
1467 {
1468         struct ftrace_event_file *file;
1469
1470         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1471         if (!file)
1472                 return -ENOMEM;
1473
1474         file->event_call = call;
1475         file->tr = tr;
1476         list_add(&file->list, &tr->events);
1477
1478         return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1479 }
1480
1481 /*
1482  * Just create a decriptor for early init. A descriptor is required
1483  * for enabling events at boot. We want to enable events before
1484  * the filesystem is initialized.
1485  */
1486 static __init int
1487 __trace_early_add_new_event(struct ftrace_event_call *call,
1488                             struct trace_array *tr)
1489 {
1490         struct ftrace_event_file *file;
1491
1492         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1493         if (!file)
1494                 return -ENOMEM;
1495
1496         file->event_call = call;
1497         file->tr = tr;
1498         list_add(&file->list, &tr->events);
1499
1500         return 0;
1501 }
1502
1503 struct ftrace_module_file_ops;
1504 static void __add_event_to_tracers(struct ftrace_event_call *call,
1505                                    struct ftrace_module_file_ops *file_ops);
1506
1507 /* Add an additional event_call dynamically */
1508 int trace_add_event_call(struct ftrace_event_call *call)
1509 {
1510         int ret;
1511         mutex_lock(&event_mutex);
1512
1513         ret = __register_event(call, NULL);
1514         if (ret >= 0)
1515                 __add_event_to_tracers(call, NULL);
1516
1517         mutex_unlock(&event_mutex);
1518         return ret;
1519 }
1520
1521 /*
1522  * Must be called under locking both of event_mutex and trace_event_mutex.
1523  */
1524 static void __trace_remove_event_call(struct ftrace_event_call *call)
1525 {
1526         event_remove(call);
1527         trace_destroy_fields(call);
1528         destroy_preds(call);
1529 }
1530
1531 /* Remove an event_call */
1532 void trace_remove_event_call(struct ftrace_event_call *call)
1533 {
1534         mutex_lock(&event_mutex);
1535         down_write(&trace_event_mutex);
1536         __trace_remove_event_call(call);
1537         up_write(&trace_event_mutex);
1538         mutex_unlock(&event_mutex);
1539 }
1540
1541 #define for_each_event(event, start, end)                       \
1542         for (event = start;                                     \
1543              (unsigned long)event < (unsigned long)end;         \
1544              event++)
1545
1546 #ifdef CONFIG_MODULES
1547
1548 static LIST_HEAD(ftrace_module_file_list);
1549
1550 /*
1551  * Modules must own their file_operations to keep up with
1552  * reference counting.
1553  */
1554 struct ftrace_module_file_ops {
1555         struct list_head                list;
1556         struct module                   *mod;
1557         struct file_operations          id;
1558         struct file_operations          enable;
1559         struct file_operations          format;
1560         struct file_operations          filter;
1561 };
1562
1563 static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
1564 {
1565         struct ftrace_module_file_ops *file_ops;
1566
1567         list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1568                 if (file_ops->mod == mod)
1569                         return file_ops;
1570         }
1571         return NULL;
1572 }
1573
1574 static struct ftrace_module_file_ops *
1575 trace_create_file_ops(struct module *mod)
1576 {
1577         struct ftrace_module_file_ops *file_ops;
1578
1579         /*
1580          * This is a bit of a PITA. To allow for correct reference
1581          * counting, modules must "own" their file_operations.
1582          * To do this, we allocate the file operations that will be
1583          * used in the event directory.
1584          */
1585
1586         file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1587         if (!file_ops)
1588                 return NULL;
1589
1590         file_ops->mod = mod;
1591
1592         file_ops->id = ftrace_event_id_fops;
1593         file_ops->id.owner = mod;
1594
1595         file_ops->enable = ftrace_enable_fops;
1596         file_ops->enable.owner = mod;
1597
1598         file_ops->filter = ftrace_event_filter_fops;
1599         file_ops->filter.owner = mod;
1600
1601         file_ops->format = ftrace_event_format_fops;
1602         file_ops->format.owner = mod;
1603
1604         list_add(&file_ops->list, &ftrace_module_file_list);
1605
1606         return file_ops;
1607 }
1608
1609 static void trace_module_add_events(struct module *mod)
1610 {
1611         struct ftrace_module_file_ops *file_ops = NULL;
1612         struct ftrace_event_call **call, **start, **end;
1613
1614         start = mod->trace_events;
1615         end = mod->trace_events + mod->num_trace_events;
1616
1617         if (start == end)
1618                 return;
1619
1620         file_ops = trace_create_file_ops(mod);
1621         if (!file_ops)
1622                 return;
1623
1624         for_each_event(call, start, end) {
1625                 __register_event(*call, mod);
1626                 __add_event_to_tracers(*call, file_ops);
1627         }
1628 }
1629
1630 static void trace_module_remove_events(struct module *mod)
1631 {
1632         struct ftrace_module_file_ops *file_ops;
1633         struct ftrace_event_call *call, *p;
1634         bool found = false;
1635
1636         down_write(&trace_event_mutex);
1637         list_for_each_entry_safe(call, p, &ftrace_events, list) {
1638                 if (call->mod == mod) {
1639                         found = true;
1640                         __trace_remove_event_call(call);
1641                 }
1642         }
1643
1644         /* Now free the file_operations */
1645         list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1646                 if (file_ops->mod == mod)
1647                         break;
1648         }
1649         if (&file_ops->list != &ftrace_module_file_list) {
1650                 list_del(&file_ops->list);
1651                 kfree(file_ops);
1652         }
1653
1654         /*
1655          * It is safest to reset the ring buffer if the module being unloaded
1656          * registered any events.
1657          */
1658         if (found)
1659                 tracing_reset_current_online_cpus();
1660         up_write(&trace_event_mutex);
1661 }
1662
1663 static int trace_module_notify(struct notifier_block *self,
1664                                unsigned long val, void *data)
1665 {
1666         struct module *mod = data;
1667
1668         mutex_lock(&event_mutex);
1669         switch (val) {
1670         case MODULE_STATE_COMING:
1671                 trace_module_add_events(mod);
1672                 break;
1673         case MODULE_STATE_GOING:
1674                 trace_module_remove_events(mod);
1675                 break;
1676         }
1677         mutex_unlock(&event_mutex);
1678
1679         return 0;
1680 }
1681 #else
1682 static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
1683 {
1684         return NULL;
1685 }
1686 static int trace_module_notify(struct notifier_block *self,
1687                                unsigned long val, void *data)
1688 {
1689         return 0;
1690 }
1691 #endif /* CONFIG_MODULES */
1692
1693 /* Create a new event directory structure for a trace directory. */
1694 static void
1695 __trace_add_event_dirs(struct trace_array *tr)
1696 {
1697         struct ftrace_module_file_ops *file_ops = NULL;
1698         struct ftrace_event_call *call;
1699         int ret;
1700
1701         list_for_each_entry(call, &ftrace_events, list) {
1702                 if (call->mod) {
1703                         /*
1704                          * Directories for events by modules need to
1705                          * keep module ref counts when opened (as we don't
1706                          * want the module to disappear when reading one
1707                          * of these files). The file_ops keep account of
1708                          * the module ref count.
1709                          *
1710                          * As event_calls are added in groups by module,
1711                          * when we find one file_ops, we don't need to search for
1712                          * each call in that module, as the rest should be the
1713                          * same. Only search for a new one if the last one did
1714                          * not match.
1715                          */
1716                         if (!file_ops || call->mod != file_ops->mod)
1717                                 file_ops = find_ftrace_file_ops(call->mod);
1718                         if (!file_ops)
1719                                 continue; /* Warn? */
1720                         ret = __trace_add_new_event(call, tr,
1721                                         &file_ops->id, &file_ops->enable,
1722                                         &file_ops->filter, &file_ops->format);
1723                         if (ret < 0)
1724                                 pr_warning("Could not create directory for event %s\n",
1725                                            call->name);
1726                         continue;
1727                 }
1728                 ret = __trace_add_new_event(call, tr,
1729                                             &ftrace_event_id_fops,
1730                                             &ftrace_enable_fops,
1731                                             &ftrace_event_filter_fops,
1732                                             &ftrace_event_format_fops);
1733                 if (ret < 0)
1734                         pr_warning("Could not create directory for event %s\n",
1735                                    call->name);
1736         }
1737 }
1738
1739 /*
1740  * The top level array has already had its ftrace_event_file
1741  * descriptors created in order to allow for early events to
1742  * be recorded. This function is called after the debugfs has been
1743  * initialized, and we now have to create the files associated
1744  * to the events.
1745  */
1746 static __init void
1747 __trace_early_add_event_dirs(struct trace_array *tr)
1748 {
1749         struct ftrace_event_file *file;
1750         int ret;
1751
1752
1753         list_for_each_entry(file, &tr->events, list) {
1754                 ret = event_create_dir(tr->event_dir, file,
1755                                        &ftrace_event_id_fops,
1756                                        &ftrace_enable_fops,
1757                                        &ftrace_event_filter_fops,
1758                                        &ftrace_event_format_fops);
1759                 if (ret < 0)
1760                         pr_warning("Could not create directory for event %s\n",
1761                                    file->event_call->name);
1762         }
1763 }
1764
1765 /*
1766  * For early boot up, the top trace array requires to have
1767  * a list of events that can be enabled. This must be done before
1768  * the filesystem is set up in order to allow events to be traced
1769  * early.
1770  */
1771 static __init void
1772 __trace_early_add_events(struct trace_array *tr)
1773 {
1774         struct ftrace_event_call *call;
1775         int ret;
1776
1777         list_for_each_entry(call, &ftrace_events, list) {
1778                 /* Early boot up should not have any modules loaded */
1779                 if (WARN_ON_ONCE(call->mod))
1780                         continue;
1781
1782                 ret = __trace_early_add_new_event(call, tr);
1783                 if (ret < 0)
1784                         pr_warning("Could not create early event %s\n",
1785                                    call->name);
1786         }
1787 }
1788
1789 /* Remove the event directory structure for a trace directory. */
1790 static void
1791 __trace_remove_event_dirs(struct trace_array *tr)
1792 {
1793         struct ftrace_event_file *file, *next;
1794
1795         list_for_each_entry_safe(file, next, &tr->events, list) {
1796                 list_del(&file->list);
1797                 debugfs_remove_recursive(file->dir);
1798                 remove_subsystem(file->system);
1799                 kmem_cache_free(file_cachep, file);
1800         }
1801 }
1802
1803 static void
1804 __add_event_to_tracers(struct ftrace_event_call *call,
1805                        struct ftrace_module_file_ops *file_ops)
1806 {
1807         struct trace_array *tr;
1808
1809         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1810                 if (file_ops)
1811                         __trace_add_new_event(call, tr,
1812                                               &file_ops->id, &file_ops->enable,
1813                                               &file_ops->filter, &file_ops->format);
1814                 else
1815                         __trace_add_new_event(call, tr,
1816                                               &ftrace_event_id_fops,
1817                                               &ftrace_enable_fops,
1818                                               &ftrace_event_filter_fops,
1819                                               &ftrace_event_format_fops);
1820         }
1821 }
1822
1823 static struct notifier_block trace_module_nb = {
1824         .notifier_call = trace_module_notify,
1825         .priority = 0,
1826 };
1827
1828 extern struct ftrace_event_call *__start_ftrace_events[];
1829 extern struct ftrace_event_call *__stop_ftrace_events[];
1830
1831 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
1832
1833 static __init int setup_trace_event(char *str)
1834 {
1835         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
1836         ring_buffer_expanded = 1;
1837         tracing_selftest_disabled = 1;
1838
1839         return 1;
1840 }
1841 __setup("trace_event=", setup_trace_event);
1842
1843 /* Expects to have event_mutex held when called */
1844 static int
1845 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
1846 {
1847         struct dentry *d_events;
1848         struct dentry *entry;
1849
1850         entry = debugfs_create_file("set_event", 0644, parent,
1851                                     tr, &ftrace_set_event_fops);
1852         if (!entry) {
1853                 pr_warning("Could not create debugfs 'set_event' entry\n");
1854                 return -ENOMEM;
1855         }
1856
1857         d_events = debugfs_create_dir("events", parent);
1858         if (!d_events) {
1859                 pr_warning("Could not create debugfs 'events' directory\n");
1860                 return -ENOMEM;
1861         }
1862
1863         /* ring buffer internal formats */
1864         trace_create_file("header_page", 0444, d_events,
1865                           ring_buffer_print_page_header,
1866                           &ftrace_show_header_fops);
1867
1868         trace_create_file("header_event", 0444, d_events,
1869                           ring_buffer_print_entry_header,
1870                           &ftrace_show_header_fops);
1871
1872         trace_create_file("enable", 0644, d_events,
1873                           tr, &ftrace_tr_enable_fops);
1874
1875         tr->event_dir = d_events;
1876
1877         return 0;
1878 }
1879
1880 /**
1881  * event_trace_add_tracer - add a instance of a trace_array to events
1882  * @parent: The parent dentry to place the files/directories for events in
1883  * @tr: The trace array associated with these events
1884  *
1885  * When a new instance is created, it needs to set up its events
1886  * directory, as well as other files associated with events. It also
1887  * creates the event hierachry in the @parent/events directory.
1888  *
1889  * Returns 0 on success.
1890  */
1891 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
1892 {
1893         int ret;
1894
1895         mutex_lock(&event_mutex);
1896
1897         ret = create_event_toplevel_files(parent, tr);
1898         if (ret)
1899                 goto out_unlock;
1900
1901         down_write(&trace_event_mutex);
1902         __trace_add_event_dirs(tr);
1903         up_write(&trace_event_mutex);
1904
1905  out_unlock:
1906         mutex_unlock(&event_mutex);
1907
1908         return ret;
1909 }
1910
1911 /*
1912  * The top trace array already had its file descriptors created.
1913  * Now the files themselves need to be created.
1914  */
1915 static __init int
1916 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
1917 {
1918         int ret;
1919
1920         mutex_lock(&event_mutex);
1921
1922         ret = create_event_toplevel_files(parent, tr);
1923         if (ret)
1924                 goto out_unlock;
1925
1926         down_write(&trace_event_mutex);
1927         __trace_early_add_event_dirs(tr);
1928         up_write(&trace_event_mutex);
1929
1930  out_unlock:
1931         mutex_unlock(&event_mutex);
1932
1933         return ret;
1934 }
1935
1936 int event_trace_del_tracer(struct trace_array *tr)
1937 {
1938         /* Disable any running events */
1939         __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
1940
1941         mutex_lock(&event_mutex);
1942
1943         down_write(&trace_event_mutex);
1944         __trace_remove_event_dirs(tr);
1945         debugfs_remove_recursive(tr->event_dir);
1946         up_write(&trace_event_mutex);
1947
1948         tr->event_dir = NULL;
1949
1950         mutex_unlock(&event_mutex);
1951
1952         return 0;
1953 }
1954
1955 static __init int event_trace_memsetup(void)
1956 {
1957         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
1958         file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
1959         return 0;
1960 }
1961
1962 static __init int event_trace_enable(void)
1963 {
1964         struct trace_array *tr = top_trace_array();
1965         struct ftrace_event_call **iter, *call;
1966         char *buf = bootup_event_buf;
1967         char *token;
1968         int ret;
1969
1970         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
1971
1972                 call = *iter;
1973                 ret = event_init(call);
1974                 if (!ret)
1975                         list_add(&call->list, &ftrace_events);
1976         }
1977
1978         /*
1979          * We need the top trace array to have a working set of trace
1980          * points at early init, before the debug files and directories
1981          * are created. Create the file entries now, and attach them
1982          * to the actual file dentries later.
1983          */
1984         __trace_early_add_events(tr);
1985
1986         while (true) {
1987                 token = strsep(&buf, ",");
1988
1989                 if (!token)
1990                         break;
1991                 if (!*token)
1992                         continue;
1993
1994                 ret = ftrace_set_clr_event(tr, token, 1);
1995                 if (ret)
1996                         pr_warn("Failed to enable trace event: %s\n", token);
1997         }
1998
1999         trace_printk_start_comm();
2000
2001         return 0;
2002 }
2003
2004 static __init int event_trace_init(void)
2005 {
2006         struct trace_array *tr;
2007         struct dentry *d_tracer;
2008         struct dentry *entry;
2009         int ret;
2010
2011         tr = top_trace_array();
2012
2013         d_tracer = tracing_init_dentry();
2014         if (!d_tracer)
2015                 return 0;
2016
2017         entry = debugfs_create_file("available_events", 0444, d_tracer,
2018                                     tr, &ftrace_avail_fops);
2019         if (!entry)
2020                 pr_warning("Could not create debugfs "
2021                            "'available_events' entry\n");
2022
2023         if (trace_define_common_fields())
2024                 pr_warning("tracing: Failed to allocate common fields");
2025
2026         ret = early_event_add_tracer(d_tracer, tr);
2027         if (ret)
2028                 return ret;
2029
2030         ret = register_module_notifier(&trace_module_nb);
2031         if (ret)
2032                 pr_warning("Failed to register trace events module notifier\n");
2033
2034         return 0;
2035 }
2036 early_initcall(event_trace_memsetup);
2037 core_initcall(event_trace_enable);
2038 fs_initcall(event_trace_init);
2039
2040 #ifdef CONFIG_FTRACE_STARTUP_TEST
2041
2042 static DEFINE_SPINLOCK(test_spinlock);
2043 static DEFINE_SPINLOCK(test_spinlock_irq);
2044 static DEFINE_MUTEX(test_mutex);
2045
2046 static __init void test_work(struct work_struct *dummy)
2047 {
2048         spin_lock(&test_spinlock);
2049         spin_lock_irq(&test_spinlock_irq);
2050         udelay(1);
2051         spin_unlock_irq(&test_spinlock_irq);
2052         spin_unlock(&test_spinlock);
2053
2054         mutex_lock(&test_mutex);
2055         msleep(1);
2056         mutex_unlock(&test_mutex);
2057 }
2058
2059 static __init int event_test_thread(void *unused)
2060 {
2061         void *test_malloc;
2062
2063         test_malloc = kmalloc(1234, GFP_KERNEL);
2064         if (!test_malloc)
2065                 pr_info("failed to kmalloc\n");
2066
2067         schedule_on_each_cpu(test_work);
2068
2069         kfree(test_malloc);
2070
2071         set_current_state(TASK_INTERRUPTIBLE);
2072         while (!kthread_should_stop())
2073                 schedule();
2074
2075         return 0;
2076 }
2077
2078 /*
2079  * Do various things that may trigger events.
2080  */
2081 static __init void event_test_stuff(void)
2082 {
2083         struct task_struct *test_thread;
2084
2085         test_thread = kthread_run(event_test_thread, NULL, "test-events");
2086         msleep(1);
2087         kthread_stop(test_thread);
2088 }
2089
2090 /*
2091  * For every trace event defined, we will test each trace point separately,
2092  * and then by groups, and finally all trace points.
2093  */
2094 static __init void event_trace_self_tests(void)
2095 {
2096         struct ftrace_subsystem_dir *dir;
2097         struct ftrace_event_file *file;
2098         struct ftrace_event_call *call;
2099         struct event_subsystem *system;
2100         struct trace_array *tr;
2101         int ret;
2102
2103         tr = top_trace_array();
2104
2105         pr_info("Running tests on trace events:\n");
2106
2107         list_for_each_entry(file, &tr->events, list) {
2108
2109                 call = file->event_call;
2110
2111                 /* Only test those that have a probe */
2112                 if (!call->class || !call->class->probe)
2113                         continue;
2114
2115 /*
2116  * Testing syscall events here is pretty useless, but
2117  * we still do it if configured. But this is time consuming.
2118  * What we really need is a user thread to perform the
2119  * syscalls as we test.
2120  */
2121 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2122                 if (call->class->system &&
2123                     strcmp(call->class->system, "syscalls") == 0)
2124                         continue;
2125 #endif
2126
2127                 pr_info("Testing event %s: ", call->name);
2128
2129                 /*
2130                  * If an event is already enabled, someone is using
2131                  * it and the self test should not be on.
2132                  */
2133                 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2134                         pr_warning("Enabled event during self test!\n");
2135                         WARN_ON_ONCE(1);
2136                         continue;
2137                 }
2138
2139                 ftrace_event_enable_disable(file, 1);
2140                 event_test_stuff();
2141                 ftrace_event_enable_disable(file, 0);
2142
2143                 pr_cont("OK\n");
2144         }
2145
2146         /* Now test at the sub system level */
2147
2148         pr_info("Running tests on trace event systems:\n");
2149
2150         list_for_each_entry(dir, &tr->systems, list) {
2151
2152                 system = dir->subsystem;
2153
2154                 /* the ftrace system is special, skip it */
2155                 if (strcmp(system->name, "ftrace") == 0)
2156                         continue;
2157
2158                 pr_info("Testing event system %s: ", system->name);
2159
2160                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2161                 if (WARN_ON_ONCE(ret)) {
2162                         pr_warning("error enabling system %s\n",
2163                                    system->name);
2164                         continue;
2165                 }
2166
2167                 event_test_stuff();
2168
2169                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2170                 if (WARN_ON_ONCE(ret)) {
2171                         pr_warning("error disabling system %s\n",
2172                                    system->name);
2173                         continue;
2174                 }
2175
2176                 pr_cont("OK\n");
2177         }
2178
2179         /* Test with all events enabled */
2180
2181         pr_info("Running tests on all trace events:\n");
2182         pr_info("Testing all events: ");
2183
2184         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2185         if (WARN_ON_ONCE(ret)) {
2186                 pr_warning("error enabling all events\n");
2187                 return;
2188         }
2189
2190         event_test_stuff();
2191
2192         /* reset sysname */
2193         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2194         if (WARN_ON_ONCE(ret)) {
2195                 pr_warning("error disabling all events\n");
2196                 return;
2197         }
2198
2199         pr_cont("OK\n");
2200 }
2201
2202 #ifdef CONFIG_FUNCTION_TRACER
2203
2204 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2205
2206 static void
2207 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2208                           struct ftrace_ops *op, struct pt_regs *pt_regs)
2209 {
2210         struct ring_buffer_event *event;
2211         struct ring_buffer *buffer;
2212         struct ftrace_entry *entry;
2213         unsigned long flags;
2214         long disabled;
2215         int cpu;
2216         int pc;
2217
2218         pc = preempt_count();
2219         preempt_disable_notrace();
2220         cpu = raw_smp_processor_id();
2221         disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2222
2223         if (disabled != 1)
2224                 goto out;
2225
2226         local_save_flags(flags);
2227
2228         event = trace_current_buffer_lock_reserve(&buffer,
2229                                                   TRACE_FN, sizeof(*entry),
2230                                                   flags, pc);
2231         if (!event)
2232                 goto out;
2233         entry   = ring_buffer_event_data(event);
2234         entry->ip                       = ip;
2235         entry->parent_ip                = parent_ip;
2236
2237         trace_buffer_unlock_commit(buffer, event, flags, pc);
2238
2239  out:
2240         atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2241         preempt_enable_notrace();
2242 }
2243
2244 static struct ftrace_ops trace_ops __initdata  =
2245 {
2246         .func = function_test_events_call,
2247         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2248 };
2249
2250 static __init void event_trace_self_test_with_function(void)
2251 {
2252         int ret;
2253         ret = register_ftrace_function(&trace_ops);
2254         if (WARN_ON(ret < 0)) {
2255                 pr_info("Failed to enable function tracer for event tests\n");
2256                 return;
2257         }
2258         pr_info("Running tests again, along with the function tracer\n");
2259         event_trace_self_tests();
2260         unregister_ftrace_function(&trace_ops);
2261 }
2262 #else
2263 static __init void event_trace_self_test_with_function(void)
2264 {
2265 }
2266 #endif
2267
2268 static __init int event_trace_self_tests_init(void)
2269 {
2270         if (!tracing_selftest_disabled) {
2271                 event_trace_self_tests();
2272                 event_trace_self_test_with_function();
2273         }
2274
2275         return 0;
2276 }
2277
2278 late_initcall(event_trace_self_tests_init);
2279
2280 #endif