ftrace: Use hash instead for FTRACE_FL_FILTER
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/ftrace.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })
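
/*
 * Editor's sketch (not in the original file): because these macros
 * evaluate to the condition's value, a caller can warn, kill ftrace,
 * and bail out in a single test, e.g.:
 *
 *      if (FTRACE_WARN_ON(!rec))
 *              return NULL;
 */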

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_MAX_BITS 10
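
/*
 * Editor's sketch: FTRACE_HASH_BITS gives the probe hash declared
 * later in this file 1 << 7 == 128 buckets; an instruction pointer
 * is folded into a bucket index with hash_long(), roughly:
 *
 *      unsigned long key = hash_long(ip, FTRACE_HASH_BITS);
 *      struct hlist_head *hhd = &ftrace_func_hash[key];
 */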

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;
        struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func           = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

/*
 * Traverse the ftrace_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/

        while (op != &ftrace_list_end) {
                op->func(ip, parent_ip);
                op = rcu_dereference_raw(op->next); /*see above*/
        }
}
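
/*
 * Editor's sketch: with two registered ftrace_ops, say ops_a and
 * ops_b (hypothetical names), ftrace_trace_function points at
 * ftrace_list_func above, which walks ops_b -> ops_a ->
 * ftrace_list_end, invoking each ->func(ip, parent_ip) in turn.
 */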

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
        if (!test_tsk_trace_trace(current))
                return;

        ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be some lag before tracing fully stops.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif

static void update_ftrace_function(void)
{
        ftrace_func_t func;

        /*
         * If there's only one function registered, then call that
         * function directly. Otherwise, we need to iterate over the
         * registered callers.
         */
        if (ftrace_list == &ftrace_list_end ||
            ftrace_list->next == &ftrace_list_end)
                func = ftrace_list->func;
        else
                func = ftrace_list_func;

        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        }
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
#else
        __ftrace_trace_function = func;
        ftrace_trace_function = ftrace_test_stop_func;
#endif
}
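
/*
 * Editor's sketch of the resulting dispatch, assuming a single
 * registered ops with callback my_func (hypothetical name):
 *
 *      ftrace_trace_function == my_func;              direct call
 *
 * and with two or more registered:
 *
 *      ftrace_trace_function == ftrace_list_func;     list walk
 */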

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        rcu_assign_pointer(ftrace_list, ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                return 0;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}

static void ftrace_update_pid_func(void)
{
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
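
/*
 * Editor's sketch: on a 64-bit build with 4 KiB pages and
 * CONFIG_FUNCTION_GRAPH_TRACER, sizeof(struct ftrace_profile) is
 * roughly 48 bytes (hlist_node + ip + counter + two u64 fields), so:
 *
 *      PROFILE_RECORDS_SIZE ~= 4096 - 16 = 4080
 *      PROFILES_PER_PAGE    ~= 4080 / 48 = 85 records per page
 *
 * Exact figures depend on the architecture and config.
 */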

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;
        else
                return 0;
}
#else
/* without function graph, compare against hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;
        else
                return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "  Function                               "
                   "Hit    Time            Avg             s^2\n"
                      "  --------                               "
                   "---    ----            ---             ---\n");
#else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "    ");
        avg = rec->time;
        do_div(avg, rec->counter);

        /* Sample variance (reported in the s^2 column) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                stddev = rec->time_squared - rec->counter * avg * avg;
                /*
                 * Divide only by 1000 for the ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide by 1000 again.
                 */
                do_div(stddev, (rec->counter - 1) * 1000);
        }
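
        /*
         * Editor's note: the block above computes the sample variance
         *
         *      s^2 = (sum(t_i^2) - n * avg^2) / (n - 1)
         *
         * with rec->time_squared = sum(t_i^2), n = rec->counter and
         * avg = rec->time / n (integer-truncated), pre-divided by 1000
         * for the ns^2 -> us^2 step mentioned above.
         */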

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. From past experience
         * we have seen around 20K functions, so that should be more
         * than enough. It is highly unlikely we will execute every
         * function in the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 0; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        /*
         * The loop above already freed the first page
         * (stat->start == stat->pages); freeing it again here
         * would be a double free.
         */
        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        if (!ftrace_profile_bits) {
                size--;

                for (; size; size >>= 1)
                        ftrace_profile_bits++;
        }

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_online_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        struct hlist_node *n;
        unsigned long key;

        key = hash_long(ip, ftrace_profile_bits);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu(rec, n, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, ftrace_profile_bits);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0);
        return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zero'd ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
        .func           = function_profile_call,
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        char buf[64];           /* big enough to hold a number */
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler calls stop_machine
                         * so this acts like a synchronize_sched.
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}
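
/*
 * Usage sketch (editor's note): this handler backs the debugfs file
 * created in ftrace_profile_debugfs() below, typically driven as:
 *
 *      # echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *      # cat /sys/kernel/debug/tracing/trace_stat/function0
 */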

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The files created are permanent; even on
                         * failure here we do not free the memory that
                         * earlier iterations registered.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = debugfs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct rcu_head         rcu;
};

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
};

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
};

static struct hlist_head notrace_buckets[1 << FTRACE_HASH_MAX_BITS];
static struct ftrace_hash notrace_hash = {
        .size_bits = FTRACE_HASH_MAX_BITS,
        .buckets = notrace_buckets,
};

static struct hlist_head filter_buckets[1 << FTRACE_HASH_MAX_BITS];
static struct ftrace_hash filter_hash = {
        .size_bits = FTRACE_HASH_MAX_BITS,
        .buckets = filter_buckets,
};
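
/*
 * Editor's sketch: these two hashes take over the role of the old
 * per-record FTRACE_FL_FILTER/FTRACE_FL_NOTRACE flag bits (see the
 * commit subject). A record ends up traced when, roughly:
 *
 *      !ftrace_lookup_ip(&notrace_hash, rec->ip) &&
 *      (!filter_hash.count || ftrace_lookup_ip(&filter_hash, rec->ip))
 *
 * as implemented in __ftrace_replace_code() below.
 */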

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        int                     index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
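
/*
 * Editor's sketch: with 4 KiB pages, a 16-byte struct ftrace_page
 * header and a dyn_ftrace of roughly 24 bytes (both assumptions,
 * arch-dependent), ENTRIES_PER_PAGE works out to about
 * (4096 - 16) / 24 ~= 170 records per page.
 */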

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;
        struct hlist_node *n;

        if (!hash->count)
                return NULL;

        if (hash->size_bits > 0)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}
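
/*
 * Editor's sketch of the round trip this helper provides together
 * with add_hash_entry()/remove_hash_entry() below (hypothetical
 * caller, not in this file):
 *
 *      if (!ftrace_lookup_ip(hash, ip))
 *              ret = add_hash_entry(hash, ip);    keyed by hash_long()
 *      ...
 *      entry = ftrace_lookup_ip(hash, ip);
 *      if (entry)
 *              remove_hash_entry(hash, entry);
 */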

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;
        unsigned long key;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        if (hash->size_bits)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        entry->ip = ip;
        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;

        return 0;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tp, *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
                        remove_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

/*
 * This is a nested double loop. Do not use 'break' to break out of
 * it; you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }
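
/*
 * Editor's sketch of the typical traversal, as used throughout this
 * file ('continue' only skips the current record and is safe):
 *
 *      struct ftrace_page *pg;
 *      struct dyn_ftrace *rec;
 *
 *      do_for_each_ftrace_rec(pg, rec) {
 *              if (rec->flags & FTRACE_FL_FREE)
 *                      continue;
 *              ...
 *      } while_for_each_ftrace_rec();
 */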

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->freelist = ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        FTRACE_WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        return NULL;
                }

                ftrace_free_records = rec->freelist;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next) {
                        /* allocate another page */
                        ftrace_pages->next =
                                (void *)get_zeroed_page(GFP_KERNEL);
                        if (!ftrace_pages->next)
                                return NULL;
                }
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;
        rec->newlist = ftrace_new_addrs;
        ftrace_new_addrs = rec;

        return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
                print_ip_sym(ip);
                break;
        case -EINVAL:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
                printk(KERN_CONT "\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
                print_ip_sym(ip);
                break;
        default:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
}

/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        do_for_each_ftrace_rec(pg, rec) {
                if (rec->ip <= (unsigned long)end &&
                    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
                        return 1;
        } while_for_each_ftrace_rec();
        return 0;
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        unsigned long flag = 0UL;

        ftrace_addr = (unsigned long)FTRACE_ADDR;

        /*
         * If this record is not to be traced or we want to disable it,
         * then disable it.
         *
         * If we want to enable it and filtering is off, then enable it.
         *
         * If we want to enable it and filtering is on, enable it only if
         * it's filtered.
         */
        if (enable && !ftrace_lookup_ip(&notrace_hash, rec->ip)) {
                if (!filter_hash.count || ftrace_lookup_ip(&filter_hash, rec->ip))
                        flag = FTRACE_FL_ENABLED;
        }

        /* If the state of this record hasn't changed, then do nothing */
        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
                return 0;

        if (flag) {
                rec->flags |= FTRACE_FL_ENABLED;
                return ftrace_make_call(rec, ftrace_addr);
        }

        rec->flags &= ~FTRACE_FL_ENABLED;
        return ftrace_make_nop(NULL, rec, ftrace_addr);
}
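
/*
 * Editor's note: the decision above collapses to this truth table
 * (E = enable requested, N = ip in notrace_hash, F = ip in
 * filter_hash, '-' = don't care):
 *
 *      E  N  F  filter_hash.count    record state
 *      0  -  -         -             disabled
 *      1  1  -         -             disabled
 *      1  0  -         0             enabled
 *      1  0  1        >0             enabled
 *      1  0  0        >0             disabled
 */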

static void ftrace_replace_code(int enable)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int failed;

        if (unlikely(ftrace_disabled))
                return;

        do_for_each_ftrace_rec(pg, rec) {
                /* Skip over free records */
                if (rec->flags & FTRACE_FL_FREE)
                        continue;

                failed = __ftrace_replace_code(rec, enable);
                if (failed) {
                        ftrace_bug(failed, rec->ip);
                        /* Stop processing */
                        return;
                }
        } while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
        unsigned long ip;
        int ret;

        ip = rec->ip;

        if (unlikely(ftrace_disabled))
                return 0;

        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
        if (ret) {
                ftrace_bug(ret, ip);
                return 0;
        }
        return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
        return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
        return 0;
}

static int __ftrace_modify_code(void *data)
{
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_START_FUNC_RET)
                ftrace_enable_ftrace_graph_caller();
        else if (*command & FTRACE_STOP_FUNC_RET)
                ftrace_disable_ftrace_graph_caller();

        return 0;
}

static void ftrace_run_update_code(int command)
{
        int ret;

        ret = ftrace_arch_code_modify_prepare();
        FTRACE_WARN_ON(ret);
        if (ret)
                return;

        stop_machine(__ftrace_modify_code, &command, NULL);

        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                return;

        ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
        if (unlikely(ftrace_disabled))
                return;

        ftrace_start_up++;
        command |= FTRACE_ENABLE_CALLS;

        ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
        if (unlikely(ftrace_disabled))
                return;

        ftrace_start_up--;
        /*
         * Just warn in case of imbalance; no need to kill ftrace, it's not
         * critical, but the ftrace_call callers may never be nopped again
         * after further ftrace uses.
         */
        WARN_ON_ONCE(ftrace_start_up < 0);

        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                return;

        ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
        if (unlikely(ftrace_disabled))
                return;

        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
}

static void ftrace_shutdown_sysctl(void)
{
        if (unlikely(ftrace_disabled))
                return;

        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
                ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
        struct dyn_ftrace *p;
        cycle_t start, stop;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        while (ftrace_new_addrs) {

                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
                        return -1;

                p = ftrace_new_addrs;
                ftrace_new_addrs = p->newlist;
                p->flags = 0L;

                /*
                 * Do the initial record conversion from mcount jump
                 * to the NOP instructions.
                 */
                if (!ftrace_code_disable(mod, p)) {
                        ftrace_free_rec(p);
                        /* Game over */
                        break;
                }

                ftrace_update_cnt++;

                /*
                 * If the tracing is enabled, go ahead and enable the record.
                 *
                 * The reason not to enable the record immediately is the
                 * inherent check of ftrace_make_nop/ftrace_make_call for
                 * correct previous instructions.  Doing the NOP conversion
                 * first puts the module into the correct state, thus
                 * passing the ftrace_make_call check.
                 */
                if (ftrace_start_up) {
                        int failed = __ftrace_replace_code(p, 1);
                        if (failed) {
                                ftrace_bug(failed, p->ip);
                                ftrace_free_rec(p);
                        }
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld entries in %d pages\n",
                num_to_init, cnt + 1);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_HASH        = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                          pos;
        loff_t                          func_pos;
        struct ftrace_page              *pg;
        struct dyn_ftrace               *func;
        struct ftrace_func_probe        *probe;
        struct trace_parser             parser;
        struct ftrace_hash              *hash;
        int                             hidx;
        int                             idx;
        unsigned                        flags;
};

static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct hlist_node *hnd = NULL;
        struct hlist_head *hhd;

        (*pos)++;
        iter->pos = *pos;

        if (iter->probe)
                hnd = &iter->probe->node;
 retry:
        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
                return NULL;

        hhd = &ftrace_func_hash[iter->hidx];

        if (hlist_empty(hhd)) {
                iter->hidx++;
                hnd = NULL;
                goto retry;
        }

        if (!hnd)
                hnd = hhd->first;
        else {
                hnd = hnd->next;
                if (!hnd) {
                        iter->hidx++;
                        goto retry;
                }
        }

        if (WARN_ON_ONCE(!hnd))
                return NULL;

        iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

        return iter;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l;

        if (iter->func_pos > *pos)
                return NULL;

        iter->hidx = 0;
        for (l = 0; l <= (*pos - iter->func_pos); ) {
                p = t_hash_next(m, &l);
                if (!p)
                        break;
        }
        if (!p)
                return NULL;

        /* Only set this if we have an item */
        iter->flags |= FTRACE_ITER_HASH;

        return iter;
}

static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
        struct ftrace_func_probe *rec;

        rec = iter->probe;
        if (WARN_ON_ONCE(!rec))
                return -EIO;

        if (rec->ops->print)
                return rec->ops->print(m, rec->ip, rec->ops, rec->data);

        seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

        if (rec->data)
                seq_printf(m, ":%p", rec->data);
        seq_putc(m, '\n');

        return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        if (unlikely(ftrace_disabled))
                return NULL;

        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_next(m, pos);

        (*pos)++;
        iter->pos = iter->func_pos = *pos;

        if (iter->flags & FTRACE_ITER_PRINTALL)
                return t_hash_start(m, pos);

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(ftrace_lookup_ip(&filter_hash, rec->ip))) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !ftrace_lookup_ip(&notrace_hash, rec->ip))) {
                        rec = NULL;
                        goto retry;
                }
        }

        if (!rec)
                return t_hash_start(m, pos);

        iter->func = rec;

        return iter;
}

static void reset_iter_read(struct ftrace_iterator *iter)
{
        iter->pos = 0;
        iter->func_pos = 0;
        /* both flags must be cleared, so the masks are OR'd together */
        iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l;

        mutex_lock(&ftrace_lock);

        if (unlikely(ftrace_disabled))
                return NULL;

        /*
         * If an lseek was done, then reset and start from beginning.
         */
        if (*pos < iter->pos)
                reset_iter_read(iter);

        /*
         * For set_ftrace_filter reading, if we have the filter
         * off, we can short cut and just print out that all
         * functions are enabled.
         */
        if (iter->flags & FTRACE_ITER_FILTER && !filter_hash.count) {
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
                /* reset in case of seek/pread */
                iter->flags &= ~FTRACE_ITER_HASH;
                return iter;
        }

        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_start(m, pos);

        /*
         * Unfortunately, we need to restart at ftrace_pages_start
         * every time we let go of the ftrace_lock. This is because
         * those pointers can change without the lock.
         */
        iter->pg = ftrace_pages_start;
        iter->idx = 0;
        for (l = 0; l <= *pos; ) {
                p = t_next(m, p, &l);
                if (!p)
                        break;
        }

        if (!p) {
                if (iter->flags & FTRACE_ITER_FILTER)
                        return t_hash_start(m, pos);

                return NULL;
        }

        return iter;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec;

        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_show(m, iter);

        if (iter->flags & FTRACE_ITER_PRINTALL) {
                seq_printf(m, "#### all functions enabled ####\n");
                return 0;
        }

        rec = iter->func;

        if (!rec)
                return 0;

        seq_printf(m, "%ps\n", (void *)rec->ip);

        return 0;
}

static const struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

static void ftrace_filter_reset(struct ftrace_hash *hash)
{
        mutex_lock(&ftrace_lock);
        ftrace_hash_clear(hash);
        mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct ftrace_hash *hash, int flag,
                  struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
                kfree(iter);
                return -ENOMEM;
        }

        iter->hash = hash;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_filter_reset(hash);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->flags = flag;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else {
                        trace_parser_put(&iter->parser);
                        kfree(iter);
                }
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&filter_hash, FTRACE_ITER_FILTER,
                                 inode, file);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&notrace_hash, FTRACE_ITER_NOTRACE,
                                 inode, file);
}
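
/*
 * Usage sketch (editor's note): these open handlers typically back
 * the set_ftrace_filter and set_ftrace_notrace debugfs files, e.g.:
 *
 *      # echo 'schedule' > /sys/kernel/debug/tracing/set_ftrace_filter
 *      # echo '*spin_lock*' >> /sys/kernel/debug/tracing/set_ftrace_notrace
 */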

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}
1781
1782 static int ftrace_match(char *str, char *regex, int len, int type)
1783 {
1784         int matched = 0;
1785         int slen;
1786
1787         switch (type) {
1788         case MATCH_FULL:
1789                 if (strcmp(str, regex) == 0)
1790                         matched = 1;
1791                 break;
1792         case MATCH_FRONT_ONLY:
1793                 if (strncmp(str, regex, len) == 0)
1794                         matched = 1;
1795                 break;
1796         case MATCH_MIDDLE_ONLY:
1797                 if (strstr(str, regex))
1798                         matched = 1;
1799                 break;
1800         case MATCH_END_ONLY:
1801                 slen = strlen(str);
1802                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
1803                         matched = 1;
1804                 break;
1805         }
1806
1807         return matched;
1808 }
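
/*
 * Editor's note: a sketch of how filter_parse_regex() (defined in the
 * events filter code) maps glob patterns to the match types above:
 *
 *	"foo"	-> MATCH_FULL		(exact string compare)
 *	"foo*"	-> MATCH_FRONT_ONLY	(compare the first len bytes)
 *	"*foo*"	-> MATCH_MIDDLE_ONLY	(substring search)
 *	"*foo"	-> MATCH_END_ONLY	(compare the trailing len bytes)
 *
 * A leading '!' is stripped by the parser and reported through its
 * "not" argument; ftrace_match() itself never sees it.
 */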
1809
1810 static int
1811 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
1812 {
1813         struct ftrace_func_entry *entry;
1814         int ret = 0;
1815
1816         entry = ftrace_lookup_ip(hash, rec->ip);
1817         if (not) {
1818                 /* Do nothing if it doesn't exist */
1819                 if (!entry)
1820                         return 0;
1821
1822                 remove_hash_entry(hash, entry);
1823         } else {
1824                 /* Do nothing if it exists */
1825                 if (entry)
1826                         return 0;
1827
1828                 ret = add_hash_entry(hash, rec->ip);
1829         }
1830         return ret;
1831 }
1832
1833 static int
1834 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
1835                     char *regex, int len, int type)
1836 {
1837         char str[KSYM_SYMBOL_LEN];
1838         char *modname;
1839
1840         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1841
1842         if (mod) {
1843                 /* module lookup requires matching the module */
1844                 if (!modname || strcmp(modname, mod))
1845                         return 0;
1846
1847                 /* blank search means to match all funcs in the mod */
1848                 if (!len)
1849                         return 1;
1850         }
1851
1852         return ftrace_match(str, regex, len, type);
1853 }
1854
1855 static int
1856 match_records(struct ftrace_hash *hash, char *buff,
1857               int len, char *mod, int not)
1858 {
1859         unsigned search_len = 0;
1860         struct ftrace_page *pg;
1861         struct dyn_ftrace *rec;
1862         int type = MATCH_FULL;
1863         char *search = buff;
1864         int found = 0;
1865         int ret;
1866
1867         if (len) {
1868                 type = filter_parse_regex(buff, len, &search, &not);
1869                 search_len = strlen(search);
1870         }
1871
1872         mutex_lock(&ftrace_lock);
1873
1874         if (unlikely(ftrace_disabled))
1875                 goto out_unlock;
1876
1877         do_for_each_ftrace_rec(pg, rec) {
1878
1879                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
1880                         ret = enter_record(hash, rec, not);
1881                         if (ret < 0) {
1882                                 found = ret;
1883                                 goto out_unlock;
1884                         }
1885                         found = 1;
1886                 }
1887         } while_for_each_ftrace_rec();
1888  out_unlock:
1889         mutex_unlock(&ftrace_lock);
1890
1891         return found;
1892 }
1893
1894 static int
1895 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
1896 {
1897         return match_records(hash, buff, len, NULL, 0);
1898 }
1899
1900 static int
1901 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
1902 {
1903         int not = 0;
1904
1905         /* blank or '*' mean the same */
1906         if (strcmp(buff, "*") == 0)
1907                 buff[0] = 0;
1908
1909         /* handle the case of 'dont filter this module' */
1910         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1911                 buff[0] = 0;
1912                 not = 1;
1913         }
1914
1915         return match_records(hash, buff, strlen(buff), mod, not);
1916 }
1917
1918 /*
1919  * We register the module command as a template to show others how
1920  * to register a command as well.
1921  */
1922
1923 static int
1924 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1925 {
1926         struct ftrace_hash *hash;
1927         char *mod;
1928         int ret = -EINVAL;
1929
1930         /*
1931          * cmd == 'mod' because we only registered this func
1932          * for the 'mod' ftrace_func_command.
1933          * But if you register one func with multiple commands,
1934          * you can tell which command was used by the cmd
1935          * parameter.
1936          */
1937
1938         /* we must have a module name */
1939         if (!param)
1940                 return ret;
1941
1942         mod = strsep(&param, ":");
1943         if (!strlen(mod))
1944                 return ret;
1945
1946         if (enable)
1947                 hash = &filter_hash;
1948         else
1949                 hash = &notrace_hash;
1950
1951         ret = ftrace_match_module_records(hash, func, mod);
1952         if (!ret)
1953                 ret = -EINVAL;
1954         if (ret < 0)
1955                 return ret;
1956
1957         return 0;
1958 }
1959
1960 static struct ftrace_func_command ftrace_mod_cmd = {
1961         .name                   = "mod",
1962         .func                   = ftrace_mod_callback,
1963 };
1964
1965 static int __init ftrace_mod_cmd_init(void)
1966 {
1967         return register_ftrace_command(&ftrace_mod_cmd);
1968 }
1969 device_initcall(ftrace_mod_cmd_init);
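
/*
 * Editor's sketch: following the template above, a new command could be
 * registered like this (all names below are illustrative only).
 * my_cmd_callback() would then run whenever "<func>:mycmd:<param>" is
 * written to set_ftrace_filter or set_ftrace_notrace:
 *
 *	static int my_cmd_callback(char *func, char *cmd,
 *				   char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	device_initcall(my_cmd_init);
 */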
1970
1971 static void
1972 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1973 {
1974         struct ftrace_func_probe *entry;
1975         struct hlist_head *hhd;
1976         struct hlist_node *n;
1977         unsigned long key;
1978
1979         key = hash_long(ip, FTRACE_HASH_BITS);
1980
1981         hhd = &ftrace_func_hash[key];
1982
1983         if (hlist_empty(hhd))
1984                 return;
1985
1986         /*
1987                  * Disable preemption for these calls to prevent an RCU grace
1988                  * period. This synchronizes the hash iteration with the freeing
1989                  * of items on the hash. rcu_read_lock() is too dangerous here.
1990          */
1991         preempt_disable_notrace();
1992         hlist_for_each_entry_rcu(entry, n, hhd, node) {
1993                 if (entry->ip == ip)
1994                         entry->ops->func(ip, parent_ip, &entry->data);
1995         }
1996         preempt_enable_notrace();
1997 }
1998
1999 static struct ftrace_ops trace_probe_ops __read_mostly =
2000 {
2001         .func           = function_trace_probe_call,
2002 };
2003
2004 static int ftrace_probe_registered;
2005
2006 static void __enable_ftrace_function_probe(void)
2007 {
2008         int i;
2009
2010         if (ftrace_probe_registered)
2011                 return;
2012
2013         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2014                 struct hlist_head *hhd = &ftrace_func_hash[i];
2015                 if (hhd->first)
2016                         break;
2017         }
2018         /* Nothing registered? */
2019         if (i == FTRACE_FUNC_HASHSIZE)
2020                 return;
2021
2022         __register_ftrace_function(&trace_probe_ops);
2023         ftrace_startup(0);
2024         ftrace_probe_registered = 1;
2025 }
2026
2027 static void __disable_ftrace_function_probe(void)
2028 {
2029         int i;
2030
2031         if (!ftrace_probe_registered)
2032                 return;
2033
2034         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2035                 struct hlist_head *hhd = &ftrace_func_hash[i];
2036                 if (hhd->first)
2037                         return;
2038         }
2039
2040         /* no more funcs left */
2041         __unregister_ftrace_function(&trace_probe_ops);
2042         ftrace_shutdown(0);
2043         ftrace_probe_registered = 0;
2044 }
2045
2046
2047 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2048 {
2049         struct ftrace_func_probe *entry =
2050                 container_of(rhp, struct ftrace_func_probe, rcu);
2051
2052         if (entry->ops->free)
2053                 entry->ops->free(&entry->data);
2054         kfree(entry);
2055 }
2056
2057
2058 int
2059 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2060                               void *data)
2061 {
2062         struct ftrace_func_probe *entry;
2063         struct ftrace_page *pg;
2064         struct dyn_ftrace *rec;
2065         int type, len, not;
2066         unsigned long key;
2067         int count = 0;
2068         char *search;
2069
2070         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2071         len = strlen(search);
2072
2073         /* we do not support '!' for function probes */
2074         if (WARN_ON(not))
2075                 return -EINVAL;
2076
2077         mutex_lock(&ftrace_lock);
2078
2079         if (unlikely(ftrace_disabled))
2080                 goto out_unlock;
2081
2082         do_for_each_ftrace_rec(pg, rec) {
2083
2084                 if (!ftrace_match_record(rec, NULL, search, len, type))
2085                         continue;
2086
2087                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2088                 if (!entry) {
2089                         /* If we did not process any, then return error */
2090                         if (!count)
2091                                 count = -ENOMEM;
2092                         goto out_unlock;
2093                 }
2094
2095                 count++;
2096
2097                 entry->data = data;
2098
2099                 /*
2100                  * The caller might want to do something special
2101                  * for each function we find. We call the callback
2102                  * to give the caller an opportunity to do so.
2103                  */
2104                 if (ops->callback) {
2105                         if (ops->callback(rec->ip, &entry->data) < 0) {
2106                                 /* caller does not like this func */
2107                                 kfree(entry);
2108                                 continue;
2109                         }
2110                 }
2111
2112                 entry->ops = ops;
2113                 entry->ip = rec->ip;
2114
2115                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2116                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2117
2118         } while_for_each_ftrace_rec();
2119         __enable_ftrace_function_probe();
2120
2121  out_unlock:
2122         mutex_unlock(&ftrace_lock);
2123
2124         return count;
2125 }
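
/*
 * Editor's sketch of a caller (the traceon/traceoff probes in
 * trace_functions.c work roughly this way; the names below are
 * hypothetical):
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *
 * my_probe() then runs on entry to every matched function. On success
 * the return value is the number of functions the glob matched;
 * unregister_ftrace_function_probe() removes the probe again.
 */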
2126
2127 enum {
2128         PROBE_TEST_FUNC         = 1,
2129         PROBE_TEST_DATA         = 2
2130 };
2131
2132 static void
2133 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2134                                   void *data, int flags)
2135 {
2136         struct ftrace_func_probe *entry;
2137         struct hlist_node *n, *tmp;
2138         char str[KSYM_SYMBOL_LEN];
2139         int type = MATCH_FULL;
2140         int i, len = 0;
2141         char *search;
2142
2143         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2144                 glob = NULL;
2145         else if (glob) {
2146                 int not;
2147
2148                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2149                 len = strlen(search);
2150
2151                 /* we do not support '!' for function probes */
2152                 if (WARN_ON(not))
2153                         return;
2154         }
2155
2156         mutex_lock(&ftrace_lock);
2157         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2158                 struct hlist_head *hhd = &ftrace_func_hash[i];
2159
2160                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2161
2162                         /* break up if statements for readability */
2163                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2164                                 continue;
2165
2166                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2167                                 continue;
2168
2169                         /* do this last, since it is the most expensive */
2170                         if (glob) {
2171                                 kallsyms_lookup(entry->ip, NULL, NULL,
2172                                                 NULL, str);
2173                                 if (!ftrace_match(str, glob, len, type))
2174                                         continue;
2175                         }
2176
2177                         hlist_del(&entry->node);
2178                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2179                 }
2180         }
2181         __disable_ftrace_function_probe();
2182         mutex_unlock(&ftrace_lock);
2183 }
2184
2185 void
2186 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2187                                 void *data)
2188 {
2189         __unregister_ftrace_function_probe(glob, ops, data,
2190                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2191 }
2192
2193 void
2194 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2195 {
2196         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2197 }
2198
2199 void unregister_ftrace_function_probe_all(char *glob)
2200 {
2201         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2202 }
2203
2204 static LIST_HEAD(ftrace_commands);
2205 static DEFINE_MUTEX(ftrace_cmd_mutex);
2206
2207 int register_ftrace_command(struct ftrace_func_command *cmd)
2208 {
2209         struct ftrace_func_command *p;
2210         int ret = 0;
2211
2212         mutex_lock(&ftrace_cmd_mutex);
2213         list_for_each_entry(p, &ftrace_commands, list) {
2214                 if (strcmp(cmd->name, p->name) == 0) {
2215                         ret = -EBUSY;
2216                         goto out_unlock;
2217                 }
2218         }
2219         list_add(&cmd->list, &ftrace_commands);
2220  out_unlock:
2221         mutex_unlock(&ftrace_cmd_mutex);
2222
2223         return ret;
2224 }
2225
2226 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2227 {
2228         struct ftrace_func_command *p, *n;
2229         int ret = -ENODEV;
2230
2231         mutex_lock(&ftrace_cmd_mutex);
2232         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2233                 if (strcmp(cmd->name, p->name) == 0) {
2234                         ret = 0;
2235                         list_del_init(&p->list);
2236                         goto out_unlock;
2237                 }
2238         }
2239  out_unlock:
2240         mutex_unlock(&ftrace_cmd_mutex);
2241
2242         return ret;
2243 }
2244
2245 static int ftrace_process_regex(char *buff, int len, int enable)
2246 {
2247         char *func, *command, *next = buff;
2248         struct ftrace_func_command *p;
2249         struct ftrace_hash *hash;
2250         int ret = -EINVAL;
2251
2252         if (enable)
2253                 hash = &filter_hash;
2254         else
2255                 hash = &notrace_hash;
2256
2257         func = strsep(&next, ":");
2258
2259         if (!next) {
2260                 ret = ftrace_match_records(hash, func, len);
2261                 if (!ret)
2262                         ret = -EINVAL;
2263                 if (ret < 0)
2264                         return ret;
2265                 return 0;
2266         }
2267
2268         /* command found */
2269
2270         command = strsep(&next, ":");
2271
2272         mutex_lock(&ftrace_cmd_mutex);
2273         list_for_each_entry(p, &ftrace_commands, list) {
2274                 if (strcmp(p->name, command) == 0) {
2275                         ret = p->func(func, command, next, enable);
2276                         goto out_unlock;
2277                 }
2278         }
2279  out_unlock:
2280         mutex_unlock(&ftrace_cmd_mutex);
2281
2282         return ret;
2283 }
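
/*
 * Editor's note: from user space the syntax parsed above looks like
 * this (the module name is just an example):
 *
 *	# echo 'schedule' > set_ftrace_filter
 *	# echo '*:mod:ext4' > set_ftrace_filter
 *
 * Everything before the first ':' is the function glob; the next field
 * names a registered ftrace_func_command ("mod" is the only one this
 * file registers) and the remainder is passed to it as the parameter.
 */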
2284
2285 static ssize_t
2286 ftrace_regex_write(struct file *file, const char __user *ubuf,
2287                    size_t cnt, loff_t *ppos, int enable)
2288 {
2289         struct ftrace_iterator *iter;
2290         struct trace_parser *parser;
2291         ssize_t ret, read;
2292
2293         if (!cnt)
2294                 return 0;
2295
2296         mutex_lock(&ftrace_regex_lock);
2297
2298         ret = -ENODEV;
2299         if (unlikely(ftrace_disabled))
2300                 goto out_unlock;
2301
2302         if (file->f_mode & FMODE_READ) {
2303                 struct seq_file *m = file->private_data;
2304                 iter = m->private;
2305         } else
2306                 iter = file->private_data;
2307
2308         parser = &iter->parser;
2309         read = trace_get_user(parser, ubuf, cnt, ppos);
2310
2311         if (read >= 0 && trace_parser_loaded(parser) &&
2312             !trace_parser_cont(parser)) {
2313                 ret = ftrace_process_regex(parser->buffer,
2314                                            parser->idx, enable);
2315                 trace_parser_clear(parser);
2316                 if (ret)
2317                         goto out_unlock;
2318         }
2319
2320         ret = read;
2321 out_unlock:
2322         mutex_unlock(&ftrace_regex_lock);
2323
2324         return ret;
2325 }
2326
2327 static ssize_t
2328 ftrace_filter_write(struct file *file, const char __user *ubuf,
2329                     size_t cnt, loff_t *ppos)
2330 {
2331         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2332 }
2333
2334 static ssize_t
2335 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2336                      size_t cnt, loff_t *ppos)
2337 {
2338         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2339 }
2340
2341 static void
2342 ftrace_set_regex(struct ftrace_hash *hash, unsigned char *buf, int len, int reset)
2343 {
2344         if (unlikely(ftrace_disabled))
2345                 return;
2346
2347         mutex_lock(&ftrace_regex_lock);
2348         if (reset)
2349                 ftrace_filter_reset(hash);
2350         if (buf)
2351                 ftrace_match_records(hash, buf, len);
2352         mutex_unlock(&ftrace_regex_lock);
2353 }
2354
2355 /**
2356  * ftrace_set_filter - set a function to filter on in ftrace
2357  * @buf - the string that holds the function filter text.
2358  * @len - the length of the string.
2359  * @reset - non zero to reset all filters before applying this filter.
2360  *
2361  * Filters denote which functions should be enabled when tracing is enabled.
2362  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2363  */
2364 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2365 {
2366         ftrace_set_regex(&filter_hash, buf, len, reset);
2367 }
2368
2369 /**
2370  * ftrace_set_notrace - set a function to not trace in ftrace
2371  * @buf - the string that holds the function notrace text.
2372  * @len - the length of the string.
2373  * @reset - non zero to reset all filters before applying this filter.
2374  *
2375  * Notrace Filters denote which functions should not be enabled when tracing
2376  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2377  * for tracing.
2378  */
2379 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2380 {
2381         ftrace_set_regex(&notrace_hash, buf, len, reset);
2382 }
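
/*
 * Editor's sketch of an in-kernel caller (the dynamic ftrace self-test
 * in trace_selftest.c does something similar):
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *
 * This resets the filter hash and then limits tracing to schedule()
 * alone; a NULL @buf with @reset set simply clears the filter.
 */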
2383
2384 /*
2385  * command line interface to allow users to set filters on boot up.
2386  */
2387 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2388 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2389 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2390
2391 static int __init set_ftrace_notrace(char *str)
2392 {
2393         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2394         return 1;
2395 }
2396 __setup("ftrace_notrace=", set_ftrace_notrace);
2397
2398 static int __init set_ftrace_filter(char *str)
2399 {
2400         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2401         return 1;
2402 }
2403 __setup("ftrace_filter=", set_ftrace_filter);
2404
2405 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2406 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2407 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2408
2409 static int __init set_graph_function(char *str)
2410 {
2411         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2412         return 1;
2413 }
2414 __setup("ftrace_graph_filter=", set_graph_function);
2415
2416 static void __init set_ftrace_early_graph(char *buf)
2417 {
2418         int ret;
2419         char *func;
2420
2421         while (buf) {
2422                 func = strsep(&buf, ",");
2423                 /* we allow only one expression at a time */
2424                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2425                                       func);
2426                 if (ret)
2427                         printk(KERN_DEBUG "ftrace: function %s not "
2428                                           "traceable\n", func);
2429         }
2430 }
2431 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2432
2433 static void __init set_ftrace_early_filter(struct ftrace_hash *hash, char *buf)
2434 {
2435         char *func;
2436
2437         while (buf) {
2438                 func = strsep(&buf, ",");
2439                 ftrace_set_regex(hash, func, strlen(func), 0);
2440         }
2441 }
2442
2443 static void __init set_ftrace_early_filters(void)
2444 {
2445         if (ftrace_filter_buf[0])
2446                 set_ftrace_early_filter(&filter_hash, ftrace_filter_buf);
2447         if (ftrace_notrace_buf[0])
2448                 set_ftrace_early_filter(&notrace_hash, ftrace_notrace_buf);
2449 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2450         if (ftrace_graph_buf[0])
2451                 set_ftrace_early_graph(ftrace_graph_buf);
2452 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2453 }
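
/*
 * Editor's note: the hooks above make the filters settable from the
 * kernel command line before debugfs exists, e.g. (illustrative
 * values):
 *
 *	ftrace_filter=kmem_cache_*,kfree
 *	ftrace_notrace=rcu_read_lock,rcu_read_unlock
 *	ftrace_graph_filter=do_sys_open
 *
 * Each buffer is split on commas by the helpers above, one expression
 * per entry.
 */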
2454
2455 static int
2456 ftrace_regex_release(struct inode *inode, struct file *file)
2457 {
2458         struct seq_file *m = (struct seq_file *)file->private_data;
2459         struct ftrace_iterator *iter;
2460         struct trace_parser *parser;
2461
2462         mutex_lock(&ftrace_regex_lock);
2463         if (file->f_mode & FMODE_READ) {
2464                 iter = m->private;
2465
2466                 seq_release(inode, file);
2467         } else
2468                 iter = file->private_data;
2469
2470         parser = &iter->parser;
2471         if (trace_parser_loaded(parser)) {
2472                 parser->buffer[parser->idx] = 0;
2473                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
2474         }
2475
2476         trace_parser_put(parser);
2477         kfree(iter);
2478
2479         if (file->f_mode & FMODE_WRITE) {
2480                 mutex_lock(&ftrace_lock);
2481                 if (ftrace_start_up && ftrace_enabled)
2482                         ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2483                 mutex_unlock(&ftrace_lock);
2484         }
2485
2486         mutex_unlock(&ftrace_regex_lock);
2487         return 0;
2488 }
2489
2490 static const struct file_operations ftrace_avail_fops = {
2491         .open = ftrace_avail_open,
2492         .read = seq_read,
2493         .llseek = seq_lseek,
2494         .release = seq_release_private,
2495 };
2496
2497 static const struct file_operations ftrace_filter_fops = {
2498         .open = ftrace_filter_open,
2499         .read = seq_read,
2500         .write = ftrace_filter_write,
2501         .llseek = ftrace_regex_lseek,
2502         .release = ftrace_regex_release,
2503 };
2504
2505 static const struct file_operations ftrace_notrace_fops = {
2506         .open = ftrace_notrace_open,
2507         .read = seq_read,
2508         .write = ftrace_notrace_write,
2509         .llseek = ftrace_regex_lseek,
2510         .release = ftrace_regex_release,
2511 };
2512
2513 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2514
2515 static DEFINE_MUTEX(graph_lock);
2516
2517 int ftrace_graph_count;
2518 int ftrace_graph_filter_enabled;
2519 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2520
2521 static void *
2522 __g_next(struct seq_file *m, loff_t *pos)
2523 {
2524         if (*pos >= ftrace_graph_count)
2525                 return NULL;
2526         return &ftrace_graph_funcs[*pos];
2527 }
2528
2529 static void *
2530 g_next(struct seq_file *m, void *v, loff_t *pos)
2531 {
2532         (*pos)++;
2533         return __g_next(m, pos);
2534 }
2535
2536 static void *g_start(struct seq_file *m, loff_t *pos)
2537 {
2538         mutex_lock(&graph_lock);
2539
2540         /* No filter set; tell g_show to print that all functions are enabled */
2541         if (!ftrace_graph_filter_enabled && !*pos)
2542                 return (void *)1;
2543
2544         return __g_next(m, pos);
2545 }
2546
2547 static void g_stop(struct seq_file *m, void *p)
2548 {
2549         mutex_unlock(&graph_lock);
2550 }
2551
2552 static int g_show(struct seq_file *m, void *v)
2553 {
2554         unsigned long *ptr = v;
2555
2556         if (!ptr)
2557                 return 0;
2558
2559         if (ptr == (unsigned long *)1) {
2560                 seq_printf(m, "#### all functions enabled ####\n");
2561                 return 0;
2562         }
2563
2564         seq_printf(m, "%ps\n", (void *)*ptr);
2565
2566         return 0;
2567 }
2568
2569 static const struct seq_operations ftrace_graph_seq_ops = {
2570         .start = g_start,
2571         .next = g_next,
2572         .stop = g_stop,
2573         .show = g_show,
2574 };
2575
2576 static int
2577 ftrace_graph_open(struct inode *inode, struct file *file)
2578 {
2579         int ret = 0;
2580
2581         if (unlikely(ftrace_disabled))
2582                 return -ENODEV;
2583
2584         mutex_lock(&graph_lock);
2585         if ((file->f_mode & FMODE_WRITE) &&
2586             (file->f_flags & O_TRUNC)) {
2587                 ftrace_graph_filter_enabled = 0;
2588                 ftrace_graph_count = 0;
2589                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2590         }
2591         mutex_unlock(&graph_lock);
2592
2593         if (file->f_mode & FMODE_READ)
2594                 ret = seq_open(file, &ftrace_graph_seq_ops);
2595
2596         return ret;
2597 }
2598
2599 static int
2600 ftrace_graph_release(struct inode *inode, struct file *file)
2601 {
2602         if (file->f_mode & FMODE_READ)
2603                 seq_release(inode, file);
2604         return 0;
2605 }
2606
2607 static int
2608 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2609 {
2610         struct dyn_ftrace *rec;
2611         struct ftrace_page *pg;
2612         int search_len;
2613         int fail = 1;
2614         int type, not;
2615         char *search;
2616         bool exists;
2617         int i;
2618
2619         /* decode regex */
2620         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
2621         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
2622                 return -EBUSY;
2623
2624         search_len = strlen(search);
2625
2626         mutex_lock(&ftrace_lock);
2627
2628         if (unlikely(ftrace_disabled)) {
2629                 mutex_unlock(&ftrace_lock);
2630                 return -ENODEV;
2631         }
2632
2633         do_for_each_ftrace_rec(pg, rec) {
2634
2635                 if (rec->flags & FTRACE_FL_FREE)
2636                         continue;
2637
2638                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
2639                         /* if it is in the array */
2640                         exists = false;
2641                         for (i = 0; i < *idx; i++) {
2642                                 if (array[i] == rec->ip) {
2643                                         exists = true;
2644                                         break;
2645                                 }
2646                         }
2647
2648                         if (!not) {
2649                                 fail = 0;
2650                                 if (!exists) {
2651                                         array[(*idx)++] = rec->ip;
2652                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2653                                                 goto out;
2654                                 }
2655                         } else {
2656                                 if (exists) {
2657                                         array[i] = array[--(*idx)];
2658                                         array[*idx] = 0;
2659                                         fail = 0;
2660                                 }
2661                         }
2662                 }
2663         } while_for_each_ftrace_rec();
2664 out:
2665         mutex_unlock(&ftrace_lock);
2666
2667         if (fail)
2668                 return -EINVAL;
2669
2670         ftrace_graph_filter_enabled = 1;
2671         return 0;
2672 }
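
/*
 * Editor's note: ftrace_set_func() implements the semantics of, e.g.:
 *
 *	# echo 'do_sys_open' > set_graph_function
 *	# echo '!do_sys_open' >> set_graph_function
 *
 * A plain pattern succeeds only if it matches at least one record, and
 * a '!' pattern only if it removes an existing entry; either failure
 * is reported as -EINVAL through the "fail" flag above.
 */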
2673
2674 static ssize_t
2675 ftrace_graph_write(struct file *file, const char __user *ubuf,
2676                    size_t cnt, loff_t *ppos)
2677 {
2678         struct trace_parser parser;
2679         ssize_t read, ret;
2680
2681         if (!cnt)
2682                 return 0;
2683
2684         mutex_lock(&graph_lock);
2685
2686         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2687                 ret = -ENOMEM;
2688                 goto out_unlock;
2689         }
2690
2691         read = trace_get_user(&parser, ubuf, cnt, ppos);
2692
2693         if (read >= 0 && trace_parser_loaded(&parser)) {
2694                 parser.buffer[parser.idx] = 0;
2695
2696                 /* we allow only one expression at a time */
2697                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2698                                         parser.buffer);
2699                 if (ret)
2700                         goto out_free;
2701         }
2702
2703         ret = read;
2704
2705 out_free:
2706         trace_parser_put(&parser);
2707 out_unlock:
2708         mutex_unlock(&graph_lock);
2709
2710         return ret;
2711 }
2712
2713 static const struct file_operations ftrace_graph_fops = {
2714         .open           = ftrace_graph_open,
2715         .read           = seq_read,
2716         .write          = ftrace_graph_write,
2717         .release        = ftrace_graph_release,
2718         .llseek         = seq_lseek,
2719 };
2720 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2721
2722 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2723 {
2724
2725         trace_create_file("available_filter_functions", 0444,
2726                         d_tracer, NULL, &ftrace_avail_fops);
2727
2728         trace_create_file("set_ftrace_filter", 0644, d_tracer,
2729                         NULL, &ftrace_filter_fops);
2730
2731         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
2732                                     NULL, &ftrace_notrace_fops);
2733
2734 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2735         trace_create_file("set_graph_function", 0644, d_tracer,
2736                                     NULL,
2737                                     &ftrace_graph_fops);
2738 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2739
2740         return 0;
2741 }
2742
2743 static int ftrace_process_locs(struct module *mod,
2744                                unsigned long *start,
2745                                unsigned long *end)
2746 {
2747         unsigned long *p;
2748         unsigned long addr;
2749
2750         mutex_lock(&ftrace_lock);
2751         p = start;
2752         while (p < end) {
2753                 addr = ftrace_call_adjust(*p++);
2754                 /*
2755                  * Some architecture linkers will pad between
2756                  * the different mcount_loc sections of different
2757                  * object files to satisfy alignments.
2758                  * Skip any NULL pointers.
2759                  */
2760                 if (!addr)
2761                         continue;
2762                 ftrace_record_ip(addr);
2763         }
2764
2765         ftrace_update_code(mod);
2766         mutex_unlock(&ftrace_lock);
2767
2768         return 0;
2769 }
2770
2771 #ifdef CONFIG_MODULES
2772 void ftrace_release_mod(struct module *mod)
2773 {
2774         struct dyn_ftrace *rec;
2775         struct ftrace_page *pg;
2776
2777         mutex_lock(&ftrace_lock);
2778
2779         if (ftrace_disabled)
2780                 goto out_unlock;
2781
2782         do_for_each_ftrace_rec(pg, rec) {
2783                 if (within_module_core(rec->ip, mod)) {
2784                         /*
2785                          * rec->ip is changed in ftrace_free_rec(), so it
2786                          * should no longer lie within the module once the record is freed.
2787                          */
2788                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
2789                         ftrace_free_rec(rec);
2790                 }
2791         } while_for_each_ftrace_rec();
2792  out_unlock:
2793         mutex_unlock(&ftrace_lock);
2794 }
2795
2796 static void ftrace_init_module(struct module *mod,
2797                                unsigned long *start, unsigned long *end)
2798 {
2799         if (ftrace_disabled || start == end)
2800                 return;
2801         ftrace_process_locs(mod, start, end);
2802 }
2803
2804 static int ftrace_module_notify(struct notifier_block *self,
2805                                 unsigned long val, void *data)
2806 {
2807         struct module *mod = data;
2808
2809         switch (val) {
2810         case MODULE_STATE_COMING:
2811                 ftrace_init_module(mod, mod->ftrace_callsites,
2812                                    mod->ftrace_callsites +
2813                                    mod->num_ftrace_callsites);
2814                 break;
2815         case MODULE_STATE_GOING:
2816                 ftrace_release_mod(mod);
2817                 break;
2818         }
2819
2820         return 0;
2821 }
2822 #else
2823 static int ftrace_module_notify(struct notifier_block *self,
2824                                 unsigned long val, void *data)
2825 {
2826         return 0;
2827 }
2828 #endif /* CONFIG_MODULES */
2829
2830 struct notifier_block ftrace_module_nb = {
2831         .notifier_call = ftrace_module_notify,
2832         .priority = 0,
2833 };
2834
2835 extern unsigned long __start_mcount_loc[];
2836 extern unsigned long __stop_mcount_loc[];
2837
2838 void __init ftrace_init(void)
2839 {
2840         unsigned long count, addr, flags;
2841         int ret;
2842
2843         /* Keep the ftrace pointer to the stub */
2844         addr = (unsigned long)ftrace_stub;
2845
2846         local_irq_save(flags);
2847         ftrace_dyn_arch_init(&addr);
2848         local_irq_restore(flags);
2849
2850         /* ftrace_dyn_arch_init places the return code in addr */
2851         if (addr)
2852                 goto failed;
2853
2854         count = __stop_mcount_loc - __start_mcount_loc;
2855
2856         ret = ftrace_dyn_table_alloc(count);
2857         if (ret)
2858                 goto failed;
2859
2860         last_ftrace_enabled = ftrace_enabled = 1;
2861
2862         ret = ftrace_process_locs(NULL,
2863                                   __start_mcount_loc,
2864                                   __stop_mcount_loc);
2865
2866         ret = register_module_notifier(&ftrace_module_nb);
2867         if (ret)
2868                 pr_warning("Failed to register ftrace module notifier\n");
2869
2870         set_ftrace_early_filters();
2871
2872         return;
2873  failed:
2874         ftrace_disabled = 1;
2875 }
2876
2877 #else
2878
2879 static int __init ftrace_nodyn_init(void)
2880 {
2881         ftrace_enabled = 1;
2882         return 0;
2883 }
2884 device_initcall(ftrace_nodyn_init);
2885
2886 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2887 static inline void ftrace_startup_enable(int command) { }
2888 /* Keep as macros so we do not need to define the commands */
2889 # define ftrace_startup(command)        do { } while (0)
2890 # define ftrace_shutdown(command)       do { } while (0)
2891 # define ftrace_startup_sysctl()        do { } while (0)
2892 # define ftrace_shutdown_sysctl()       do { } while (0)
2893 #endif /* CONFIG_DYNAMIC_FTRACE */
2894
2895 static void clear_ftrace_swapper(void)
2896 {
2897         struct task_struct *p;
2898         int cpu;
2899
2900         get_online_cpus();
2901         for_each_online_cpu(cpu) {
2902                 p = idle_task(cpu);
2903                 clear_tsk_trace_trace(p);
2904         }
2905         put_online_cpus();
2906 }
2907
2908 static void set_ftrace_swapper(void)
2909 {
2910         struct task_struct *p;
2911         int cpu;
2912
2913         get_online_cpus();
2914         for_each_online_cpu(cpu) {
2915                 p = idle_task(cpu);
2916                 set_tsk_trace_trace(p);
2917         }
2918         put_online_cpus();
2919 }
2920
2921 static void clear_ftrace_pid(struct pid *pid)
2922 {
2923         struct task_struct *p;
2924
2925         rcu_read_lock();
2926         do_each_pid_task(pid, PIDTYPE_PID, p) {
2927                 clear_tsk_trace_trace(p);
2928         } while_each_pid_task(pid, PIDTYPE_PID, p);
2929         rcu_read_unlock();
2930
2931         put_pid(pid);
2932 }
2933
2934 static void set_ftrace_pid(struct pid *pid)
2935 {
2936         struct task_struct *p;
2937
2938         rcu_read_lock();
2939         do_each_pid_task(pid, PIDTYPE_PID, p) {
2940                 set_tsk_trace_trace(p);
2941         } while_each_pid_task(pid, PIDTYPE_PID, p);
2942         rcu_read_unlock();
2943 }
2944
2945 static void clear_ftrace_pid_task(struct pid *pid)
2946 {
2947         if (pid == ftrace_swapper_pid)
2948                 clear_ftrace_swapper();
2949         else
2950                 clear_ftrace_pid(pid);
2951 }
2952
2953 static void set_ftrace_pid_task(struct pid *pid)
2954 {
2955         if (pid == ftrace_swapper_pid)
2956                 set_ftrace_swapper();
2957         else
2958                 set_ftrace_pid(pid);
2959 }
2960
2961 static int ftrace_pid_add(int p)
2962 {
2963         struct pid *pid;
2964         struct ftrace_pid *fpid;
2965         int ret = -EINVAL;
2966
2967         mutex_lock(&ftrace_lock);
2968
2969         if (!p)
2970                 pid = ftrace_swapper_pid;
2971         else
2972                 pid = find_get_pid(p);
2973
2974         if (!pid)
2975                 goto out;
2976
2977         ret = 0;
2978
2979         list_for_each_entry(fpid, &ftrace_pids, list)
2980                 if (fpid->pid == pid)
2981                         goto out_put;
2982
2983         ret = -ENOMEM;
2984
2985         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
2986         if (!fpid)
2987                 goto out_put;
2988
2989         list_add(&fpid->list, &ftrace_pids);
2990         fpid->pid = pid;
2991
2992         set_ftrace_pid_task(pid);
2993
2994         ftrace_update_pid_func();
2995         ftrace_startup_enable(0);
2996
2997         mutex_unlock(&ftrace_lock);
2998         return 0;
2999
3000 out_put:
3001         if (pid != ftrace_swapper_pid)
3002                 put_pid(pid);
3003
3004 out:
3005         mutex_unlock(&ftrace_lock);
3006         return ret;
3007 }
3008
3009 static void ftrace_pid_reset(void)
3010 {
3011         struct ftrace_pid *fpid, *safe;
3012
3013         mutex_lock(&ftrace_lock);
3014         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3015                 struct pid *pid = fpid->pid;
3016
3017                 clear_ftrace_pid_task(pid);
3018
3019                 list_del(&fpid->list);
3020                 kfree(fpid);
3021         }
3022
3023         ftrace_update_pid_func();
3024         ftrace_startup_enable(0);
3025
3026         mutex_unlock(&ftrace_lock);
3027 }
3028
3029 static void *fpid_start(struct seq_file *m, loff_t *pos)
3030 {
3031         mutex_lock(&ftrace_lock);
3032
3033         if (list_empty(&ftrace_pids) && (!*pos))
3034                 return (void *) 1;
3035
3036         return seq_list_start(&ftrace_pids, *pos);
3037 }
3038
3039 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3040 {
3041         if (v == (void *)1)
3042                 return NULL;
3043
3044         return seq_list_next(v, &ftrace_pids, pos);
3045 }
3046
3047 static void fpid_stop(struct seq_file *m, void *p)
3048 {
3049         mutex_unlock(&ftrace_lock);
3050 }
3051
3052 static int fpid_show(struct seq_file *m, void *v)
3053 {
3054         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3055
3056         if (v == (void *)1) {
3057                 seq_printf(m, "no pid\n");
3058                 return 0;
3059         }
3060
3061         if (fpid->pid == ftrace_swapper_pid)
3062                 seq_printf(m, "swapper tasks\n");
3063         else
3064                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3065
3066         return 0;
3067 }
3068
3069 static const struct seq_operations ftrace_pid_sops = {
3070         .start = fpid_start,
3071         .next = fpid_next,
3072         .stop = fpid_stop,
3073         .show = fpid_show,
3074 };
3075
3076 static int
3077 ftrace_pid_open(struct inode *inode, struct file *file)
3078 {
3079         int ret = 0;
3080
3081         if ((file->f_mode & FMODE_WRITE) &&
3082             (file->f_flags & O_TRUNC))
3083                 ftrace_pid_reset();
3084
3085         if (file->f_mode & FMODE_READ)
3086                 ret = seq_open(file, &ftrace_pid_sops);
3087
3088         return ret;
3089 }
3090
3091 static ssize_t
3092 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3093                    size_t cnt, loff_t *ppos)
3094 {
3095         char buf[64], *tmp;
3096         long val;
3097         int ret;
3098
3099         if (cnt >= sizeof(buf))
3100                 return -EINVAL;
3101
3102         if (copy_from_user(&buf, ubuf, cnt))
3103                 return -EFAULT;
3104
3105         buf[cnt] = 0;
3106
3107         /*
3108          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3109          * to clean the filter quietly.
3110          */
3111         tmp = strstrip(buf);
3112         if (strlen(tmp) == 0)
3113                 return 1;
3114
3115         ret = strict_strtol(tmp, 10, &val);
3116         if (ret < 0)
3117                 return ret;
3118
3119         ret = ftrace_pid_add(val);
3120
3121         return ret ? ret : cnt;
3122 }
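
/*
 * Editor's note: typical usage from user space (pids illustrative):
 *
 *	# echo 1234 > set_ftrace_pid	(trace only pid 1234)
 *	# echo 5678 >> set_ftrace_pid	(additionally trace pid 5678)
 *	# echo > set_ftrace_pid		(clear the list, trace everyone)
 *
 * Writing 0 selects the per-cpu swapper (idle) tasks, as handled by
 * ftrace_pid_add() above.
 */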
3123
3124 static int
3125 ftrace_pid_release(struct inode *inode, struct file *file)
3126 {
3127         if (file->f_mode & FMODE_READ)
3128                 seq_release(inode, file);
3129
3130         return 0;
3131 }
3132
3133 static const struct file_operations ftrace_pid_fops = {
3134         .open           = ftrace_pid_open,
3135         .write          = ftrace_pid_write,
3136         .read           = seq_read,
3137         .llseek         = seq_lseek,
3138         .release        = ftrace_pid_release,
3139 };
3140
3141 static __init int ftrace_init_debugfs(void)
3142 {
3143         struct dentry *d_tracer;
3144
3145         d_tracer = tracing_init_dentry();
3146         if (!d_tracer)
3147                 return 0;
3148
3149         ftrace_init_dyn_debugfs(d_tracer);
3150
3151         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3152                             NULL, &ftrace_pid_fops);
3153
3154         ftrace_profile_debugfs(d_tracer);
3155
3156         return 0;
3157 }
3158 fs_initcall(ftrace_init_debugfs);
3159
3160 /**
3161  * ftrace_kill - kill ftrace
3162  *
3163  * This function should be used by panic code. It stops ftrace
3164  * but in a not so nice way: nothing is unregistered and no locks
3165  * are taken; ftrace is simply marked disabled and stubbed out.
3166  */
3167 void ftrace_kill(void)
3168 {
3169         ftrace_disabled = 1;
3170         ftrace_enabled = 0;
3171         clear_ftrace_function();
3172 }
3173
3174 /**
3175  * register_ftrace_function - register a function for profiling
3176  * @ops - ops structure that holds the function for profiling.
3177  *
3178  * Register a function to be called by all functions in the
3179  * kernel.
3180  *
3181  * Note: @ops->func and all the functions it calls must be labeled
3182  *       with "notrace", otherwise it will go into a
3183  *       recursive loop.
3184  */
3185 int register_ftrace_function(struct ftrace_ops *ops)
3186 {
3187         int ret = -ENODEV;
3188
3189         mutex_lock(&ftrace_lock);
3190
3191         if (unlikely(ftrace_disabled))
3192                 goto out_unlock;
3193
3194         ret = __register_ftrace_function(ops);
3195         ftrace_startup(0);
3196
3197  out_unlock:
3198         mutex_unlock(&ftrace_lock);
3199         return ret;
3200 }
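
/*
 * Editor's sketch of a minimal caller (hypothetical names); note the
 * notrace annotation required by the comment above:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * my_trace_func() is then invoked with the address of every traced
 * function and of its call site until the ops are unregistered.
 */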
3201
3202 /**
3203  * unregister_ftrace_function - unregister a function for profiling.
3204  * @ops - ops structure that holds the function to unregister
3205  *
3206  * Unregister a function that was added to be called by ftrace profiling.
3207  */
3208 int unregister_ftrace_function(struct ftrace_ops *ops)
3209 {
3210         int ret;
3211
3212         mutex_lock(&ftrace_lock);
3213         ret = __unregister_ftrace_function(ops);
3214         ftrace_shutdown(0);
3215         mutex_unlock(&ftrace_lock);
3216
3217         return ret;
3218 }
3219
3220 int
3221 ftrace_enable_sysctl(struct ctl_table *table, int write,
3222                      void __user *buffer, size_t *lenp,
3223                      loff_t *ppos)
3224 {
3225         int ret = -ENODEV;
3226
3227         mutex_lock(&ftrace_lock);
3228
3229         if (unlikely(ftrace_disabled))
3230                 goto out;
3231
3232         ret = proc_dointvec(table, write, buffer, lenp, ppos);
3233
3234         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3235                 goto out;
3236
3237         last_ftrace_enabled = !!ftrace_enabled;
3238
3239         if (ftrace_enabled) {
3240
3241                 ftrace_startup_sysctl();
3242
3243                 /* we are starting ftrace again */
3244                 if (ftrace_list != &ftrace_list_end) {
3245                         if (ftrace_list->next == &ftrace_list_end)
3246                                 ftrace_trace_function = ftrace_list->func;
3247                         else
3248                                 ftrace_trace_function = ftrace_list_func;
3249                 }
3250
3251         } else {
3252                 /* stopping ftrace calls (just send to ftrace_stub) */
3253                 ftrace_trace_function = ftrace_stub;
3254
3255                 ftrace_shutdown_sysctl();
3256         }
3257
3258  out:
3259         mutex_unlock(&ftrace_lock);
3260         return ret;
3261 }
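
/*
 * Editor's note: this handler backs /proc/sys/kernel/ftrace_enabled,
 * so "sysctl kernel.ftrace_enabled=0" stubs out function tracing
 * without unregistering any ftrace_ops, and setting it back to 1
 * rewires ftrace_trace_function as shown above.
 */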
3262
3263 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3264
3265 static int ftrace_graph_active;
3266 static struct notifier_block ftrace_suspend_notifier;
3267
3268 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3269 {
3270         return 0;
3271 }
3272
3273 /* The callbacks that hook a function */
3274 trace_func_graph_ret_t ftrace_graph_return =
3275                         (trace_func_graph_ret_t)ftrace_stub;
3276 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3277
3278 /* Try to assign a return stack to each task, FTRACE_RETSTACK_ALLOC_SIZE at a time. */
3279 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3280 {
3281         int i;
3282         int ret = 0;
3283         unsigned long flags;
3284         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3285         struct task_struct *g, *t;
3286
3287         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3288                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3289                                         * sizeof(struct ftrace_ret_stack),
3290                                         GFP_KERNEL);
3291                 if (!ret_stack_list[i]) {
3292                         start = 0;
3293                         end = i;
3294                         ret = -ENOMEM;
3295                         goto free;
3296                 }
3297         }
3298
3299         read_lock_irqsave(&tasklist_lock, flags);
3300         do_each_thread(g, t) {
3301                 if (start == end) {
3302                         ret = -EAGAIN;
3303                         goto unlock;
3304                 }
3305
3306                 if (t->ret_stack == NULL) {
3307                         atomic_set(&t->tracing_graph_pause, 0);
3308                         atomic_set(&t->trace_overrun, 0);
3309                         t->curr_ret_stack = -1;
3310                         /* Make sure the tasks see the -1 first: */
3311                         smp_wmb();
3312                         t->ret_stack = ret_stack_list[start++];
3313                 }
3314         } while_each_thread(g, t);
3315
3316 unlock:
3317         read_unlock_irqrestore(&tasklist_lock, flags);
3318 free:
3319         for (i = start; i < end; i++)
3320                 kfree(ret_stack_list[i]);
3321         return ret;
3322 }
3323
3324 static void
3325 ftrace_graph_probe_sched_switch(void *ignore,
3326                         struct task_struct *prev, struct task_struct *next)
3327 {
3328         unsigned long long timestamp;
3329         int index;
3330
3331         /*
3332          * Does the user want to count the time a function was asleep?
3333          * If so, do not update the time stamps.
3334          */
3335         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3336                 return;
3337
3338         timestamp = trace_clock_local();
3339
3340         prev->ftrace_timestamp = timestamp;
3341
3342         /* only process tasks that we timestamped */
3343         if (!next->ftrace_timestamp)
3344                 return;
3345
3346         /*
3347          * Update all the counters in next to make up for the
3348          * time next was sleeping.
3349          */
3350         timestamp -= next->ftrace_timestamp;
3351
3352         for (index = next->curr_ret_stack; index >= 0; index--)
3353                 next->ret_stack[index].calltime += timestamp;
3354 }
3355
3356 /* Allocate a return stack for each task */
3357 static int start_graph_tracing(void)
3358 {
3359         struct ftrace_ret_stack **ret_stack_list;
3360         int ret, cpu;
3361
3362         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3363                                 sizeof(struct ftrace_ret_stack *),
3364                                 GFP_KERNEL);
3365
3366         if (!ret_stack_list)
3367                 return -ENOMEM;
3368
3369         /* The cpu_boot init_task->ret_stack will never be freed */
3370         for_each_online_cpu(cpu) {
3371                 if (!idle_task(cpu)->ret_stack)
3372                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
3373         }
3374
3375         do {
3376                 ret = alloc_retstack_tasklist(ret_stack_list);
3377         } while (ret == -EAGAIN);
3378
3379         if (!ret) {
3380                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3381                 if (ret)
3382                         pr_info("ftrace_graph: Couldn't activate tracepoint"
3383                                 " probe to kernel_sched_switch\n");
3384         }
3385
3386         kfree(ret_stack_list);
3387         return ret;
3388 }
3389
3390 /*
3391  * Hibernation protection.
3392  * The state of the current task is too unstable during
3393  * suspend/restore to disk. We want to protect against that.
3394  */
3395 static int
3396 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3397                                                         void *unused)
3398 {
3399         switch (state) {
3400         case PM_HIBERNATION_PREPARE:
3401                 pause_graph_tracing();
3402                 break;
3403
3404         case PM_POST_HIBERNATION:
3405                 unpause_graph_tracing();
3406                 break;
3407         }
3408         return NOTIFY_DONE;
3409 }
3410
3411 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3412                         trace_func_graph_ent_t entryfunc)
3413 {
3414         int ret = 0;
3415
3416         mutex_lock(&ftrace_lock);
3417
3418         /* we currently allow only one tracer registered at a time */
3419         if (ftrace_graph_active) {
3420                 ret = -EBUSY;
3421                 goto out;
3422         }
3423
3424         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3425         register_pm_notifier(&ftrace_suspend_notifier);
3426
3427         ftrace_graph_active++;
3428         ret = start_graph_tracing();
3429         if (ret) {
3430                 ftrace_graph_active--;
3431                 goto out;
3432         }
3433
3434         ftrace_graph_return = retfunc;
3435         ftrace_graph_entry = entryfunc;
3436
3437         ftrace_startup(FTRACE_START_FUNC_RET);
3438
3439 out:
3440         mutex_unlock(&ftrace_lock);
3441         return ret;
3442 }
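
/*
 * Editor's sketch (hypothetical names); the function_graph tracer is
 * the in-tree user of this interface:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *
 * my_graph_entry() returning nonzero means "trace this function".
 * Only one graph tracer may be registered at a time; a second caller
 * gets -EBUSY until unregister_ftrace_graph() is called.
 */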
3443
3444 void unregister_ftrace_graph(void)
3445 {
3446         mutex_lock(&ftrace_lock);
3447
3448         if (unlikely(!ftrace_graph_active))
3449                 goto out;
3450
3451         ftrace_graph_active--;
3452         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3453         ftrace_graph_entry = ftrace_graph_entry_stub;
3454         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3455         unregister_pm_notifier(&ftrace_suspend_notifier);
3456         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3457
3458  out:
3459         mutex_unlock(&ftrace_lock);
3460 }
3461
3462 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
3463
3464 static void
3465 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
3466 {
3467         atomic_set(&t->tracing_graph_pause, 0);
3468         atomic_set(&t->trace_overrun, 0);
3469         t->ftrace_timestamp = 0;
3470         /* make curr_ret_stack visible before we add the ret_stack */
3471         smp_wmb();
3472         t->ret_stack = ret_stack;
3473 }
3474
3475 /*
3476  * Allocate a return stack for the idle task. May be the first
3477  * time through, or it may be done by CPU hotplug online.
3478  */
3479 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
3480 {
3481         t->curr_ret_stack = -1;
3482         /*
3483          * The idle task has no parent; it either has its own
3484          * stack or no stack at all.
3485          */
3486         if (t->ret_stack)
3487                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
3488
3489         if (ftrace_graph_active) {
3490                 struct ftrace_ret_stack *ret_stack;
3491
3492                 ret_stack = per_cpu(idle_ret_stack, cpu);
3493                 if (!ret_stack) {
3494                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3495                                             * sizeof(struct ftrace_ret_stack),
3496                                             GFP_KERNEL);
3497                         if (!ret_stack)
3498                                 return;
3499                         per_cpu(idle_ret_stack, cpu) = ret_stack;
3500                 }
3501                 graph_init_task(t, ret_stack);
3502         }
3503 }
3504
3505 /* Allocate a return stack for newly created task */
3506 void ftrace_graph_init_task(struct task_struct *t)
3507 {
3508         /* Make sure we do not use the parent ret_stack */
3509         t->ret_stack = NULL;
3510         t->curr_ret_stack = -1;
3511
3512         if (ftrace_graph_active) {
3513                 struct ftrace_ret_stack *ret_stack;
3514
3515                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3516                                 * sizeof(struct ftrace_ret_stack),
3517                                 GFP_KERNEL);
3518                 if (!ret_stack)
3519                         return;
3520                 graph_init_task(t, ret_stack);
3521         }
3522 }
3523
3524 void ftrace_graph_exit_task(struct task_struct *t)
3525 {
3526         struct ftrace_ret_stack *ret_stack = t->ret_stack;
3527
3528         t->ret_stack = NULL;
3529         /* NULL must become visible to IRQs before we free it: */
3530         barrier();
3531
3532         kfree(ret_stack);
3533 }
3534
3535 void ftrace_graph_stop(void)
3536 {
3537         ftrace_stop();
3538 }
3539 #endif