[platform/adaptation/renesas_rcar/renesas_kernel.git] kernel/trace/ftrace.c @ dcce0bf9c84dd85ff1ed27a85640c39376bdd916
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32
33 #include <trace/events/sched.h>
34
35 #include <asm/ftrace.h>
36 #include <asm/setup.h>
37
38 #include "trace_output.h"
39 #include "trace_stat.h"
40
41 #define FTRACE_WARN_ON(cond)                    \
42         ({                                      \
43                 int ___r = cond;                \
44                 if (WARN_ON(___r))              \
45                         ftrace_kill();          \
46                 ___r;                           \
47         })
48
49 #define FTRACE_WARN_ON_ONCE(cond)               \
50         ({                                      \
51                 int ___r = cond;                \
52                 if (WARN_ON_ONCE(___r))         \
53                         ftrace_kill();          \
54                 ___r;                           \
55         })
56
57 /* hash bits for specific function selection */
58 #define FTRACE_HASH_BITS 7
59 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
60 #define FTRACE_HASH_DEFAULT_BITS 10
61 #define FTRACE_HASH_MAX_BITS 12
62
63 /* ftrace_enabled is a method to turn ftrace on or off */
64 int ftrace_enabled __read_mostly;
65 static int last_ftrace_enabled;
66
67 /* Quick disabling of function tracer. */
68 int function_trace_stop;
69
70 /* List for set_ftrace_pid's pids. */
71 LIST_HEAD(ftrace_pids);
72 struct ftrace_pid {
73         struct list_head list;
74         struct pid *pid;
75 };
76
77 /*
78  * ftrace_disabled is set when an anomaly is discovered.
79  * ftrace_disabled is much stronger than ftrace_enabled.
80  */
81 static int ftrace_disabled __read_mostly;
82
83 static DEFINE_MUTEX(ftrace_lock);
84
85 static struct ftrace_ops ftrace_list_end __read_mostly =
86 {
87         .func           = ftrace_stub,
88 };
89
90 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
91 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
92 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
93 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
94 static struct ftrace_ops global_ops;
95
96 /*
97  * Traverse the ftrace_list, invoking all entries.  The reason that we
98  * can use rcu_dereference_raw() is that elements removed from this list
99  * are simply leaked, so there is no need to interact with a grace-period
100  * mechanism.  The rcu_dereference_raw() calls are needed to handle
101  * concurrent insertions into the ftrace_list.
102  *
103  * Silly Alpha and silly pointer-speculation compiler optimizations!
104  */
105 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
106 {
107         struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
108
109         while (op != &ftrace_list_end) {
110                 op->func(ip, parent_ip);
111                 op = rcu_dereference_raw(op->next); /*see above*/
112         }
113 }
114
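/*
 * Illustrative sketch (not part of this file): a minimal callback that
 * would land on ftrace_list via register_ftrace_function() and be
 * invoked by ftrace_list_func() above.  The names below are examples only.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip)
 *	{
 *		...	use ip (the traced function) and parent_ip (its caller)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */
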
115 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
116 {
117         if (!test_tsk_trace_trace(current))
118                 return;
119
120         ftrace_pid_function(ip, parent_ip);
121 }
122
123 static void set_ftrace_pid_function(ftrace_func_t func)
124 {
125         /* do not set ftrace_pid_function to itself! */
126         if (func != ftrace_pid_func)
127                 ftrace_pid_function = func;
128 }
129
130 /**
131  * clear_ftrace_function - reset the ftrace function
132  *
133  * This NULLs the ftrace function and in essence stops
134  * tracing.  There may be some lag before the change takes effect on all CPUs.
135  */
136 void clear_ftrace_function(void)
137 {
138         ftrace_trace_function = ftrace_stub;
139         __ftrace_trace_function = ftrace_stub;
140         ftrace_pid_function = ftrace_stub;
141 }
142
143 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
144 /*
145  * For those archs that do not test function_trace_stop in their
146  * mcount call site, we need to do it from C.
147  */
148 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
149 {
150         if (function_trace_stop)
151                 return;
152
153         __ftrace_trace_function(ip, parent_ip);
154 }
155 #endif
156
157 static void update_global_ops(void)
158 {
159         ftrace_func_t func;
160
161         /*
162          * If there's only one function registered, then call that
163          * function directly. Otherwise, we need to iterate over the
164          * registered callers.
165          */
166         if (ftrace_list == &ftrace_list_end ||
167             ftrace_list->next == &ftrace_list_end)
168                 func = ftrace_list->func;
169         else
170                 func = ftrace_list_func;
171
172         /* If we filter on pids, update to use the pid function */
173         if (!list_empty(&ftrace_pids)) {
174                 set_ftrace_pid_function(func);
175                 func = ftrace_pid_func;
176         }
177
178         global_ops.func = func;
179 }
180
181 static void update_ftrace_function(void)
182 {
183         ftrace_func_t func;
184
185         update_global_ops();
186
187         func = global_ops.func;
188
189 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
190         ftrace_trace_function = func;
191 #else
192         __ftrace_trace_function = func;
193         ftrace_trace_function = ftrace_test_stop_func;
194 #endif
195 }
196
197 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
198 {
199         ops->next = *list;
200         /*
201          * We are entering ops into the ftrace_list but another
202          * CPU might be walking that list. We need to make sure
203          * the ops->next pointer is valid before another CPU sees
204          * the ops pointer included into the ftrace_list.
205          */
206         rcu_assign_pointer(*list, ops);
207 }
208
209 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
210 {
211         struct ftrace_ops **p;
212
213         /*
214          * If we are removing the last function, then simply point
215          * to the ftrace_stub.
216          */
217         if (*list == ops && ops->next == &ftrace_list_end) {
218                 *list = &ftrace_list_end;
219                 return 0;
220         }
221
222         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
223                 if (*p == ops)
224                         break;
225
226         if (*p != ops)
227                 return -1;
228
229         *p = (*p)->next;
230         return 0;
231 }
232
233 static int __register_ftrace_function(struct ftrace_ops *ops)
234 {
235         if (ftrace_disabled)
236                 return -ENODEV;
237
238         if (FTRACE_WARN_ON(ops == &global_ops))
239                 return -EINVAL;
240
241         add_ftrace_ops(&ftrace_list, ops);
242         if (ftrace_enabled)
243                 update_ftrace_function();
244
245         return 0;
246 }
247
248 static int __unregister_ftrace_function(struct ftrace_ops *ops)
249 {
250         int ret;
251
252         if (ftrace_disabled)
253                 return -ENODEV;
254
255         if (FTRACE_WARN_ON(ops == &global_ops))
256                 return -EINVAL;
257
258         ret = remove_ftrace_ops(&ftrace_list, ops);
259         if (ret < 0)
260                 return ret;
261         if (ftrace_enabled)
262                 update_ftrace_function();
263
264         return 0;
265 }
266
267 static void ftrace_update_pid_func(void)
268 {
269         /* Only do something if we are tracing something */
270         if (ftrace_trace_function == ftrace_stub)
271                 return;
272
273         update_ftrace_function();
274 }
275
276 #ifdef CONFIG_FUNCTION_PROFILER
277 struct ftrace_profile {
278         struct hlist_node               node;
279         unsigned long                   ip;
280         unsigned long                   counter;
281 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
282         unsigned long long              time;
283         unsigned long long              time_squared;
284 #endif
285 };
286
287 struct ftrace_profile_page {
288         struct ftrace_profile_page      *next;
289         unsigned long                   index;
290         struct ftrace_profile           records[];
291 };
292
293 struct ftrace_profile_stat {
294         atomic_t                        disabled;
295         struct hlist_head               *hash;
296         struct ftrace_profile_page      *pages;
297         struct ftrace_profile_page      *start;
298         struct tracer_stat              stat;
299 };
300
301 #define PROFILE_RECORDS_SIZE                                            \
302         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
303
304 #define PROFILES_PER_PAGE                                       \
305         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
306
307 static int ftrace_profile_bits __read_mostly;
308 static int ftrace_profile_enabled __read_mostly;
309
310 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
311 static DEFINE_MUTEX(ftrace_profile_lock);
312
313 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
314
315 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
316
317 static void *
318 function_stat_next(void *v, int idx)
319 {
320         struct ftrace_profile *rec = v;
321         struct ftrace_profile_page *pg;
322
323         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
324
325  again:
326         if (idx != 0)
327                 rec++;
328
329         if ((void *)rec >= (void *)&pg->records[pg->index]) {
330                 pg = pg->next;
331                 if (!pg)
332                         return NULL;
333                 rec = &pg->records[0];
334                 if (!rec->counter)
335                         goto again;
336         }
337
338         return rec;
339 }
340
341 static void *function_stat_start(struct tracer_stat *trace)
342 {
343         struct ftrace_profile_stat *stat =
344                 container_of(trace, struct ftrace_profile_stat, stat);
345
346         if (!stat || !stat->start)
347                 return NULL;
348
349         return function_stat_next(&stat->start->records[0], 0);
350 }
351
352 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
353 /* function graph compares on total time */
354 static int function_stat_cmp(void *p1, void *p2)
355 {
356         struct ftrace_profile *a = p1;
357         struct ftrace_profile *b = p2;
358
359         if (a->time < b->time)
360                 return -1;
361         if (a->time > b->time)
362                 return 1;
363         else
364                 return 0;
365 }
366 #else
367 /* without the function graph tracer, compare against hit counts */
368 static int function_stat_cmp(void *p1, void *p2)
369 {
370         struct ftrace_profile *a = p1;
371         struct ftrace_profile *b = p2;
372
373         if (a->counter < b->counter)
374                 return -1;
375         if (a->counter > b->counter)
376                 return 1;
377         else
378                 return 0;
379 }
380 #endif
381
382 static int function_stat_headers(struct seq_file *m)
383 {
384 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
385         seq_printf(m, "  Function                               "
386                    "Hit    Time            Avg             s^2\n"
387                       "  --------                               "
388                    "---    ----            ---             ---\n");
389 #else
390         seq_printf(m, "  Function                               Hit\n"
391                       "  --------                               ---\n");
392 #endif
393         return 0;
394 }
395
396 static int function_stat_show(struct seq_file *m, void *v)
397 {
398         struct ftrace_profile *rec = v;
399         char str[KSYM_SYMBOL_LEN];
400         int ret = 0;
401 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
402         static struct trace_seq s;
403         unsigned long long avg;
404         unsigned long long stddev;
405 #endif
406         mutex_lock(&ftrace_profile_lock);
407
408         /* we raced with function_profile_reset() */
409         if (unlikely(rec->counter == 0)) {
410                 ret = -EBUSY;
411                 goto out;
412         }
413
414         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
415         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
416
417 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
418         seq_printf(m, "    ");
419         avg = rec->time;
420         do_div(avg, rec->counter);
421
422         /* Sample standard deviation (s^2) */
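        /*
         * Note: this relies on the identity sum(x^2) - n * avg^2 == (n - 1) * s^2
         * (with avg == sum(x) / n), so the divide by (counter - 1) below
         * yields the sample variance.
         */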
423         if (rec->counter <= 1)
424                 stddev = 0;
425         else {
426                 stddev = rec->time_squared - rec->counter * avg * avg;
427                 /*
428                  * Divide by only 1000 here for the ns^2 -> us^2 conversion;
429                  * trace_print_graph_duration will divide by 1000 again.
430                  */
431                 do_div(stddev, (rec->counter - 1) * 1000);
432         }
433
434         trace_seq_init(&s);
435         trace_print_graph_duration(rec->time, &s);
436         trace_seq_puts(&s, "    ");
437         trace_print_graph_duration(avg, &s);
438         trace_seq_puts(&s, "    ");
439         trace_print_graph_duration(stddev, &s);
440         trace_print_seq(m, &s);
441 #endif
442         seq_putc(m, '\n');
443 out:
444         mutex_unlock(&ftrace_profile_lock);
445
446         return ret;
447 }
448
449 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
450 {
451         struct ftrace_profile_page *pg;
452
453         pg = stat->pages = stat->start;
454
455         while (pg) {
456                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
457                 pg->index = 0;
458                 pg = pg->next;
459         }
460
461         memset(stat->hash, 0,
462                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
463 }
464
465 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
466 {
467         struct ftrace_profile_page *pg;
468         int functions;
469         int pages;
470         int i;
471
472         /* If we already allocated, do nothing */
473         if (stat->pages)
474                 return 0;
475
476         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
477         if (!stat->pages)
478                 return -ENOMEM;
479
480 #ifdef CONFIG_DYNAMIC_FTRACE
481         functions = ftrace_update_tot_cnt;
482 #else
483         /*
484          * We do not know the number of functions that exist because
485          * dynamic tracing is what counts them. With past experience
486          * we have around 20K functions. That should be more than enough.
487          * It is highly unlikely we will execute every function in
488          * the kernel.
489          */
490         functions = 20000;
491 #endif
492
493         pg = stat->start = stat->pages;
494
495         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
496
497         for (i = 0; i < pages; i++) {
498                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
499                 if (!pg->next)
500                         goto out_free;
501                 pg = pg->next;
502         }
503
504         return 0;
505
506  out_free:
507         pg = stat->start;
508         while (pg) {
509                 unsigned long tmp = (unsigned long)pg;
510
511                 pg = pg->next;
512                 free_page(tmp);
513         }
514
515         free_page((unsigned long)stat->pages);
516         stat->pages = NULL;
517         stat->start = NULL;
518
519         return -ENOMEM;
520 }
521
522 static int ftrace_profile_init_cpu(int cpu)
523 {
524         struct ftrace_profile_stat *stat;
525         int size;
526
527         stat = &per_cpu(ftrace_profile_stats, cpu);
528
529         if (stat->hash) {
530                 /* If the profile is already created, simply reset it */
531                 ftrace_profile_reset(stat);
532                 return 0;
533         }
534
535         /*
536          * We are profiling all functions, but usually only a few thousand
537          * functions are hit. We'll make a hash of 1024 items.
538          */
539         size = FTRACE_PROFILE_HASH_SIZE;
540
541         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
542
543         if (!stat->hash)
544                 return -ENOMEM;
545
546         if (!ftrace_profile_bits) {
547                 size--;
548
549                 for (; size; size >>= 1)
550                         ftrace_profile_bits++;
551         }
552
553         /* Preallocate the function profiling pages */
554         if (ftrace_profile_pages_init(stat) < 0) {
555                 kfree(stat->hash);
556                 stat->hash = NULL;
557                 return -ENOMEM;
558         }
559
560         return 0;
561 }
562
563 static int ftrace_profile_init(void)
564 {
565         int cpu;
566         int ret = 0;
567
568         for_each_online_cpu(cpu) {
569                 ret = ftrace_profile_init_cpu(cpu);
570                 if (ret)
571                         break;
572         }
573
574         return ret;
575 }
576
577 /* interrupts must be disabled */
578 static struct ftrace_profile *
579 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
580 {
581         struct ftrace_profile *rec;
582         struct hlist_head *hhd;
583         struct hlist_node *n;
584         unsigned long key;
585
586         key = hash_long(ip, ftrace_profile_bits);
587         hhd = &stat->hash[key];
588
589         if (hlist_empty(hhd))
590                 return NULL;
591
592         hlist_for_each_entry_rcu(rec, n, hhd, node) {
593                 if (rec->ip == ip)
594                         return rec;
595         }
596
597         return NULL;
598 }
599
600 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
601                                struct ftrace_profile *rec)
602 {
603         unsigned long key;
604
605         key = hash_long(rec->ip, ftrace_profile_bits);
606         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
607 }
608
609 /*
610  * The memory is already allocated; this simply finds a new record to use.
611  */
612 static struct ftrace_profile *
613 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
614 {
615         struct ftrace_profile *rec = NULL;
616
617         /* prevent recursion (from NMIs) */
618         if (atomic_inc_return(&stat->disabled) != 1)
619                 goto out;
620
621         /*
622          * Try to find the function again since an NMI
623          * could have added it
624          */
625         rec = ftrace_find_profiled_func(stat, ip);
626         if (rec)
627                 goto out;
628
629         if (stat->pages->index == PROFILES_PER_PAGE) {
630                 if (!stat->pages->next)
631                         goto out;
632                 stat->pages = stat->pages->next;
633         }
634
635         rec = &stat->pages->records[stat->pages->index++];
636         rec->ip = ip;
637         ftrace_add_profile(stat, rec);
638
639  out:
640         atomic_dec(&stat->disabled);
641
642         return rec;
643 }
644
645 static void
646 function_profile_call(unsigned long ip, unsigned long parent_ip)
647 {
648         struct ftrace_profile_stat *stat;
649         struct ftrace_profile *rec;
650         unsigned long flags;
651
652         if (!ftrace_profile_enabled)
653                 return;
654
655         local_irq_save(flags);
656
657         stat = &__get_cpu_var(ftrace_profile_stats);
658         if (!stat->hash || !ftrace_profile_enabled)
659                 goto out;
660
661         rec = ftrace_find_profiled_func(stat, ip);
662         if (!rec) {
663                 rec = ftrace_profile_alloc(stat, ip);
664                 if (!rec)
665                         goto out;
666         }
667
668         rec->counter++;
669  out:
670         local_irq_restore(flags);
671 }
672
673 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
674 static int profile_graph_entry(struct ftrace_graph_ent *trace)
675 {
676         function_profile_call(trace->func, 0);
677         return 1;
678 }
679
680 static void profile_graph_return(struct ftrace_graph_ret *trace)
681 {
682         struct ftrace_profile_stat *stat;
683         unsigned long long calltime;
684         struct ftrace_profile *rec;
685         unsigned long flags;
686
687         local_irq_save(flags);
688         stat = &__get_cpu_var(ftrace_profile_stats);
689         if (!stat->hash || !ftrace_profile_enabled)
690                 goto out;
691
692         /* If the calltime was zeroed, ignore it */
693         if (!trace->calltime)
694                 goto out;
695
696         calltime = trace->rettime - trace->calltime;
697
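        /*
         * Without GRAPH_TIME the profile reports a function's own run time
         * rather than the time including its children, so subtract the
         * child time (subtime) accumulated on this ret_stack entry.
         */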
698         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
699                 int index;
700
701                 index = trace->depth;
702
703                 /* Append this call time to the parent time to subtract */
704                 if (index)
705                         current->ret_stack[index - 1].subtime += calltime;
706
707                 if (current->ret_stack[index].subtime < calltime)
708                         calltime -= current->ret_stack[index].subtime;
709                 else
710                         calltime = 0;
711         }
712
713         rec = ftrace_find_profiled_func(stat, trace->func);
714         if (rec) {
715                 rec->time += calltime;
716                 rec->time_squared += calltime * calltime;
717         }
718
719  out:
720         local_irq_restore(flags);
721 }
722
723 static int register_ftrace_profiler(void)
724 {
725         return register_ftrace_graph(&profile_graph_return,
726                                      &profile_graph_entry);
727 }
728
729 static void unregister_ftrace_profiler(void)
730 {
731         unregister_ftrace_graph();
732 }
733 #else
734 static struct ftrace_ops ftrace_profile_ops __read_mostly =
735 {
736         .func           = function_profile_call,
737 };
738
739 static int register_ftrace_profiler(void)
740 {
741         return register_ftrace_function(&ftrace_profile_ops);
742 }
743
744 static void unregister_ftrace_profiler(void)
745 {
746         unregister_ftrace_function(&ftrace_profile_ops);
747 }
748 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
749
750 static ssize_t
751 ftrace_profile_write(struct file *filp, const char __user *ubuf,
752                      size_t cnt, loff_t *ppos)
753 {
754         unsigned long val;
755         char buf[64];           /* big enough to hold a number */
756         int ret;
757
758         if (cnt >= sizeof(buf))
759                 return -EINVAL;
760
761         if (copy_from_user(&buf, ubuf, cnt))
762                 return -EFAULT;
763
764         buf[cnt] = 0;
765
766         ret = strict_strtoul(buf, 10, &val);
767         if (ret < 0)
768                 return ret;
769
770         val = !!val;
771
772         mutex_lock(&ftrace_profile_lock);
773         if (ftrace_profile_enabled ^ val) {
774                 if (val) {
775                         ret = ftrace_profile_init();
776                         if (ret < 0) {
777                                 cnt = ret;
778                                 goto out;
779                         }
780
781                         ret = register_ftrace_profiler();
782                         if (ret < 0) {
783                                 cnt = ret;
784                                 goto out;
785                         }
786                         ftrace_profile_enabled = 1;
787                 } else {
788                         ftrace_profile_enabled = 0;
789                         /*
790                          * unregister_ftrace_profiler calls stop_machine
791                          * so this acts like a synchronize_sched().
792                          */
793                         unregister_ftrace_profiler();
794                 }
795         }
796  out:
797         mutex_unlock(&ftrace_profile_lock);
798
799         *ppos += cnt;
800
801         return cnt;
802 }
803
804 static ssize_t
805 ftrace_profile_read(struct file *filp, char __user *ubuf,
806                      size_t cnt, loff_t *ppos)
807 {
808         char buf[64];           /* big enough to hold a number */
809         int r;
810
811         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
812         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
813 }
814
815 static const struct file_operations ftrace_profile_fops = {
816         .open           = tracing_open_generic,
817         .read           = ftrace_profile_read,
818         .write          = ftrace_profile_write,
819         .llseek         = default_llseek,
820 };
821
822 /* used to initialize the real stat files */
823 static struct tracer_stat function_stats __initdata = {
824         .name           = "functions",
825         .stat_start     = function_stat_start,
826         .stat_next      = function_stat_next,
827         .stat_cmp       = function_stat_cmp,
828         .stat_headers   = function_stat_headers,
829         .stat_show      = function_stat_show
830 };
831
832 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
833 {
834         struct ftrace_profile_stat *stat;
835         struct dentry *entry;
836         char *name;
837         int ret;
838         int cpu;
839
840         for_each_possible_cpu(cpu) {
841                 stat = &per_cpu(ftrace_profile_stats, cpu);
842
843                 /* allocate enough for function name + cpu number */
844                 name = kmalloc(32, GFP_KERNEL);
845                 if (!name) {
846                         /*
847                          * The files created are permanent; if something happens,
848                          * we still do not free the memory.
849                          */
850                         WARN(1,
851                              "Could not allocate stat file for cpu %d\n",
852                              cpu);
853                         return;
854                 }
855                 stat->stat = function_stats;
856                 snprintf(name, 32, "function%d", cpu);
857                 stat->stat.name = name;
858                 ret = register_stat_tracer(&stat->stat);
859                 if (ret) {
860                         WARN(1,
861                              "Could not register function stat for cpu %d\n",
862                              cpu);
863                         kfree(name);
864                         return;
865                 }
866         }
867
868         entry = debugfs_create_file("function_profile_enabled", 0644,
869                                     d_tracer, NULL, &ftrace_profile_fops);
870         if (!entry)
871                 pr_warning("Could not create debugfs "
872                            "'function_profile_enabled' entry\n");
873 }
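
/*
 * Userspace usage note (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * where function0, function1, ... are the per-cpu stat files registered above.
 */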
874
875 #else /* CONFIG_FUNCTION_PROFILER */
876 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
877 {
878 }
879 #endif /* CONFIG_FUNCTION_PROFILER */
880
881 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
882
883 #ifdef CONFIG_DYNAMIC_FTRACE
884
885 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
886 # error Dynamic ftrace depends on MCOUNT_RECORD
887 #endif
888
889 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
890
891 struct ftrace_func_probe {
892         struct hlist_node       node;
893         struct ftrace_probe_ops *ops;
894         unsigned long           flags;
895         unsigned long           ip;
896         void                    *data;
897         struct rcu_head         rcu;
898 };
899
900 enum {
901         FTRACE_ENABLE_CALLS             = (1 << 0),
902         FTRACE_DISABLE_CALLS            = (1 << 1),
903         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
904         FTRACE_START_FUNC_RET           = (1 << 3),
905         FTRACE_STOP_FUNC_RET            = (1 << 4),
906 };
907 struct ftrace_func_entry {
908         struct hlist_node hlist;
909         unsigned long ip;
910 };
911
912 struct ftrace_hash {
913         unsigned long           size_bits;
914         struct hlist_head       *buckets;
915         unsigned long           count;
916 };
917
918 /*
919  * We make these constant because no one should touch them;
920  * they are used as the default "empty hash" to avoid allocating
921  * one all the time. They live in a read-only section, so any
922  * attempt to modify them will cause an exception.
923  */
924 static const struct hlist_head empty_buckets[1];
925 static const struct ftrace_hash empty_hash = {
926         .buckets = (struct hlist_head *)empty_buckets,
927 };
928 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
929
930 enum {
931         FTRACE_OPS_FL_ENABLED           = 1,
932 };
933
934 static struct ftrace_ops global_ops = {
935         .func                   = ftrace_stub,
936         .notrace_hash           = EMPTY_HASH,
937         .filter_hash            = EMPTY_HASH,
938 };
939
940 static struct dyn_ftrace *ftrace_new_addrs;
941
942 static DEFINE_MUTEX(ftrace_regex_lock);
943
944 struct ftrace_page {
945         struct ftrace_page      *next;
946         int                     index;
947         struct dyn_ftrace       records[];
948 };
949
950 #define ENTRIES_PER_PAGE \
951   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
952
953 /* estimate from running different kernels */
954 #define NR_TO_INIT              10000
955
956 static struct ftrace_page       *ftrace_pages_start;
957 static struct ftrace_page       *ftrace_pages;
958
959 static struct dyn_ftrace *ftrace_free_records;
960
961 static struct ftrace_func_entry *
962 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
963 {
964         unsigned long key;
965         struct ftrace_func_entry *entry;
966         struct hlist_head *hhd;
967         struct hlist_node *n;
968
969         if (!hash->count)
970                 return NULL;
971
972         if (hash->size_bits > 0)
973                 key = hash_long(ip, hash->size_bits);
974         else
975                 key = 0;
976
977         hhd = &hash->buckets[key];
978
979         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
980                 if (entry->ip == ip)
981                         return entry;
982         }
983         return NULL;
984 }
985
986 static void __add_hash_entry(struct ftrace_hash *hash,
987                              struct ftrace_func_entry *entry)
988 {
989         struct hlist_head *hhd;
990         unsigned long key;
991
992         if (hash->size_bits)
993                 key = hash_long(entry->ip, hash->size_bits);
994         else
995                 key = 0;
996
997         hhd = &hash->buckets[key];
998         hlist_add_head(&entry->hlist, hhd);
999         hash->count++;
1000 }
1001
1002 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1003 {
1004         struct ftrace_func_entry *entry;
1005
1006         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1007         if (!entry)
1008                 return -ENOMEM;
1009
1010         entry->ip = ip;
1011         __add_hash_entry(hash, entry);
1012
1013         return 0;
1014 }
1015
1016 static void
1017 free_hash_entry(struct ftrace_hash *hash,
1018                   struct ftrace_func_entry *entry)
1019 {
1020         hlist_del(&entry->hlist);
1021         kfree(entry);
1022         hash->count--;
1023 }
1024
1025 static void
1026 remove_hash_entry(struct ftrace_hash *hash,
1027                   struct ftrace_func_entry *entry)
1028 {
1029         hlist_del(&entry->hlist);
1030         hash->count--;
1031 }
1032
1033 static void ftrace_hash_clear(struct ftrace_hash *hash)
1034 {
1035         struct hlist_head *hhd;
1036         struct hlist_node *tp, *tn;
1037         struct ftrace_func_entry *entry;
1038         int size = 1 << hash->size_bits;
1039         int i;
1040
1041         if (!hash->count)
1042                 return;
1043
1044         for (i = 0; i < size; i++) {
1045                 hhd = &hash->buckets[i];
1046                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1047                         free_hash_entry(hash, entry);
1048         }
1049         FTRACE_WARN_ON(hash->count);
1050 }
1051
1052 static void free_ftrace_hash(struct ftrace_hash *hash)
1053 {
1054         if (!hash || hash == EMPTY_HASH)
1055                 return;
1056         ftrace_hash_clear(hash);
1057         kfree(hash->buckets);
1058         kfree(hash);
1059 }
1060
1061 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1062 {
1063         struct ftrace_hash *hash;
1064         int size;
1065
1066         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1067         if (!hash)
1068                 return NULL;
1069
1070         size = 1 << size_bits;
1071         hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1072
1073         if (!hash->buckets) {
1074                 kfree(hash);
1075                 return NULL;
1076         }
1077
1078         hash->size_bits = size_bits;
1079
1080         return hash;
1081 }
1082
1083 static struct ftrace_hash *
1084 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1085 {
1086         struct ftrace_func_entry *entry;
1087         struct ftrace_hash *new_hash;
1088         struct hlist_node *tp;
1089         int size;
1090         int ret;
1091         int i;
1092
1093         new_hash = alloc_ftrace_hash(size_bits);
1094         if (!new_hash)
1095                 return NULL;
1096
1097         /* Empty hash? */
1098         if (!hash || !hash->count)
1099                 return new_hash;
1100
1101         size = 1 << hash->size_bits;
1102         for (i = 0; i < size; i++) {
1103                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1104                         ret = add_hash_entry(new_hash, entry->ip);
1105                         if (ret < 0)
1106                                 goto free_hash;
1107                 }
1108         }
1109
1110         FTRACE_WARN_ON(new_hash->count != hash->count);
1111
1112         return new_hash;
1113
1114  free_hash:
1115         free_ftrace_hash(new_hash);
1116         return NULL;
1117 }
1118
1119 static int
1120 ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1121 {
1122         struct ftrace_func_entry *entry;
1123         struct hlist_node *tp, *tn;
1124         struct hlist_head *hhd;
1125         struct ftrace_hash *hash = *dst;
1126         unsigned long key;
1127         int size = src->count;
1128         int bits = 0;
1129         int i;
1130
1131         /*
1132          * If the new source is empty, just free dst and assign it
1133          * the empty_hash.
1134          */
1135         if (!src->count) {
1136                 free_ftrace_hash(*dst);
1137                 *dst = EMPTY_HASH;
1138                 return 0;
1139         }
1140
1141         ftrace_hash_clear(hash);
1142
1143         /*
1144          * Make the hash size about 1/2 the # found
1145          */
1146         for (size /= 2; size; size >>= 1)
1147                 bits++;
1148
1149         /* Don't allocate too much */
1150         if (bits > FTRACE_HASH_MAX_BITS)
1151                 bits = FTRACE_HASH_MAX_BITS;
1152
1153         /* We can't modify the empty_hash */
1154         if (hash == EMPTY_HASH) {
1155                 /* Create a new hash */
1156                 *dst = alloc_ftrace_hash(bits);
1157                 if (!*dst) {
1158                         *dst = EMPTY_HASH;
1159                         return -ENOMEM;
1160                 }
1161                 hash = *dst;
1162         } else {
1163                 size = 1 << bits;
1164
1165                 /* Use the old hash, but create new buckets */
1166                 hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL);
1167                 if (!hhd)
1168                         return -ENOMEM;
1169
1170                 kfree(hash->buckets);
1171                 hash->buckets = hhd;
1172                 hash->size_bits = bits;
1173         }
1174
1175         size = 1 << src->size_bits;
1176         for (i = 0; i < size; i++) {
1177                 hhd = &src->buckets[i];
1178                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1179                         if (bits > 0)
1180                                 key = hash_long(entry->ip, bits);
1181                         else
1182                                 key = 0;
1183                         remove_hash_entry(src, entry);
1184                         __add_hash_entry(hash, entry);
1185                 }
1186         }
1187
1188         return 0;
1189 }
1190
1191 /*
1192  * This is a double for loop. Do not use 'break' to break out of it;
1193  * you must use a goto.
1194  */
1195 #define do_for_each_ftrace_rec(pg, rec)                                 \
1196         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1197                 int _____i;                                             \
1198                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1199                         rec = &pg->records[_____i];
1200
1201 #define while_for_each_ftrace_rec()             \
1202                 }                               \
1203         }
1204
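/*
 * Illustrative use of the iterators above (names are examples only):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;	<-- never 'break' out of this loop
 *	} while_for_each_ftrace_rec();
 * found:
 *	...
 */
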
1205 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1206                                      int filter_hash,
1207                                      bool inc)
1208 {
1209         struct ftrace_hash *hash;
1210         struct ftrace_hash *other_hash;
1211         struct ftrace_page *pg;
1212         struct dyn_ftrace *rec;
1213         int count = 0;
1214         int all = 0;
1215
1216         /* Only update if the ops has been registered */
1217         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1218                 return;
1219
1220         /*
1221          * In the filter_hash case:
1222          *   If the count is zero, we update all records.
1223          *   Otherwise we just update the items in the hash.
1224          *
1225          * In the notrace_hash case:
1226          *   We enable the update in the hash.
1227          *   As disabling notrace means enabling the tracing,
1228          *   and enabling notrace means disabling, the inc variable
1229          *   gets inverted.
1230          */
1231         if (filter_hash) {
1232                 hash = ops->filter_hash;
1233                 other_hash = ops->notrace_hash;
1234                 if (!hash->count)
1235                         all = 1;
1236         } else {
1237                 inc = !inc;
1238                 hash = ops->notrace_hash;
1239                 other_hash = ops->filter_hash;
1240                 /*
1241                  * If the notrace hash has no items,
1242                  * then there's nothing to do.
1243                  */
1244                 if (!hash->count)
1245                         return;
1246         }
1247
1248         do_for_each_ftrace_rec(pg, rec) {
1249                 int in_other_hash = 0;
1250                 int in_hash = 0;
1251                 int match = 0;
1252
1253                 if (all) {
1254                         /*
1255                          * Only the filter_hash affects all records.
1256                          * Update if the record is not in the notrace hash.
1257                          */
1258                         if (!ftrace_lookup_ip(other_hash, rec->ip))
1259                                 match = 1;
1260                 } else {
1261                         in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1262                         in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1263
1264                         /*
1265                          * filter_hash: match if in this hash and not in notrace; notrace: match if also in filter hash (or filter is empty).
1266                          */
1267                         if (filter_hash && in_hash && !in_other_hash)
1268                                 match = 1;
1269                         else if (!filter_hash && in_hash &&
1270                                  (in_other_hash || !other_hash->count))
1271                                 match = 1;
1272                 }
1273                 if (!match)
1274                         continue;
1275
1276                 if (inc) {
1277                         rec->flags++;
1278                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1279                                 return;
1280                 } else {
1281                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1282                                 return;
1283                         rec->flags--;
1284                 }
1285                 count++;
1286                 /* Shortcut, if we handled all records, we are done. */
1287                 if (!all && count == hash->count)
1288                         return;
1289         } while_for_each_ftrace_rec();
1290 }
1291
1292 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1293                                     int filter_hash)
1294 {
1295         __ftrace_hash_rec_update(ops, filter_hash, 0);
1296 }
1297
1298 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1299                                    int filter_hash)
1300 {
1301         __ftrace_hash_rec_update(ops, filter_hash, 1);
1302 }
1303
1304 static void ftrace_free_rec(struct dyn_ftrace *rec)
1305 {
1306         rec->freelist = ftrace_free_records;
1307         ftrace_free_records = rec;
1308         rec->flags |= FTRACE_FL_FREE;
1309 }
1310
1311 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1312 {
1313         struct dyn_ftrace *rec;
1314
1315         /* First check for freed records */
1316         if (ftrace_free_records) {
1317                 rec = ftrace_free_records;
1318
1319                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1320                         FTRACE_WARN_ON_ONCE(1);
1321                         ftrace_free_records = NULL;
1322                         return NULL;
1323                 }
1324
1325                 ftrace_free_records = rec->freelist;
1326                 memset(rec, 0, sizeof(*rec));
1327                 return rec;
1328         }
1329
1330         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1331                 if (!ftrace_pages->next) {
1332                         /* allocate another page */
1333                         ftrace_pages->next =
1334                                 (void *)get_zeroed_page(GFP_KERNEL);
1335                         if (!ftrace_pages->next)
1336                                 return NULL;
1337                 }
1338                 ftrace_pages = ftrace_pages->next;
1339         }
1340
1341         return &ftrace_pages->records[ftrace_pages->index++];
1342 }
1343
1344 static struct dyn_ftrace *
1345 ftrace_record_ip(unsigned long ip)
1346 {
1347         struct dyn_ftrace *rec;
1348
1349         if (ftrace_disabled)
1350                 return NULL;
1351
1352         rec = ftrace_alloc_dyn_node(ip);
1353         if (!rec)
1354                 return NULL;
1355
1356         rec->ip = ip;
1357         rec->newlist = ftrace_new_addrs;
1358         ftrace_new_addrs = rec;
1359
1360         return rec;
1361 }
1362
1363 static void print_ip_ins(const char *fmt, unsigned char *p)
1364 {
1365         int i;
1366
1367         printk(KERN_CONT "%s", fmt);
1368
1369         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1370                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1371 }
1372
1373 static void ftrace_bug(int failed, unsigned long ip)
1374 {
1375         switch (failed) {
1376         case -EFAULT:
1377                 FTRACE_WARN_ON_ONCE(1);
1378                 pr_info("ftrace faulted on modifying ");
1379                 print_ip_sym(ip);
1380                 break;
1381         case -EINVAL:
1382                 FTRACE_WARN_ON_ONCE(1);
1383                 pr_info("ftrace failed to modify ");
1384                 print_ip_sym(ip);
1385                 print_ip_ins(" actual: ", (unsigned char *)ip);
1386                 printk(KERN_CONT "\n");
1387                 break;
1388         case -EPERM:
1389                 FTRACE_WARN_ON_ONCE(1);
1390                 pr_info("ftrace faulted on writing ");
1391                 print_ip_sym(ip);
1392                 break;
1393         default:
1394                 FTRACE_WARN_ON_ONCE(1);
1395                 pr_info("ftrace faulted on unknown error ");
1396                 print_ip_sym(ip);
1397         }
1398 }
1399
1400
1401 /* Return 1 if the address range is reserved for ftrace */
1402 int ftrace_text_reserved(void *start, void *end)
1403 {
1404         struct dyn_ftrace *rec;
1405         struct ftrace_page *pg;
1406
1407         do_for_each_ftrace_rec(pg, rec) {
1408                 if (rec->ip <= (unsigned long)end &&
1409                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1410                         return 1;
1411         } while_for_each_ftrace_rec();
1412         return 0;
1413 }
1414
1415
1416 static int
1417 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1418 {
1419         unsigned long ftrace_addr;
1420         unsigned long flag = 0UL;
1421
1422         ftrace_addr = (unsigned long)FTRACE_ADDR;
1423
1424         /*
1425          * If we are enabling tracing:
1426          *
1427          *   If the record has a ref count, then we need to enable it
1428          *   because someone is using it.
1429          *
1430          *   Otherwise we make sure it is disabled.
1431          *
1432          * If we are disabling tracing, then disable all records that
1433          * are enabled.
1434          */
1435         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1436                 flag = FTRACE_FL_ENABLED;
1437
1438         /* If the state of this record hasn't changed, then do nothing */
1439         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1440                 return 0;
1441
1442         if (flag) {
1443                 rec->flags |= FTRACE_FL_ENABLED;
1444                 return ftrace_make_call(rec, ftrace_addr);
1445         }
1446
1447         rec->flags &= ~FTRACE_FL_ENABLED;
1448         return ftrace_make_nop(NULL, rec, ftrace_addr);
1449 }
1450
1451 static void ftrace_replace_code(int enable)
1452 {
1453         struct dyn_ftrace *rec;
1454         struct ftrace_page *pg;
1455         int failed;
1456
1457         if (unlikely(ftrace_disabled))
1458                 return;
1459
1460         do_for_each_ftrace_rec(pg, rec) {
1461                 /* Skip over free records */
1462                 if (rec->flags & FTRACE_FL_FREE)
1463                         continue;
1464
1465                 failed = __ftrace_replace_code(rec, enable);
1466                 if (failed) {
1467                         ftrace_bug(failed, rec->ip);
1468                         /* Stop processing */
1469                         return;
1470                 }
1471         } while_for_each_ftrace_rec();
1472 }
1473
1474 static int
1475 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1476 {
1477         unsigned long ip;
1478         int ret;
1479
1480         ip = rec->ip;
1481
1482         if (unlikely(ftrace_disabled))
1483                 return 0;
1484
1485         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1486         if (ret) {
1487                 ftrace_bug(ret, ip);
1488                 return 0;
1489         }
1490         return 1;
1491 }
1492
1493 /*
1494  * archs can override this function if they must do something
1495  * before the code modification is performed.
1496  */
1497 int __weak ftrace_arch_code_modify_prepare(void)
1498 {
1499         return 0;
1500 }
1501
1502 /*
1503  * archs can override this function if they must do something
1504  * after the code modification is performed.
1505  */
1506 int __weak ftrace_arch_code_modify_post_process(void)
1507 {
1508         return 0;
1509 }
1510
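/*
 * Run via stop_machine() from ftrace_run_update_code() below, so the text
 * patching happens while the other CPUs are held quiescent.
 */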
1511 static int __ftrace_modify_code(void *data)
1512 {
1513         int *command = data;
1514
1515         if (*command & FTRACE_ENABLE_CALLS)
1516                 ftrace_replace_code(1);
1517         else if (*command & FTRACE_DISABLE_CALLS)
1518                 ftrace_replace_code(0);
1519
1520         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1521                 ftrace_update_ftrace_func(ftrace_trace_function);
1522
1523         if (*command & FTRACE_START_FUNC_RET)
1524                 ftrace_enable_ftrace_graph_caller();
1525         else if (*command & FTRACE_STOP_FUNC_RET)
1526                 ftrace_disable_ftrace_graph_caller();
1527
1528         return 0;
1529 }
1530
1531 static void ftrace_run_update_code(int command)
1532 {
1533         int ret;
1534
1535         ret = ftrace_arch_code_modify_prepare();
1536         FTRACE_WARN_ON(ret);
1537         if (ret)
1538                 return;
1539
1540         stop_machine(__ftrace_modify_code, &command, NULL);
1541
1542         ret = ftrace_arch_code_modify_post_process();
1543         FTRACE_WARN_ON(ret);
1544 }
1545
1546 static ftrace_func_t saved_ftrace_func;
1547 static int ftrace_start_up;
1548
1549 static void ftrace_startup_enable(int command)
1550 {
1551         if (saved_ftrace_func != ftrace_trace_function) {
1552                 saved_ftrace_func = ftrace_trace_function;
1553                 command |= FTRACE_UPDATE_TRACE_FUNC;
1554         }
1555
1556         if (!command || !ftrace_enabled)
1557                 return;
1558
1559         ftrace_run_update_code(command);
1560 }
1561
1562 static void ftrace_startup(struct ftrace_ops *ops, int command)
1563 {
1564         if (unlikely(ftrace_disabled))
1565                 return;
1566
1567         ftrace_start_up++;
1568         command |= FTRACE_ENABLE_CALLS;
1569
1570         ops->flags |= FTRACE_OPS_FL_ENABLED;
1571         if (ftrace_start_up == 1)
1572                 ftrace_hash_rec_enable(ops, 1);
1573
1574         ftrace_startup_enable(command);
1575 }
1576
1577 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1578 {
1579         if (unlikely(ftrace_disabled))
1580                 return;
1581
1582         ftrace_start_up--;
1583         /*
1584          * Just warn in case of imbalance; no need to kill ftrace, it's not
1585          * critical, but the ftrace_call callers may never be nopped again after
1586          * further ftrace uses.
1587          */
1588         WARN_ON_ONCE(ftrace_start_up < 0);
1589
1590         if (!ftrace_start_up)
1591                 ftrace_hash_rec_disable(ops, 1);
1592
1593         if (!ftrace_start_up) {
1594                 command |= FTRACE_DISABLE_CALLS;
1595                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1596         }
1597
1598         if (saved_ftrace_func != ftrace_trace_function) {
1599                 saved_ftrace_func = ftrace_trace_function;
1600                 command |= FTRACE_UPDATE_TRACE_FUNC;
1601         }
1602
1603         if (!command || !ftrace_enabled)
1604                 return;
1605
1606         ftrace_run_update_code(command);
1607 }
1608
1609 static void ftrace_startup_sysctl(void)
1610 {
1611         if (unlikely(ftrace_disabled))
1612                 return;
1613
1614         /* Force update next time */
1615         saved_ftrace_func = NULL;
1616         /* ftrace_start_up is true if we want ftrace running */
1617         if (ftrace_start_up)
1618                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1619 }
1620
1621 static void ftrace_shutdown_sysctl(void)
1622 {
1623         if (unlikely(ftrace_disabled))
1624                 return;
1625
1626         /* ftrace_start_up is true if ftrace is running */
1627         if (ftrace_start_up)
1628                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1629 }
1630
1631 static cycle_t          ftrace_update_time;
1632 static unsigned long    ftrace_update_cnt;
1633 unsigned long           ftrace_update_tot_cnt;
1634
1635 static int ftrace_update_code(struct module *mod)
1636 {
1637         struct dyn_ftrace *p;
1638         cycle_t start, stop;
1639
1640         start = ftrace_now(raw_smp_processor_id());
1641         ftrace_update_cnt = 0;
1642
1643         while (ftrace_new_addrs) {
1644
1645                 /* If something went wrong, bail without enabling anything */
1646                 if (unlikely(ftrace_disabled))
1647                         return -1;
1648
1649                 p = ftrace_new_addrs;
1650                 ftrace_new_addrs = p->newlist;
1651                 p->flags = 0L;
1652
1653                 /*
1654                  * Do the initial record conversion from mcount jump
1655                  * to the NOP instructions.
1656                  */
1657                 if (!ftrace_code_disable(mod, p)) {
1658                         ftrace_free_rec(p);
1659                         /* Game over */
1660                         break;
1661                 }
1662
1663                 ftrace_update_cnt++;
1664
1665                 /*
1666                  * If the tracing is enabled, go ahead and enable the record.
1667                  *
1668                  * The reason not to enable the record immediately is the
1669                  * inherent check of ftrace_make_nop/ftrace_make_call for
1670                  * correct previous instructions.  Doing the NOP conversion
1671                  * first puts the module into the correct state, thus
1672                  * passing the ftrace_make_call check.
1673                  */
1674                 if (ftrace_start_up) {
1675                         int failed = __ftrace_replace_code(p, 1);
1676                         if (failed) {
1677                                 ftrace_bug(failed, p->ip);
1678                                 ftrace_free_rec(p);
1679                         }
1680                 }
1681         }
1682
1683         stop = ftrace_now(raw_smp_processor_id());
1684         ftrace_update_time = stop - start;
1685         ftrace_update_tot_cnt += ftrace_update_cnt;
1686
1687         return 0;
1688 }
1689
1690 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1691 {
1692         struct ftrace_page *pg;
1693         int cnt;
1694         int i;
1695
1696         /* allocate a few pages */
1697         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1698         if (!ftrace_pages_start)
1699                 return -1;
1700
1701         /*
1702          * Allocate a few more pages.
1703          *
1704          * TODO: have some parser search vmlinux before
1705          *   final linking to find all calls to ftrace.
1706          *   Then we can:
1707          *    a) know how many pages to allocate.
1708          *     and/or
1709          *    b) set up the table then.
1710          *
1711          *  The dynamic code is still necessary for
1712          *  modules.
1713          */
1714
1715         pg = ftrace_pages = ftrace_pages_start;
1716
1717         cnt = num_to_init / ENTRIES_PER_PAGE;
1718         pr_info("ftrace: allocating %ld entries in %d pages\n",
1719                 num_to_init, cnt + 1);
1720
1721         for (i = 0; i < cnt; i++) {
1722                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1723
1724                 /* If we fail, we'll try later anyway */
1725                 if (!pg->next)
1726                         break;
1727
1728                 pg = pg->next;
1729         }
1730
1731         return 0;
1732 }
1733
1734 enum {
1735         FTRACE_ITER_FILTER      = (1 << 0),
1736         FTRACE_ITER_NOTRACE     = (1 << 1),
1737         FTRACE_ITER_PRINTALL    = (1 << 2),
1738         FTRACE_ITER_HASH        = (1 << 3),
1739         FTRACE_ITER_ENABLED     = (1 << 4),
1740 };
1741
1742 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1743
1744 struct ftrace_iterator {
1745         loff_t                          pos;
1746         loff_t                          func_pos;
1747         struct ftrace_page              *pg;
1748         struct dyn_ftrace               *func;
1749         struct ftrace_func_probe        *probe;
1750         struct trace_parser             parser;
1751         struct ftrace_hash              *hash;
1752         struct ftrace_ops               *ops;
1753         int                             hidx;
1754         int                             idx;
1755         unsigned                        flags;
1756 };
1757
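/*
 * Advance the iterator through the ftrace_func_hash of registered function
 * probes, moving on to the next hash bucket once the current one is
 * exhausted.  Returns NULL when every bucket has been walked.
 */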
1758 static void *
1759 t_hash_next(struct seq_file *m, loff_t *pos)
1760 {
1761         struct ftrace_iterator *iter = m->private;
1762         struct hlist_node *hnd = NULL;
1763         struct hlist_head *hhd;
1764
1765         (*pos)++;
1766         iter->pos = *pos;
1767
1768         if (iter->probe)
1769                 hnd = &iter->probe->node;
1770  retry:
1771         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1772                 return NULL;
1773
1774         hhd = &ftrace_func_hash[iter->hidx];
1775
1776         if (hlist_empty(hhd)) {
1777                 iter->hidx++;
1778                 hnd = NULL;
1779                 goto retry;
1780         }
1781
1782         if (!hnd)
1783                 hnd = hhd->first;
1784         else {
1785                 hnd = hnd->next;
1786                 if (!hnd) {
1787                         iter->hidx++;
1788                         goto retry;
1789                 }
1790         }
1791
1792         if (WARN_ON_ONCE(!hnd))
1793                 return NULL;
1794
1795         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1796
1797         return iter;
1798 }
1799
1800 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1801 {
1802         struct ftrace_iterator *iter = m->private;
1803         void *p = NULL;
1804         loff_t l;
1805
1806         if (iter->func_pos > *pos)
1807                 return NULL;
1808
1809         iter->hidx = 0;
1810         for (l = 0; l <= (*pos - iter->func_pos); ) {
1811                 p = t_hash_next(m, &l);
1812                 if (!p)
1813                         break;
1814         }
1815         if (!p)
1816                 return NULL;
1817
1818         /* Only set this if we have an item */
1819         iter->flags |= FTRACE_ITER_HASH;
1820
1821         return iter;
1822 }
1823
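/*
 * Print a single probe entry, either through the probe's own ->print()
 * callback or in the default "function:probe_func[:data]" form.
 */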
1824 static int
1825 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1826 {
1827         struct ftrace_func_probe *rec;
1828
1829         rec = iter->probe;
1830         if (WARN_ON_ONCE(!rec))
1831                 return -EIO;
1832
1833         if (rec->ops->print)
1834                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1835
1836         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1837
1838         if (rec->data)
1839                 seq_printf(m, ":%p", rec->data);
1840         seq_putc(m, '\n');
1841
1842         return 0;
1843 }
1844
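/*
 * Step to the next dyn_ftrace record, skipping freed records and records
 * excluded by the current view (filter hash, notrace hash, or the
 * enabled-only listing).  Once the record pages are exhausted, fall
 * through to the function probe hash via t_hash_start().
 */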
1845 static void *
1846 t_next(struct seq_file *m, void *v, loff_t *pos)
1847 {
1848         struct ftrace_iterator *iter = m->private;
1849         struct ftrace_ops *ops = &global_ops;
1850         struct dyn_ftrace *rec = NULL;
1851
1852         if (unlikely(ftrace_disabled))
1853                 return NULL;
1854
1855         if (iter->flags & FTRACE_ITER_HASH)
1856                 return t_hash_next(m, pos);
1857
1858         (*pos)++;
1859         iter->pos = iter->func_pos = *pos;
1860
1861         if (iter->flags & FTRACE_ITER_PRINTALL)
1862                 return t_hash_start(m, pos);
1863
1864  retry:
1865         if (iter->idx >= iter->pg->index) {
1866                 if (iter->pg->next) {
1867                         iter->pg = iter->pg->next;
1868                         iter->idx = 0;
1869                         goto retry;
1870                 }
1871         } else {
1872                 rec = &iter->pg->records[iter->idx++];
1873                 if ((rec->flags & FTRACE_FL_FREE) ||
1874
1875                     ((iter->flags & FTRACE_ITER_FILTER) &&
1876                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
1877
1878                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
1879                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
1880
1881                     ((iter->flags & FTRACE_ITER_ENABLED) &&
1882                      !(rec->flags & ~FTRACE_FL_MASK))) {
1883
1884                         rec = NULL;
1885                         goto retry;
1886                 }
1887         }
1888
1889         if (!rec)
1890                 return t_hash_start(m, pos);
1891
1892         iter->func = rec;
1893
1894         return iter;
1895 }
1896
1897 static void reset_iter_read(struct ftrace_iterator *iter)
1898 {
1899         iter->pos = 0;
1900         iter->func_pos = 0;
1901         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
1902 }
1903
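/*
 * seq_file start callback: take ftrace_lock for the whole traversal
 * (released in t_stop()) and re-walk the record pages from the start up
 * to *pos, since the pages may change whenever the lock is dropped.
 */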
1904 static void *t_start(struct seq_file *m, loff_t *pos)
1905 {
1906         struct ftrace_iterator *iter = m->private;
1907         struct ftrace_ops *ops = &global_ops;
1908         void *p = NULL;
1909         loff_t l;
1910
1911         mutex_lock(&ftrace_lock);
1912
1913         if (unlikely(ftrace_disabled))
1914                 return NULL;
1915
1916         /*
1917          * If an lseek was done, then reset and start from the beginning.
1918          */
1919         if (*pos < iter->pos)
1920                 reset_iter_read(iter);
1921
1922         /*
1923          * For set_ftrace_filter reading, if we have the filter
1924          * off, we can take a shortcut and just print out that all
1925          * functions are enabled.
1926          */
1927         if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
1928                 if (*pos > 0)
1929                         return t_hash_start(m, pos);
1930                 iter->flags |= FTRACE_ITER_PRINTALL;
1931                 /* reset in case of seek/pread */
1932                 iter->flags &= ~FTRACE_ITER_HASH;
1933                 return iter;
1934         }
1935
1936         if (iter->flags & FTRACE_ITER_HASH)
1937                 return t_hash_start(m, pos);
1938
1939         /*
1940          * Unfortunately, we need to restart at ftrace_pages_start
1941          * every time we let go of the ftrace_lock. This is because
1942          * those pointers can change without the lock.
1943          */
1944         iter->pg = ftrace_pages_start;
1945         iter->idx = 0;
1946         for (l = 0; l <= *pos; ) {
1947                 p = t_next(m, p, &l);
1948                 if (!p)
1949                         break;
1950         }
1951
1952         if (!p) {
1953                 if (iter->flags & FTRACE_ITER_FILTER)
1954                         return t_hash_start(m, pos);
1955
1956                 return NULL;
1957         }
1958
1959         return iter;
1960 }
1961
1962 static void t_stop(struct seq_file *m, void *p)
1963 {
1964         mutex_unlock(&ftrace_lock);
1965 }
1966
1967 static int t_show(struct seq_file *m, void *v)
1968 {
1969         struct ftrace_iterator *iter = m->private;
1970         struct dyn_ftrace *rec;
1971
1972         if (iter->flags & FTRACE_ITER_HASH)
1973                 return t_hash_show(m, iter);
1974
1975         if (iter->flags & FTRACE_ITER_PRINTALL) {
1976                 seq_printf(m, "#### all functions enabled ####\n");
1977                 return 0;
1978         }
1979
1980         rec = iter->func;
1981
1982         if (!rec)
1983                 return 0;
1984
1985         seq_printf(m, "%ps", (void *)rec->ip);
1986         if (iter->flags & FTRACE_ITER_ENABLED)
1987                 seq_printf(m, " (%ld)",
1988                            rec->flags & ~FTRACE_FL_MASK);
1989         seq_printf(m, "\n");
1990
1991         return 0;
1992 }
1993
1994 static const struct seq_operations show_ftrace_seq_ops = {
1995         .start = t_start,
1996         .next = t_next,
1997         .stop = t_stop,
1998         .show = t_show,
1999 };
2000
2001 static int
2002 ftrace_avail_open(struct inode *inode, struct file *file)
2003 {
2004         struct ftrace_iterator *iter;
2005         int ret;
2006
2007         if (unlikely(ftrace_disabled))
2008                 return -ENODEV;
2009
2010         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2011         if (!iter)
2012                 return -ENOMEM;
2013
2014         iter->pg = ftrace_pages_start;
2015
2016         ret = seq_open(file, &show_ftrace_seq_ops);
2017         if (!ret) {
2018                 struct seq_file *m = file->private_data;
2019
2020                 m->private = iter;
2021         } else {
2022                 kfree(iter);
2023         }
2024
2025         return ret;
2026 }
2027
2028 static int
2029 ftrace_enabled_open(struct inode *inode, struct file *file)
2030 {
2031         struct ftrace_iterator *iter;
2032         int ret;
2033
2034         if (unlikely(ftrace_disabled))
2035                 return -ENODEV;
2036
2037         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2038         if (!iter)
2039                 return -ENOMEM;
2040
2041         iter->pg = ftrace_pages_start;
2042         iter->flags = FTRACE_ITER_ENABLED;
2043
2044         ret = seq_open(file, &show_ftrace_seq_ops);
2045         if (!ret) {
2046                 struct seq_file *m = file->private_data;
2047
2048                 m->private = iter;
2049         } else {
2050                 kfree(iter);
2051         }
2052
2053         return ret;
2054 }
2055
2056 static void ftrace_filter_reset(struct ftrace_hash *hash)
2057 {
2058         mutex_lock(&ftrace_lock);
2059         ftrace_hash_clear(hash);
2060         mutex_unlock(&ftrace_lock);
2061 }
2062
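/*
 * Common open routine for set_ftrace_filter and set_ftrace_notrace.  On a
 * writable open a private copy of the ops hash is taken, so edits are
 * staged in iter->hash and only committed back to the ops in
 * ftrace_regex_release().  O_TRUNC clears the staged copy.
 */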
2063 static int
2064 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2065                   struct inode *inode, struct file *file)
2066 {
2067         struct ftrace_iterator *iter;
2068         struct ftrace_hash *hash;
2069         int ret = 0;
2070
2071         if (unlikely(ftrace_disabled))
2072                 return -ENODEV;
2073
2074         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2075         if (!iter)
2076                 return -ENOMEM;
2077
2078         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2079                 kfree(iter);
2080                 return -ENOMEM;
2081         }
2082
2083         if (flag & FTRACE_ITER_NOTRACE)
2084                 hash = ops->notrace_hash;
2085         else
2086                 hash = ops->filter_hash;
2087
2088         iter->ops = ops;
2089         iter->flags = flag;
2090
2091         if (file->f_mode & FMODE_WRITE) {
2092                 mutex_lock(&ftrace_lock);
2093                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2094                 mutex_unlock(&ftrace_lock);
2095
2096                 if (!iter->hash) {
2097                         trace_parser_put(&iter->parser);
2098                         kfree(iter);
2099                         return -ENOMEM;
2100                 }
2101         }
2102
2103         mutex_lock(&ftrace_regex_lock);
2104
2105         if ((file->f_mode & FMODE_WRITE) &&
2106             (file->f_flags & O_TRUNC))
2107                 ftrace_filter_reset(iter->hash);
2108
2109         if (file->f_mode & FMODE_READ) {
2110                 iter->pg = ftrace_pages_start;
2111
2112                 ret = seq_open(file, &show_ftrace_seq_ops);
2113                 if (!ret) {
2114                         struct seq_file *m = file->private_data;
2115                         m->private = iter;
2116                 } else {
2117                         /* Failed */
2118                         free_ftrace_hash(iter->hash);
2119                         trace_parser_put(&iter->parser);
2120                         kfree(iter);
2121                 }
2122         } else
2123                 file->private_data = iter;
2124         mutex_unlock(&ftrace_regex_lock);
2125
2126         return ret;
2127 }
2128
2129 static int
2130 ftrace_filter_open(struct inode *inode, struct file *file)
2131 {
2132         return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2133                                  inode, file);
2134 }
2135
2136 static int
2137 ftrace_notrace_open(struct inode *inode, struct file *file)
2138 {
2139         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2140                                  inode, file);
2141 }
2142
2143 static loff_t
2144 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2145 {
2146         loff_t ret;
2147
2148         if (file->f_mode & FMODE_READ)
2149                 ret = seq_lseek(file, offset, origin);
2150         else
2151                 file->f_pos = ret = 1;
2152
2153         return ret;
2154 }
2155
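/*
 * Compare @str against @regex according to @type: exact match, prefix
 * (front only), substring (middle only) or suffix (end only) match.
 */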
2156 static int ftrace_match(char *str, char *regex, int len, int type)
2157 {
2158         int matched = 0;
2159         int slen;
2160
2161         switch (type) {
2162         case MATCH_FULL:
2163                 if (strcmp(str, regex) == 0)
2164                         matched = 1;
2165                 break;
2166         case MATCH_FRONT_ONLY:
2167                 if (strncmp(str, regex, len) == 0)
2168                         matched = 1;
2169                 break;
2170         case MATCH_MIDDLE_ONLY:
2171                 if (strstr(str, regex))
2172                         matched = 1;
2173                 break;
2174         case MATCH_END_ONLY:
2175                 slen = strlen(str);
2176                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2177                         matched = 1;
2178                 break;
2179         }
2180
2181         return matched;
2182 }
2183
2184 static int
2185 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2186 {
2187         struct ftrace_func_entry *entry;
2188         int ret = 0;
2189
2190         entry = ftrace_lookup_ip(hash, rec->ip);
2191         if (not) {
2192                 /* Do nothing if it doesn't exist */
2193                 if (!entry)
2194                         return 0;
2195
2196                 free_hash_entry(hash, entry);
2197         } else {
2198                 /* Do nothing if it exists */
2199                 if (entry)
2200                         return 0;
2201
2202                 ret = add_hash_entry(hash, rec->ip);
2203         }
2204         return ret;
2205 }
2206
2207 static int
2208 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2209                     char *regex, int len, int type)
2210 {
2211         char str[KSYM_SYMBOL_LEN];
2212         char *modname;
2213
2214         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2215
2216         if (mod) {
2217                 /* module lookup requires matching the module */
2218                 if (!modname || strcmp(modname, mod))
2219                         return 0;
2220
2221                 /* blank search means to match all funcs in the mod */
2222                 if (!len)
2223                         return 1;
2224         }
2225
2226         return ftrace_match(str, regex, len, type);
2227 }
2228
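/*
 * Walk every dyn_ftrace record and enter those matching @buff (and, when
 * @mod is set, the given module) into @hash.  Returns 1 if anything
 * matched, 0 if nothing did, or a negative error.
 */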
2229 static int
2230 match_records(struct ftrace_hash *hash, char *buff,
2231               int len, char *mod, int not)
2232 {
2233         unsigned search_len = 0;
2234         struct ftrace_page *pg;
2235         struct dyn_ftrace *rec;
2236         int type = MATCH_FULL;
2237         char *search = buff;
2238         int found = 0;
2239         int ret;
2240
2241         if (len) {
2242                 type = filter_parse_regex(buff, len, &search, &not);
2243                 search_len = strlen(search);
2244         }
2245
2246         mutex_lock(&ftrace_lock);
2247
2248         if (unlikely(ftrace_disabled))
2249                 goto out_unlock;
2250
2251         do_for_each_ftrace_rec(pg, rec) {
2252
2253                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2254                         ret = enter_record(hash, rec, not);
2255                         if (ret < 0) {
2256                                 found = ret;
2257                                 goto out_unlock;
2258                         }
2259                         found = 1;
2260                 }
2261         } while_for_each_ftrace_rec();
2262  out_unlock:
2263         mutex_unlock(&ftrace_lock);
2264
2265         return found;
2266 }
2267
2268 static int
2269 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2270 {
2271         return match_records(hash, buff, len, NULL, 0);
2272 }
2273
2274 static int
2275 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2276 {
2277         int not = 0;
2278
2279         /* blank or '*' mean the same */
2280         if (strcmp(buff, "*") == 0)
2281                 buff[0] = 0;
2282
2283         /* handle the case of 'dont filter this module' */
2284         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2285                 buff[0] = 0;
2286                 not = 1;
2287         }
2288
2289         return match_records(hash, buff, strlen(buff), mod, not);
2290 }
2291
2292 /*
2293  * We register the module command as a template to show others how
2294  * to register a command as well (see the illustrative sketch below).
2295  */
2296
2297 static int
2298 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
2299 {
2300         struct ftrace_ops *ops = &global_ops;
2301         struct ftrace_hash *hash;
2302         char *mod;
2303         int ret = -EINVAL;
2304
2305         /*
2306          * cmd == 'mod' because we only registered this func
2307          * for the 'mod' ftrace_func_command.
2308          * But if you register one func with multiple commands,
2309          * you can tell which command was used by the cmd
2310          * parameter.
2311          */
2312
2313         /* we must have a module name */
2314         if (!param)
2315                 return ret;
2316
2317         mod = strsep(&param, ":");
2318         if (!strlen(mod))
2319                 return ret;
2320
2321         if (enable)
2322                 hash = ops->filter_hash;
2323         else
2324                 hash = ops->notrace_hash;
2325
2326         ret = ftrace_match_module_records(hash, func, mod);
2327         if (!ret)
2328                 ret = -EINVAL;
2329         if (ret < 0)
2330                 return ret;
2331
2332         return 0;
2333 }
2334
2335 static struct ftrace_func_command ftrace_mod_cmd = {
2336         .name                   = "mod",
2337         .func                   = ftrace_mod_callback,
2338 };
2339
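/*
 * Illustrative sketch only (not part of this file): a hypothetical command
 * named "mycmd" would be wired up the same way.  The names my_cmd_callback
 * and my_cmd are made up for this example.  The callback receives the
 * function pattern, the command name, any text after the second ':', and
 * whether the write came from set_ftrace_filter (enable) or
 * set_ftrace_notrace.
 *
 *	static int my_cmd_callback(char *func, char *cmd, char *param,
 *				   int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */
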
2340 static int __init ftrace_mod_cmd_init(void)
2341 {
2342         return register_ftrace_command(&ftrace_mod_cmd);
2343 }
2344 device_initcall(ftrace_mod_cmd_init);
2345
2346 static void
2347 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2348 {
2349         struct ftrace_func_probe *entry;
2350         struct hlist_head *hhd;
2351         struct hlist_node *n;
2352         unsigned long key;
2353
2354         key = hash_long(ip, FTRACE_HASH_BITS);
2355
2356         hhd = &ftrace_func_hash[key];
2357
2358         if (hlist_empty(hhd))
2359                 return;
2360
2361         /*
2362          * Disable preemption for these calls to prevent an RCU grace
2363          * period from elapsing. This synchronizes the hash iteration
2364          * with the freeing of hash items. rcu_read_lock is too dangerous here.
2365          */
2366         preempt_disable_notrace();
2367         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2368                 if (entry->ip == ip)
2369                         entry->ops->func(ip, parent_ip, &entry->data);
2370         }
2371         preempt_enable_notrace();
2372 }
2373
2374 static struct ftrace_ops trace_probe_ops __read_mostly =
2375 {
2376         .func           = function_trace_probe_call,
2377 };
2378
2379 static int ftrace_probe_registered;
2380
2381 static void __enable_ftrace_function_probe(void)
2382 {
2383         int i;
2384
2385         if (ftrace_probe_registered)
2386                 return;
2387
2388         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2389                 struct hlist_head *hhd = &ftrace_func_hash[i];
2390                 if (hhd->first)
2391                         break;
2392         }
2393         /* Nothing registered? */
2394         if (i == FTRACE_FUNC_HASHSIZE)
2395                 return;
2396
2397         __register_ftrace_function(&trace_probe_ops);
2398         ftrace_startup(&global_ops, 0);
2399         ftrace_probe_registered = 1;
2400 }
2401
2402 static void __disable_ftrace_function_probe(void)
2403 {
2404         int i;
2405
2406         if (!ftrace_probe_registered)
2407                 return;
2408
2409         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2410                 struct hlist_head *hhd = &ftrace_func_hash[i];
2411                 if (hhd->first)
2412                         return;
2413         }
2414
2415         /* no more funcs left */
2416         __unregister_ftrace_function(&trace_probe_ops);
2417         ftrace_shutdown(&global_ops, 0);
2418         ftrace_probe_registered = 0;
2419 }
2420
2421
2422 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2423 {
2424         struct ftrace_func_probe *entry =
2425                 container_of(rhp, struct ftrace_func_probe, rcu);
2426
2427         if (entry->ops->free)
2428                 entry->ops->free(&entry->data);
2429         kfree(entry);
2430 }
2431
2432
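/**
 * register_ftrace_function_probe - attach a probe to matching functions
 * @glob - the pattern the function names must match ('!' is not accepted)
 * @ops - probe ops whose ->func is called whenever a matched function runs
 * @data - private data stored with each probe entry
 *
 * Returns the number of functions the probe was attached to, or a
 * negative error.
 */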
2433 int
2434 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2435                               void *data)
2436 {
2437         struct ftrace_func_probe *entry;
2438         struct ftrace_page *pg;
2439         struct dyn_ftrace *rec;
2440         int type, len, not;
2441         unsigned long key;
2442         int count = 0;
2443         char *search;
2444
2445         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2446         len = strlen(search);
2447
2448         /* we do not support '!' for function probes */
2449         if (WARN_ON(not))
2450                 return -EINVAL;
2451
2452         mutex_lock(&ftrace_lock);
2453
2454         if (unlikely(ftrace_disabled))
2455                 goto out_unlock;
2456
2457         do_for_each_ftrace_rec(pg, rec) {
2458
2459                 if (!ftrace_match_record(rec, NULL, search, len, type))
2460                         continue;
2461
2462                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2463                 if (!entry) {
2464                         /* If we did not process any, then return error */
2465                         if (!count)
2466                                 count = -ENOMEM;
2467                         goto out_unlock;
2468                 }
2469
2470                 count++;
2471
2472                 entry->data = data;
2473
2474                 /*
2475                  * The caller might want to do something special
2476                  * for each function we find. We call the callback
2477                  * to give the caller an opportunity to do so.
2478                  */
2479                 if (ops->callback) {
2480                         if (ops->callback(rec->ip, &entry->data) < 0) {
2481                                 /* caller does not like this func */
2482                                 kfree(entry);
2483                                 continue;
2484                         }
2485                 }
2486
2487                 entry->ops = ops;
2488                 entry->ip = rec->ip;
2489
2490                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2491                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2492
2493         } while_for_each_ftrace_rec();
2494         __enable_ftrace_function_probe();
2495
2496  out_unlock:
2497         mutex_unlock(&ftrace_lock);
2498
2499         return count;
2500 }
2501
2502 enum {
2503         PROBE_TEST_FUNC         = 1,
2504         PROBE_TEST_DATA         = 2
2505 };
2506
2507 static void
2508 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2509                                   void *data, int flags)
2510 {
2511         struct ftrace_func_probe *entry;
2512         struct hlist_node *n, *tmp;
2513         char str[KSYM_SYMBOL_LEN];
2514         int type = MATCH_FULL;
2515         int i, len = 0;
2516         char *search;
2517
2518         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2519                 glob = NULL;
2520         else if (glob) {
2521                 int not;
2522
2523                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2524                 len = strlen(search);
2525
2526                 /* we do not support '!' for function probes */
2527                 if (WARN_ON(not))
2528                         return;
2529         }
2530
2531         mutex_lock(&ftrace_lock);
2532         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2533                 struct hlist_head *hhd = &ftrace_func_hash[i];
2534
2535                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2536
2537                         /* break up if statements for readability */
2538                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2539                                 continue;
2540
2541                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2542                                 continue;
2543
2544                         /* do this last, since it is the most expensive */
2545                         if (glob) {
2546                                 kallsyms_lookup(entry->ip, NULL, NULL,
2547                                                 NULL, str);
2548                                 if (!ftrace_match(str, glob, len, type))
2549                                         continue;
2550                         }
2551
2552                         hlist_del(&entry->node);
2553                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2554                 }
2555         }
2556         __disable_ftrace_function_probe();
2557         mutex_unlock(&ftrace_lock);
2558 }
2559
2560 void
2561 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2562                                 void *data)
2563 {
2564         __unregister_ftrace_function_probe(glob, ops, data,
2565                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2566 }
2567
2568 void
2569 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2570 {
2571         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2572 }
2573
2574 void unregister_ftrace_function_probe_all(char *glob)
2575 {
2576         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2577 }
2578
2579 static LIST_HEAD(ftrace_commands);
2580 static DEFINE_MUTEX(ftrace_cmd_mutex);
2581
2582 int register_ftrace_command(struct ftrace_func_command *cmd)
2583 {
2584         struct ftrace_func_command *p;
2585         int ret = 0;
2586
2587         mutex_lock(&ftrace_cmd_mutex);
2588         list_for_each_entry(p, &ftrace_commands, list) {
2589                 if (strcmp(cmd->name, p->name) == 0) {
2590                         ret = -EBUSY;
2591                         goto out_unlock;
2592                 }
2593         }
2594         list_add(&cmd->list, &ftrace_commands);
2595  out_unlock:
2596         mutex_unlock(&ftrace_cmd_mutex);
2597
2598         return ret;
2599 }
2600
2601 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2602 {
2603         struct ftrace_func_command *p, *n;
2604         int ret = -ENODEV;
2605
2606         mutex_lock(&ftrace_cmd_mutex);
2607         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2608                 if (strcmp(cmd->name, p->name) == 0) {
2609                         ret = 0;
2610                         list_del_init(&p->list);
2611                         goto out_unlock;
2612                 }
2613         }
2614  out_unlock:
2615         mutex_unlock(&ftrace_cmd_mutex);
2616
2617         return ret;
2618 }
2619
2620 static int ftrace_process_regex(struct ftrace_hash *hash,
2621                                 char *buff, int len, int enable)
2622 {
2623         char *func, *command, *next = buff;
2624         struct ftrace_func_command *p;
2625         int ret = -EINVAL;  /* returned when no command matches */
2626
2627         func = strsep(&next, ":");
2628
2629         if (!next) {
2630                 ret = ftrace_match_records(hash, func, len);
2631                 if (!ret)
2632                         ret = -EINVAL;
2633                 if (ret < 0)
2634                         return ret;
2635                 return 0;
2636         }
2637
2638         /* command found */
2639
2640         command = strsep(&next, ":");
2641
2642         mutex_lock(&ftrace_cmd_mutex);
2643         list_for_each_entry(p, &ftrace_commands, list) {
2644                 if (strcmp(p->name, command) == 0) {
2645                         ret = p->func(func, command, next, enable);
2646                         goto out_unlock;
2647                 }
2648         }
2649  out_unlock:
2650         mutex_unlock(&ftrace_cmd_mutex);
2651
2652         return ret;
2653 }
2654
2655 static ssize_t
2656 ftrace_regex_write(struct file *file, const char __user *ubuf,
2657                    size_t cnt, loff_t *ppos, int enable)
2658 {
2659         struct ftrace_iterator *iter;
2660         struct trace_parser *parser;
2661         ssize_t ret, read;
2662
2663         if (!cnt)
2664                 return 0;
2665
2666         mutex_lock(&ftrace_regex_lock);
2667
2668         ret = -ENODEV;
2669         if (unlikely(ftrace_disabled))
2670                 goto out_unlock;
2671
2672         if (file->f_mode & FMODE_READ) {
2673                 struct seq_file *m = file->private_data;
2674                 iter = m->private;
2675         } else
2676                 iter = file->private_data;
2677
2678         parser = &iter->parser;
2679         read = trace_get_user(parser, ubuf, cnt, ppos);
2680
2681         if (read >= 0 && trace_parser_loaded(parser) &&
2682             !trace_parser_cont(parser)) {
2683                 ret = ftrace_process_regex(iter->hash, parser->buffer,
2684                                            parser->idx, enable);
2685                 trace_parser_clear(parser);
2686                 if (ret)
2687                         goto out_unlock;
2688         }
2689
2690         ret = read;
2691 out_unlock:
2692         mutex_unlock(&ftrace_regex_lock);
2693
2694         return ret;
2695 }
2696
2697 static ssize_t
2698 ftrace_filter_write(struct file *file, const char __user *ubuf,
2699                     size_t cnt, loff_t *ppos)
2700 {
2701         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2702 }
2703
2704 static ssize_t
2705 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2706                      size_t cnt, loff_t *ppos)
2707 {
2708         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2709 }
2710
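/*
 * Update the filter (@enable) or notrace (!@enable) hash of @ops: work on
 * a private copy, optionally reset it, apply the matches in @buf, then
 * swap the copy in under ftrace_lock.
 */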
2711 static int
2712 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2713                  int reset, int enable)
2714 {
2715         struct ftrace_hash **orig_hash;
2716         struct ftrace_hash *hash;
2717         int ret;
2718
2719         if (unlikely(ftrace_disabled))
2720                 return -ENODEV;
2721
2722         if (enable)
2723                 orig_hash = &ops->filter_hash;
2724         else
2725                 orig_hash = &ops->notrace_hash;
2726
2727         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2728         if (!hash)
2729                 return -ENOMEM;
2730
2731         mutex_lock(&ftrace_regex_lock);
2732         if (reset)
2733                 ftrace_filter_reset(hash);
2734         if (buf)
2735                 ftrace_match_records(hash, buf, len);
2736
2737         mutex_lock(&ftrace_lock);
2738         ret = ftrace_hash_move(orig_hash, hash);
2739         mutex_unlock(&ftrace_lock);
2740
2741         mutex_unlock(&ftrace_regex_lock);
2742
2743         free_ftrace_hash(hash);
2744         return ret;
2745 }
2746
2747 /**
2748  * ftrace_set_filter - set a function to filter on in ftrace
2749  * @buf - the string that holds the function filter text.
2750  * @len - the length of the string.
2751  * @reset - non zero to reset all filters before applying this filter.
2752  *
2753  * Filters denote which functions should be enabled when tracing is enabled.
2754  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2755  */
2756 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2757 {
2758         ftrace_set_regex(&global_ops, buf, len, reset, 1);
2759 }
2760
2761 /**
2762  * ftrace_set_notrace - set a function to not trace in ftrace
2763  * @buf - the string that holds the function notrace text.
2764  * @len - the length of the string.
2765  * @reset - non zero to reset all filters before applying this filter.
2766  *
2767  * Notrace Filters denote which functions should not be enabled when tracing
2768  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2769  * for tracing.
2770  */
2771 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2772 {
2773         ftrace_set_regex(&global_ops, buf, len, reset, 0);
2774 }
2775
2776 /*
2777  * command line interface to allow users to set filters on boot up.
2778  */
2779 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2780 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2781 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2782
2783 static int __init set_ftrace_notrace(char *str)
2784 {
2785         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2786         return 1;
2787 }
2788 __setup("ftrace_notrace=", set_ftrace_notrace);
2789
2790 static int __init set_ftrace_filter(char *str)
2791 {
2792         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2793         return 1;
2794 }
2795 __setup("ftrace_filter=", set_ftrace_filter);
2796
2797 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2798 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2799 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2800
2801 static int __init set_graph_function(char *str)
2802 {
2803         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2804         return 1;
2805 }
2806 __setup("ftrace_graph_filter=", set_graph_function);
2807
2808 static void __init set_ftrace_early_graph(char *buf)
2809 {
2810         int ret;
2811         char *func;
2812
2813         while (buf) {
2814                 func = strsep(&buf, ",");
2815                 /* we allow only one expression at a time */
2816                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2817                                       func);
2818                 if (ret)
2819                         printk(KERN_DEBUG "ftrace: function %s not "
2820                                           "traceable\n", func);
2821         }
2822 }
2823 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2824
2825 static void __init
2826 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2827 {
2828         char *func;
2829
2830         while (buf) {
2831                 func = strsep(&buf, ",");
2832                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2833         }
2834 }
2835
2836 static void __init set_ftrace_early_filters(void)
2837 {
2838         if (ftrace_filter_buf[0])
2839                 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
2840         if (ftrace_notrace_buf[0])
2841                 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
2842 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2843         if (ftrace_graph_buf[0])
2844                 set_ftrace_early_graph(ftrace_graph_buf);
2845 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2846 }
2847
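/*
 * Release for set_ftrace_filter/set_ftrace_notrace: flush any input still
 * sitting in the parser, and on a writable open commit the staged hash
 * back into the ops and update the enabled records if needed.
 */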
2848 static int
2849 ftrace_regex_release(struct inode *inode, struct file *file)
2850 {
2851         struct seq_file *m = (struct seq_file *)file->private_data;
2852         struct ftrace_iterator *iter;
2853         struct ftrace_hash **orig_hash;
2854         struct trace_parser *parser;
2855         int filter_hash;
2856         int ret;
2857
2858         mutex_lock(&ftrace_regex_lock);
2859         if (file->f_mode & FMODE_READ) {
2860                 iter = m->private;
2861
2862                 seq_release(inode, file);
2863         } else
2864                 iter = file->private_data;
2865
2866         parser = &iter->parser;
2867         if (trace_parser_loaded(parser)) {
2868                 parser->buffer[parser->idx] = 0;
2869                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
2870         }
2871
2872         trace_parser_put(parser);
2873
2874         if (file->f_mode & FMODE_WRITE) {
2875                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
2876
2877                 if (filter_hash)
2878                         orig_hash = &iter->ops->filter_hash;
2879                 else
2880                         orig_hash = &iter->ops->notrace_hash;
2881
2882                 mutex_lock(&ftrace_lock);
2883                 /*
2884                  * Remove the current set, update the hash and add
2885                  * them back.
2886                  */
2887                 ftrace_hash_rec_disable(iter->ops, filter_hash);
2888                 ret = ftrace_hash_move(orig_hash, iter->hash);
2889                 if (!ret) {
2890                         ftrace_hash_rec_enable(iter->ops, filter_hash);
2891                         if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
2892                             && ftrace_enabled)
2893                                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2894                 }
2895                 mutex_unlock(&ftrace_lock);
2896         }
2897         free_ftrace_hash(iter->hash);
2898         kfree(iter);
2899
2900         mutex_unlock(&ftrace_regex_lock);
2901         return 0;
2902 }
2903
2904 static const struct file_operations ftrace_avail_fops = {
2905         .open = ftrace_avail_open,
2906         .read = seq_read,
2907         .llseek = seq_lseek,
2908         .release = seq_release_private,
2909 };
2910
2911 static const struct file_operations ftrace_enabled_fops = {
2912         .open = ftrace_enabled_open,
2913         .read = seq_read,
2914         .llseek = seq_lseek,
2915         .release = seq_release_private,
2916 };
2917
2918 static const struct file_operations ftrace_filter_fops = {
2919         .open = ftrace_filter_open,
2920         .read = seq_read,
2921         .write = ftrace_filter_write,
2922         .llseek = ftrace_regex_lseek,
2923         .release = ftrace_regex_release,
2924 };
2925
2926 static const struct file_operations ftrace_notrace_fops = {
2927         .open = ftrace_notrace_open,
2928         .read = seq_read,
2929         .write = ftrace_notrace_write,
2930         .llseek = ftrace_regex_lseek,
2931         .release = ftrace_regex_release,
2932 };
2933
2934 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2935
2936 static DEFINE_MUTEX(graph_lock);
2937
2938 int ftrace_graph_count;
2939 int ftrace_graph_filter_enabled;
2940 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2941
2942 static void *
2943 __g_next(struct seq_file *m, loff_t *pos)
2944 {
2945         if (*pos >= ftrace_graph_count)
2946                 return NULL;
2947         return &ftrace_graph_funcs[*pos];
2948 }
2949
2950 static void *
2951 g_next(struct seq_file *m, void *v, loff_t *pos)
2952 {
2953         (*pos)++;
2954         return __g_next(m, pos);
2955 }
2956
2957 static void *g_start(struct seq_file *m, loff_t *pos)
2958 {
2959         mutex_lock(&graph_lock);
2960
2961         /* Nothing is set; tell g_show to print that all functions are enabled */
2962         if (!ftrace_graph_filter_enabled && !*pos)
2963                 return (void *)1;
2964
2965         return __g_next(m, pos);
2966 }
2967
2968 static void g_stop(struct seq_file *m, void *p)
2969 {
2970         mutex_unlock(&graph_lock);
2971 }
2972
2973 static int g_show(struct seq_file *m, void *v)
2974 {
2975         unsigned long *ptr = v;
2976
2977         if (!ptr)
2978                 return 0;
2979
2980         if (ptr == (unsigned long *)1) {
2981                 seq_printf(m, "#### all functions enabled ####\n");
2982                 return 0;
2983         }
2984
2985         seq_printf(m, "%ps\n", (void *)*ptr);
2986
2987         return 0;
2988 }
2989
2990 static const struct seq_operations ftrace_graph_seq_ops = {
2991         .start = g_start,
2992         .next = g_next,
2993         .stop = g_stop,
2994         .show = g_show,
2995 };
2996
2997 static int
2998 ftrace_graph_open(struct inode *inode, struct file *file)
2999 {
3000         int ret = 0;
3001
3002         if (unlikely(ftrace_disabled))
3003                 return -ENODEV;
3004
3005         mutex_lock(&graph_lock);
3006         if ((file->f_mode & FMODE_WRITE) &&
3007             (file->f_flags & O_TRUNC)) {
3008                 ftrace_graph_filter_enabled = 0;
3009                 ftrace_graph_count = 0;
3010                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3011         }
3012         mutex_unlock(&graph_lock);
3013
3014         if (file->f_mode & FMODE_READ)
3015                 ret = seq_open(file, &ftrace_graph_seq_ops);
3016
3017         return ret;
3018 }
3019
3020 static int
3021 ftrace_graph_release(struct inode *inode, struct file *file)
3022 {
3023         if (file->f_mode & FMODE_READ)
3024                 seq_release(inode, file);
3025         return 0;
3026 }
3027
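/*
 * Add the functions matching @buffer to the graph filter @array (or, with
 * a leading '!', remove them).  Returns -EBUSY when the array is already
 * full, -EINVAL when nothing matched, and -ENODEV when ftrace is disabled.
 */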
3028 static int
3029 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3030 {
3031         struct dyn_ftrace *rec;
3032         struct ftrace_page *pg;
3033         int search_len;
3034         int fail = 1;
3035         int type, not;
3036         char *search;
3037         bool exists;
3038         int i;
3039
3040         /* decode regex */
3041         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3042         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3043                 return -EBUSY;
3044
3045         search_len = strlen(search);
3046
3047         mutex_lock(&ftrace_lock);
3048
3049         if (unlikely(ftrace_disabled)) {
3050                 mutex_unlock(&ftrace_lock);
3051                 return -ENODEV;
3052         }
3053
3054         do_for_each_ftrace_rec(pg, rec) {
3055
3056                 if (rec->flags & FTRACE_FL_FREE)
3057                         continue;
3058
3059                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3060                         /* if it is in the array */
3061                         exists = false;
3062                         for (i = 0; i < *idx; i++) {
3063                                 if (array[i] == rec->ip) {
3064                                         exists = true;
3065                                         break;
3066                                 }
3067                         }
3068
3069                         if (!not) {
3070                                 fail = 0;
3071                                 if (!exists) {
3072                                         array[(*idx)++] = rec->ip;
3073                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3074                                                 goto out;
3075                                 }
3076                         } else {
3077                                 if (exists) {
3078                                         array[i] = array[--(*idx)];
3079                                         array[*idx] = 0;
3080                                         fail = 0;
3081                                 }
3082                         }
3083                 }
3084         } while_for_each_ftrace_rec();
3085 out:
3086         mutex_unlock(&ftrace_lock);
3087
3088         if (fail)
3089                 return -EINVAL;
3090
3091         ftrace_graph_filter_enabled = 1;
3092         return 0;
3093 }
3094
3095 static ssize_t
3096 ftrace_graph_write(struct file *file, const char __user *ubuf,
3097                    size_t cnt, loff_t *ppos)
3098 {
3099         struct trace_parser parser;
3100         ssize_t read, ret;
3101
3102         if (!cnt)
3103                 return 0;
3104
3105         mutex_lock(&graph_lock);
3106
3107         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3108                 ret = -ENOMEM;
3109                 goto out_unlock;
3110         }
3111
3112         read = trace_get_user(&parser, ubuf, cnt, ppos);
3113
3114         if (read >= 0 && trace_parser_loaded(&parser)) {
3115                 parser.buffer[parser.idx] = 0;
3116
3117                 /* we allow only one expression at a time */
3118                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3119                                         parser.buffer);
3120                 if (ret)
3121                         goto out_free;
3122         }
3123
3124         ret = read;
3125
3126 out_free:
3127         trace_parser_put(&parser);
3128 out_unlock:
3129         mutex_unlock(&graph_lock);
3130
3131         return ret;
3132 }
3133
3134 static const struct file_operations ftrace_graph_fops = {
3135         .open           = ftrace_graph_open,
3136         .read           = seq_read,
3137         .write          = ftrace_graph_write,
3138         .release        = ftrace_graph_release,
3139         .llseek         = seq_lseek,
3140 };
3141 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3142
3143 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3144 {
3145
3146         trace_create_file("available_filter_functions", 0444,
3147                         d_tracer, NULL, &ftrace_avail_fops);
3148
3149         trace_create_file("enabled_functions", 0444,
3150                         d_tracer, NULL, &ftrace_enabled_fops);
3151
3152         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3153                         NULL, &ftrace_filter_fops);
3154
3155         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3156                                     NULL, &ftrace_notrace_fops);
3157
3158 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3159         trace_create_file("set_graph_function", 0444, d_tracer,
3160                                     NULL,
3161                                     &ftrace_graph_fops);
3162 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3163
3164         return 0;
3165 }
3166
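/*
 * Record every mcount call site in the [start, end) table as a dyn_ftrace
 * entry and run the initial code update.  NULL entries, which some linkers
 * insert as section padding, are skipped.
 */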
3167 static int ftrace_process_locs(struct module *mod,
3168                                unsigned long *start,
3169                                unsigned long *end)
3170 {
3171         unsigned long *p;
3172         unsigned long addr;
3173
3174         mutex_lock(&ftrace_lock);
3175         p = start;
3176         while (p < end) {
3177                 addr = ftrace_call_adjust(*p++);
3178                 /*
3179                  * Some architecture linkers will pad between
3180                  * the different mcount_loc sections of different
3181                  * object files to satisfy alignments.
3182                  * Skip any NULL pointers.
3183                  */
3184                 if (!addr)
3185                         continue;
3186                 ftrace_record_ip(addr);
3187         }
3188
3189         ftrace_update_code(mod);
3190         mutex_unlock(&ftrace_lock);
3191
3192         return 0;
3193 }
3194
3195 #ifdef CONFIG_MODULES
3196 void ftrace_release_mod(struct module *mod)
3197 {
3198         struct dyn_ftrace *rec;
3199         struct ftrace_page *pg;
3200
3201         mutex_lock(&ftrace_lock);
3202
3203         if (ftrace_disabled)
3204                 goto out_unlock;
3205
3206         do_for_each_ftrace_rec(pg, rec) {
3207                 if (within_module_core(rec->ip, mod)) {
3208                         /*
3209                          * rec->ip is changed in ftrace_free_rec(); it should
3210                          * no longer be within the module if the record was freed.
3211                          */
3212                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3213                         ftrace_free_rec(rec);
3214                 }
3215         } while_for_each_ftrace_rec();
3216  out_unlock:
3217         mutex_unlock(&ftrace_lock);
3218 }
3219
3220 static void ftrace_init_module(struct module *mod,
3221                                unsigned long *start, unsigned long *end)
3222 {
3223         if (ftrace_disabled || start == end)
3224                 return;
3225         ftrace_process_locs(mod, start, end);
3226 }
3227
3228 static int ftrace_module_notify(struct notifier_block *self,
3229                                 unsigned long val, void *data)
3230 {
3231         struct module *mod = data;
3232
3233         switch (val) {
3234         case MODULE_STATE_COMING:
3235                 ftrace_init_module(mod, mod->ftrace_callsites,
3236                                    mod->ftrace_callsites +
3237                                    mod->num_ftrace_callsites);
3238                 break;
3239         case MODULE_STATE_GOING:
3240                 ftrace_release_mod(mod);
3241                 break;
3242         }
3243
3244         return 0;
3245 }
3246 #else
3247 static int ftrace_module_notify(struct notifier_block *self,
3248                                 unsigned long val, void *data)
3249 {
3250         return 0;
3251 }
3252 #endif /* CONFIG_MODULES */
3253
3254 struct notifier_block ftrace_module_nb = {
3255         .notifier_call = ftrace_module_notify,
3256         .priority = 0,
3257 };
3258
3259 extern unsigned long __start_mcount_loc[];
3260 extern unsigned long __stop_mcount_loc[];
3261
3262 void __init ftrace_init(void)
3263 {
3264         unsigned long count, addr, flags;
3265         int ret;
3266
3267         /* Keep the ftrace pointer to the stub */
3268         addr = (unsigned long)ftrace_stub;
3269
3270         local_irq_save(flags);
3271         ftrace_dyn_arch_init(&addr);
3272         local_irq_restore(flags);
3273
3274         /* ftrace_dyn_arch_init places the return code in addr */
3275         if (addr)
3276                 goto failed;
3277
3278         count = __stop_mcount_loc - __start_mcount_loc;
3279
3280         ret = ftrace_dyn_table_alloc(count);
3281         if (ret)
3282                 goto failed;
3283
3284         last_ftrace_enabled = ftrace_enabled = 1;
3285
3286         ret = ftrace_process_locs(NULL,
3287                                   __start_mcount_loc,
3288                                   __stop_mcount_loc);
3289
3290         ret = register_module_notifier(&ftrace_module_nb);
3291         if (ret)
3292                 pr_warning("Failed to register trace ftrace module notifier\n");
3293
3294         set_ftrace_early_filters();
3295
3296         return;
3297  failed:
3298         ftrace_disabled = 1;
3299 }
3300
3301 #else
3302
3303 static struct ftrace_ops global_ops = {
3304         .func                   = ftrace_stub,
3305 };
3306
3307 static int __init ftrace_nodyn_init(void)
3308 {
3309         ftrace_enabled = 1;
3310         return 0;
3311 }
3312 device_initcall(ftrace_nodyn_init);
3313
3314 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3315 static inline void ftrace_startup_enable(int command) { }
3316 /* Keep as macros so we do not need to define the commands */
3317 # define ftrace_startup(ops, command)   do { } while (0)
3318 # define ftrace_shutdown(ops, command)  do { } while (0)
3319 # define ftrace_startup_sysctl()        do { } while (0)
3320 # define ftrace_shutdown_sysctl()       do { } while (0)
3321 #endif /* CONFIG_DYNAMIC_FTRACE */
3322
3323 static void clear_ftrace_swapper(void)
3324 {
3325         struct task_struct *p;
3326         int cpu;
3327
3328         get_online_cpus();
3329         for_each_online_cpu(cpu) {
3330                 p = idle_task(cpu);
3331                 clear_tsk_trace_trace(p);
3332         }
3333         put_online_cpus();
3334 }
3335
3336 static void set_ftrace_swapper(void)
3337 {
3338         struct task_struct *p;
3339         int cpu;
3340
3341         get_online_cpus();
3342         for_each_online_cpu(cpu) {
3343                 p = idle_task(cpu);
3344                 set_tsk_trace_trace(p);
3345         }
3346         put_online_cpus();
3347 }
3348
3349 static void clear_ftrace_pid(struct pid *pid)
3350 {
3351         struct task_struct *p;
3352
3353         rcu_read_lock();
3354         do_each_pid_task(pid, PIDTYPE_PID, p) {
3355                 clear_tsk_trace_trace(p);
3356         } while_each_pid_task(pid, PIDTYPE_PID, p);
3357         rcu_read_unlock();
3358
3359         put_pid(pid);
3360 }
3361
3362 static void set_ftrace_pid(struct pid *pid)
3363 {
3364         struct task_struct *p;
3365
3366         rcu_read_lock();
3367         do_each_pid_task(pid, PIDTYPE_PID, p) {
3368                 set_tsk_trace_trace(p);
3369         } while_each_pid_task(pid, PIDTYPE_PID, p);
3370         rcu_read_unlock();
3371 }
3372
3373 static void clear_ftrace_pid_task(struct pid *pid)
3374 {
3375         if (pid == ftrace_swapper_pid)
3376                 clear_ftrace_swapper();
3377         else
3378                 clear_ftrace_pid(pid);
3379 }
3380
3381 static void set_ftrace_pid_task(struct pid *pid)
3382 {
3383         if (pid == ftrace_swapper_pid)
3384                 set_ftrace_swapper();
3385         else
3386                 set_ftrace_pid(pid);
3387 }
3388
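/*
 * Add pid @p to the set_ftrace_pid list; 0 selects the per-CPU idle
 * (swapper) tasks.  A pid that is already registered is treated as
 * success; otherwise the task flags and pid hooks are updated.
 */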
3389 static int ftrace_pid_add(int p)
3390 {
3391         struct pid *pid;
3392         struct ftrace_pid *fpid;
3393         int ret = -EINVAL;
3394
3395         mutex_lock(&ftrace_lock);
3396
3397         if (!p)
3398                 pid = ftrace_swapper_pid;
3399         else
3400                 pid = find_get_pid(p);
3401
3402         if (!pid)
3403                 goto out;
3404
3405         ret = 0;
3406
3407         list_for_each_entry(fpid, &ftrace_pids, list)
3408                 if (fpid->pid == pid)
3409                         goto out_put;
3410
3411         ret = -ENOMEM;
3412
3413         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3414         if (!fpid)
3415                 goto out_put;
3416
3417         list_add(&fpid->list, &ftrace_pids);
3418         fpid->pid = pid;
3419
3420         set_ftrace_pid_task(pid);
3421
3422         ftrace_update_pid_func();
3423         ftrace_startup_enable(0);
3424
3425         mutex_unlock(&ftrace_lock);
3426         return 0;
3427
3428 out_put:
3429         if (pid != ftrace_swapper_pid)
3430                 put_pid(pid);
3431
3432 out:
3433         mutex_unlock(&ftrace_lock);
3434         return ret;
3435 }
3436
3437 static void ftrace_pid_reset(void)
3438 {
3439         struct ftrace_pid *fpid, *safe;
3440
3441         mutex_lock(&ftrace_lock);
3442         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3443                 struct pid *pid = fpid->pid;
3444
3445                 clear_ftrace_pid_task(pid);
3446
3447                 list_del(&fpid->list);
3448                 kfree(fpid);
3449         }
3450
3451         ftrace_update_pid_func();
3452         ftrace_startup_enable(0);
3453
3454         mutex_unlock(&ftrace_lock);
3455 }
3456
3457 static void *fpid_start(struct seq_file *m, loff_t *pos)
3458 {
3459         mutex_lock(&ftrace_lock);
3460
3461         if (list_empty(&ftrace_pids) && (!*pos))
3462                 return (void *) 1;
3463
3464         return seq_list_start(&ftrace_pids, *pos);
3465 }
3466
3467 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3468 {
3469         if (v == (void *)1)
3470                 return NULL;
3471
3472         return seq_list_next(v, &ftrace_pids, pos);
3473 }
3474
3475 static void fpid_stop(struct seq_file *m, void *p)
3476 {
3477         mutex_unlock(&ftrace_lock);
3478 }
3479
3480 static int fpid_show(struct seq_file *m, void *v)
3481 {
3482         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3483
3484         if (v == (void *)1) {
3485                 seq_printf(m, "no pid\n");
3486                 return 0;
3487         }
3488
3489         if (fpid->pid == ftrace_swapper_pid)
3490                 seq_printf(m, "swapper tasks\n");
3491         else
3492                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3493
3494         return 0;
3495 }
3496
3497 static const struct seq_operations ftrace_pid_sops = {
3498         .start = fpid_start,
3499         .next = fpid_next,
3500         .stop = fpid_stop,
3501         .show = fpid_show,
3502 };
3503
3504 static int
3505 ftrace_pid_open(struct inode *inode, struct file *file)
3506 {
3507         int ret = 0;
3508
3509         if ((file->f_mode & FMODE_WRITE) &&
3510             (file->f_flags & O_TRUNC))
3511                 ftrace_pid_reset();
3512
3513         if (file->f_mode & FMODE_READ)
3514                 ret = seq_open(file, &ftrace_pid_sops);
3515
3516         return ret;
3517 }
3518
3519 static ssize_t
3520 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3521                    size_t cnt, loff_t *ppos)
3522 {
3523         char buf[64], *tmp;
3524         long val;
3525         int ret;
3526
3527         if (cnt >= sizeof(buf))
3528                 return -EINVAL;
3529
3530         if (copy_from_user(&buf, ubuf, cnt))
3531                 return -EFAULT;
3532
3533         buf[cnt] = 0;
3534
3535         /*
3536          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3537          * to clear the filter quietly.
3538          */
3539         tmp = strstrip(buf);
3540         if (strlen(tmp) == 0)
3541                 return 1;
3542
3543         ret = strict_strtol(tmp, 10, &val);
3544         if (ret < 0)
3545                 return ret;
3546
3547         ret = ftrace_pid_add(val);
3548
3549         return ret ? ret : cnt;
3550 }
3551
3552 static int
3553 ftrace_pid_release(struct inode *inode, struct file *file)
3554 {
3555         if (file->f_mode & FMODE_READ)
3556                 seq_release(inode, file);
3557
3558         return 0;
3559 }
3560
3561 static const struct file_operations ftrace_pid_fops = {
3562         .open           = ftrace_pid_open,
3563         .write          = ftrace_pid_write,
3564         .read           = seq_read,
3565         .llseek         = seq_lseek,
3566         .release        = ftrace_pid_release,
3567 };
3568
3569 static __init int ftrace_init_debugfs(void)
3570 {
3571         struct dentry *d_tracer;
3572
3573         d_tracer = tracing_init_dentry();
3574         if (!d_tracer)
3575                 return 0;
3576
3577         ftrace_init_dyn_debugfs(d_tracer);
3578
3579         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3580                             NULL, &ftrace_pid_fops);
3581
3582         ftrace_profile_debugfs(d_tracer);
3583
3584         return 0;
3585 }
3586 fs_initcall(ftrace_init_debugfs);
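/*
 * Editorial note -- an illustrative usage sketch, not part of this file:
 * the set_ftrace_pid file created above lives in the tracing debugfs
 * directory, typically mounted at /sys/kernel/debug.  Assuming that
 * mount point:
 *
 *   echo 1234 >  /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 1234 only
 *   echo 5678 >> /sys/kernel/debug/tracing/set_ftrace_pid   # add a second pid
 *   cat          /sys/kernel/debug/tracing/set_ftrace_pid   # show the filter
 *   echo       > /sys/kernel/debug/tracing/set_ftrace_pid   # clear it quietly
 *
 * Writing 0 selects the per-cpu swapper (idle) tasks, matching the
 * ftrace_swapper_pid handling in ftrace_pid_add().
 */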
3587
3588 /**
3589  * ftrace_kill - kill ftrace
3590  *
3591  * This function should be used by panic code. It stops ftrace
3592  * but in a not so nice way. To stop tracing cleanly from a
3593  * non-atomic context, use unregister_ftrace_function() instead.
3594  */
3595 void ftrace_kill(void)
3596 {
3597         ftrace_disabled = 1;
3598         ftrace_enabled = 0;
3599         clear_ftrace_function();
3600 }
3601
3602 /**
3603  * register_ftrace_function - register a function for profiling
3604  * @ops: ops structure that holds the function for profiling.
3605  *
3606  * Register a callback to be invoked from every function traced
3607  * in the kernel.
3608  *
3609  * Note: @ops->func and every function it calls must be marked
3610  *       "notrace", otherwise the callback would itself be traced
3611  *       and recurse.  (A minimal usage sketch follows this function.)
3612  */
3613 int register_ftrace_function(struct ftrace_ops *ops)
3614 {
3615         int ret = -1;
3616
3617         mutex_lock(&ftrace_lock);
3618
3619         if (unlikely(ftrace_disabled))
3620                 goto out_unlock;
3621
3622         ret = __register_ftrace_function(ops);
3623         ftrace_startup(&global_ops, 0);
3624
3625  out_unlock:
3626         mutex_unlock(&ftrace_lock);
3627         return ret;
3628 }
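/*
 * Editorial note -- a minimal, hypothetical client of the interface
 * above; the names are illustrative and not part of this file.  As the
 * kernel-doc warns, the callback and everything it calls must be
 * notrace, or it would be traced itself and recurse.  This assumes the
 * two-argument ftrace_func_t used by this kernel version.
 *
 *   static void notrace my_trace_callback(unsigned long ip,
 *                                         unsigned long parent_ip)
 *   {
 *           // ip is the traced function, parent_ip its call site
 *   }
 *
 *   static struct ftrace_ops my_trace_ops __read_mostly = {
 *           .func = my_trace_callback,
 *   };
 *
 *   // somewhere in module/init code:
 *   register_ftrace_function(&my_trace_ops);
 *   // ...and later:
 *   unregister_ftrace_function(&my_trace_ops);
 */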
3629
3630 /**
3631  * unregister_ftrace_function - unregister a function for profiling.
3632  * @ops: ops structure that holds the function to unregister
3633  *
3634  * Unregister a function that was added to be called by ftrace profiling.
3635  */
3636 int unregister_ftrace_function(struct ftrace_ops *ops)
3637 {
3638         int ret;
3639
3640         mutex_lock(&ftrace_lock);
3641         ret = __unregister_ftrace_function(ops);
3642         ftrace_shutdown(&global_ops, 0);
3643         mutex_unlock(&ftrace_lock);
3644
3645         return ret;
3646 }
3647
3648 int
3649 ftrace_enable_sysctl(struct ctl_table *table, int write,
3650                      void __user *buffer, size_t *lenp,
3651                      loff_t *ppos)
3652 {
3653         int ret = -ENODEV;
3654
3655         mutex_lock(&ftrace_lock);
3656
3657         if (unlikely(ftrace_disabled))
3658                 goto out;
3659
3660         ret = proc_dointvec(table, write, buffer, lenp, ppos);
3661
3662         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3663                 goto out;
3664
3665         last_ftrace_enabled = !!ftrace_enabled;
3666
3667         if (ftrace_enabled) {
3668
3669                 ftrace_startup_sysctl();
3670
3671                 /* we are starting ftrace again */
3672                 if (ftrace_list != &ftrace_list_end) {
3673                         if (ftrace_list->next == &ftrace_list_end)
3674                                 ftrace_trace_function = ftrace_list->func;
3675                         else
3676                                 ftrace_trace_function = ftrace_list_func;
3677                 }
3678
3679         } else {
3680                 /* stopping ftrace calls (just send to ftrace_stub) */
3681                 ftrace_trace_function = ftrace_stub;
3682
3683                 ftrace_shutdown_sysctl();
3684         }
3685
3686  out:
3687         mutex_unlock(&ftrace_lock);
3688         return ret;
3689 }
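/*
 * Editorial note -- illustrative usage, not part of this file: the
 * handler above backs the kernel.ftrace_enabled sysctl, so the function
 * tracer can be switched from userspace, e.g.:
 *
 *   echo 0 > /proc/sys/kernel/ftrace_enabled    # send calls to ftrace_stub
 *   echo 1 > /proc/sys/kernel/ftrace_enabled    # restore the registered tracer
 *
 * Once ftrace_disabled is set (see ftrace_kill()), this switch has no
 * effect.
 */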
3690
3691 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3692
3693 static int ftrace_graph_active;
3694 static struct notifier_block ftrace_suspend_notifier;
3695
3696 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3697 {
3698         return 0;
3699 }
3700
3701 /* The callbacks that hook a function */
3702 trace_func_graph_ret_t ftrace_graph_return =
3703                         (trace_func_graph_ret_t)ftrace_stub;
3704 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3705
3706 /* Try to assign a return stack to up to FTRACE_RETSTACK_ALLOC_SIZE tasks per call. */
3707 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3708 {
3709         int i;
3710         int ret = 0;
3711         unsigned long flags;
3712         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3713         struct task_struct *g, *t;
3714
3715         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3716                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3717                                         * sizeof(struct ftrace_ret_stack),
3718                                         GFP_KERNEL);
3719                 if (!ret_stack_list[i]) {
3720                         start = 0;
3721                         end = i;
3722                         ret = -ENOMEM;
3723                         goto free;
3724                 }
3725         }
3726
3727         read_lock_irqsave(&tasklist_lock, flags);
3728         do_each_thread(g, t) {
3729                 if (start == end) {
3730                         ret = -EAGAIN;
3731                         goto unlock;
3732                 }
3733
3734                 if (t->ret_stack == NULL) {
3735                         atomic_set(&t->tracing_graph_pause, 0);
3736                         atomic_set(&t->trace_overrun, 0);
3737                         t->curr_ret_stack = -1;
3738                         /* Make sure the tasks see the -1 first: */
3739                         smp_wmb();
3740                         t->ret_stack = ret_stack_list[start++];
3741                 }
3742         } while_each_thread(g, t);
3743
3744 unlock:
3745         read_unlock_irqrestore(&tasklist_lock, flags);
3746 free:
3747         for (i = start; i < end; i++)
3748                 kfree(ret_stack_list[i]);
3749         return ret;
3750 }
3751
3752 static void
3753 ftrace_graph_probe_sched_switch(void *ignore,
3754                         struct task_struct *prev, struct task_struct *next)
3755 {
3756         unsigned long long timestamp;
3757         int index;
3758
3759         /*
3760          * Does the user want to count the time a function was asleep?
3761          * If so, do not update the time stamps.
3762          */
3763         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3764                 return;
3765
3766         timestamp = trace_clock_local();
3767
3768         prev->ftrace_timestamp = timestamp;
3769
3770         /* only process tasks that we timestamped */
3771         if (!next->ftrace_timestamp)
3772                 return;
3773
3774         /*
3775          * Update all the counters in next to make up for the
3776          * time next was sleeping.
3777          */
3778         timestamp -= next->ftrace_timestamp;
3779
3780         for (index = next->curr_ret_stack; index >= 0; index--)
3781                 next->ret_stack[index].calltime += timestamp;
3782 }
3783
3784 /* Allocate a return stack for each task */
3785 static int start_graph_tracing(void)
3786 {
3787         struct ftrace_ret_stack **ret_stack_list;
3788         int ret, cpu;
3789
3790         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3791                                 sizeof(struct ftrace_ret_stack *),
3792                                 GFP_KERNEL);
3793
3794         if (!ret_stack_list)
3795                 return -ENOMEM;
3796
3797         /* The per-cpu idle (boot) tasks' ret_stack will never be freed */
3798         for_each_online_cpu(cpu) {
3799                 if (!idle_task(cpu)->ret_stack)
3800                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
3801         }
3802
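        /*
         * Editorial note: alloc_retstack_tasklist() hands out at most
         * FTRACE_RETSTACK_ALLOC_SIZE return stacks per pass and returns
         * -EAGAIN while more tasks still need one, so keep allocating
         * fresh batches until every thread has a stack.
         */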
3803         do {
3804                 ret = alloc_retstack_tasklist(ret_stack_list);
3805         } while (ret == -EAGAIN);
3806
3807         if (!ret) {
3808                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3809                 if (ret)
3810                         pr_info("ftrace_graph: Couldn't activate tracepoint"
3811                                 " probe to kernel_sched_switch\n");
3812         }
3813
3814         kfree(ret_stack_list);
3815         return ret;
3816 }
3817
3818 /*
3819  * Hibernation protection.
3820  * The state of the current task is too unstable during
3821  * suspend to / resume from disk. We want to protect against that.
3822  */
3823 static int
3824 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3825                                                         void *unused)
3826 {
3827         switch (state) {
3828         case PM_HIBERNATION_PREPARE:
3829                 pause_graph_tracing();
3830                 break;
3831
3832         case PM_POST_HIBERNATION:
3833                 unpause_graph_tracing();
3834                 break;
3835         }
3836         return NOTIFY_DONE;
3837 }
3838
3839 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3840                         trace_func_graph_ent_t entryfunc)
3841 {
3842         int ret = 0;
3843
3844         mutex_lock(&ftrace_lock);
3845
3846         /* we currently allow only one tracer registered at a time */
3847         if (ftrace_graph_active) {
3848                 ret = -EBUSY;
3849                 goto out;
3850         }
3851
3852         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3853         register_pm_notifier(&ftrace_suspend_notifier);
3854
3855         ftrace_graph_active++;
3856         ret = start_graph_tracing();
3857         if (ret) {
3858                 ftrace_graph_active--;
3859                 goto out;
3860         }
3861
3862         ftrace_graph_return = retfunc;
3863         ftrace_graph_entry = entryfunc;
3864
3865         ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
3866
3867 out:
3868         mutex_unlock(&ftrace_lock);
3869         return ret;
3870 }
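/*
 * Editorial note -- a minimal, hypothetical graph-tracer client; the
 * names are illustrative and not part of this file.  The entry handler
 * returns nonzero to trace a function and zero to skip it; the return
 * handler runs when the traced function returns.
 *
 *   static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
 *   {
 *           return 1;       // trace every function offered to us
 *   }
 *
 *   static void notrace my_graph_return(struct ftrace_graph_ret *trace)
 *   {
 *           // trace->rettime - trace->calltime is the call duration
 *   }
 *
 *   // only one graph tracer may be registered at a time:
 *   ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *   // ...and later:
 *   unregister_ftrace_graph();
 */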
3871
3872 void unregister_ftrace_graph(void)
3873 {
3874         mutex_lock(&ftrace_lock);
3875
3876         if (unlikely(!ftrace_graph_active))
3877                 goto out;
3878
3879         ftrace_graph_active--;
3880         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3881         ftrace_graph_entry = ftrace_graph_entry_stub;
3882         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
3883         unregister_pm_notifier(&ftrace_suspend_notifier);
3884         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3885
3886  out:
3887         mutex_unlock(&ftrace_lock);
3888 }
3889
3890 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
3891
3892 static void
3893 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
3894 {
3895         atomic_set(&t->tracing_graph_pause, 0);
3896         atomic_set(&t->trace_overrun, 0);
3897         t->ftrace_timestamp = 0;
3898         /* make curr_ret_stack (set by our callers) visible before we publish ret_stack */
3899         smp_wmb();
3900         t->ret_stack = ret_stack;
3901 }
3902
3903 /*
3904  * Allocate a return stack for the idle task. May be the first
3905  * time through, or it may be called again when a CPU comes online.
3906  */
3907 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
3908 {
3909         t->curr_ret_stack = -1;
3910         /*
3911          * The idle task has no parent, it either has its own
3912          * stack or no stack at all.
3913          */
3914         if (t->ret_stack)
3915                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
3916
3917         if (ftrace_graph_active) {
3918                 struct ftrace_ret_stack *ret_stack;
3919
3920                 ret_stack = per_cpu(idle_ret_stack, cpu);
3921                 if (!ret_stack) {
3922                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3923                                             * sizeof(struct ftrace_ret_stack),
3924                                             GFP_KERNEL);
3925                         if (!ret_stack)
3926                                 return;
3927                         per_cpu(idle_ret_stack, cpu) = ret_stack;
3928                 }
3929                 graph_init_task(t, ret_stack);
3930         }
3931 }
3932
3933 /* Allocate a return stack for newly created task */
3934 void ftrace_graph_init_task(struct task_struct *t)
3935 {
3936         /* Make sure we do not use the parent ret_stack */
3937         t->ret_stack = NULL;
3938         t->curr_ret_stack = -1;
3939
3940         if (ftrace_graph_active) {
3941                 struct ftrace_ret_stack *ret_stack;
3942
3943                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3944                                 * sizeof(struct ftrace_ret_stack),
3945                                 GFP_KERNEL);
3946                 if (!ret_stack)
3947                         return;
3948                 graph_init_task(t, ret_stack);
3949         }
3950 }
3951
3952 void ftrace_graph_exit_task(struct task_struct *t)
3953 {
3954         struct ftrace_ret_stack *ret_stack = t->ret_stack;
3955
3956         t->ret_stack = NULL;
3957         /* NULL must become visible to IRQs before we free it: */
3958         barrier();
3959
3960         kfree(ret_stack);
3961 }
3962
3963 void ftrace_graph_stop(void)
3964 {
3965         ftrace_stop();
3966 }
3967 #endif