kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
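
/*
 * These are GNU C statement expressions, so each macro evaluates to
 * ___r: callers can test the condition and shut ftrace down in one
 * step. For example, __register_ftrace_function() below does:
 *
 *	if (FTRACE_WARN_ON(ops == &global_ops))
 *		return -EINVAL;
 */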

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_REGEX_LOCK(opsname)	\
	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
#else
#define INIT_REGEX_LOCK(opsname)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop __read_mostly;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)					\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
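
/*
 * The two macros above are used as a bracketing pair. For example,
 * ftrace_global_list_func() below walks the global list with:
 *
 *	do_for_each_ftrace_op(op, ftrace_global_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */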

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->regex_lock);
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void
ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
	if (bit < 0)
		return;

	do_for_each_ftrace_op(op, ftrace_global_list) {
		op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);

	trace_clear_recursion(bit);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a lag between this call and
 * tracing actually stopping.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}

static void update_global_ops(void)
{
	ftrace_func_t func;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end) {
		func = ftrace_global_list->func;
		/*
		 * As we are calling the function directly, if it
		 * does not have recursion protection, the
		 * function_trace_op needs to be updated
		 * accordingly.
		 */
		if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
			global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
		else
			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
	} else {
		func = ftrace_global_list_func;
		/* The list has its own recursion protection. */
		global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
	}


	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
	     !FTRACE_FORCE_LIST_FUNC)) {
		/* Set the ftrace_ops that the arch callback uses */
		if (ftrace_ops_list == &global_ops)
			function_trace_op = ftrace_global_list;
		else
			function_trace_op = ftrace_ops_list;
		func = ftrace_ops_list->func;
	} else {
		/* Just use the default ftrace_ops */
		function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
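
/*
 * Note: the rcu_assign_pointer() above pairs with the
 * rcu_dereference_raw_notrace() in do_for_each_ftrace_op(), so a
 * concurrent list walker sees either the old head or the fully
 * initialized new ops, never a half-linked entry.
 */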

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (unlikely(ftrace_disabled))
		return -ENODEV;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	/* We don't support both control and global flags set. */
	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
		return -EINVAL;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (ftrace_disabled)
		return -ENODEV;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_list_ops(&ftrace_global_list,
					     &global_ops, ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
		if (!ret) {
			/*
			 * The ftrace_ops is now removed from the list,
			 * so there'll be no new users. We must ensure
			 * all current users are done before we free
			 * the control data.
			 * Note synchronize_sched() is not enough, as we
			 * use preempt_disable() to do RCU, but the function
			 * tracer can be called where RCU is not active
			 * (before user_exit()).
			 */
			schedule_on_each_cpu(ftrace_sync);
			control_ops_free(ops);
		}
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		schedule_on_each_cpu(ftrace_sync);


	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
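
/*
 * As a rough worked example (the exact numbers are arch and config
 * dependent): with 4K pages, a 16-byte page header and a 48-byte
 * struct ftrace_profile (the graph-tracer layout on 64-bit), each
 * page holds (4096 - 16) / 48 = 85 records.
 */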

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		stddev = rec->time_squared - rec->counter * avg * avg;
		/*
		 * Divide by only 1000 for the ns^2 -> us^2 conversion;
		 * trace_print_graph_duration() will divide by 1000 again.
		 */
		do_div(stddev, (rec->counter - 1) * 1000);
	}
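
	/*
	 * The stddev computed above uses the usual shortcut for the
	 * sample variance: s^2 = (sum(t^2) - n * avg^2) / (n - 1),
	 * with the times in ns (hence the extra /1000 noted above).
	 */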

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd, ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}
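
	/*
	 * With GRAPH_TIME unset, the time recorded below is therefore
	 * the function's self-time: its total time minus the child
	 * time accumulated into subtime above.
	 */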

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_REGEX_LOCK(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched().
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
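
/*
 * These fops back the "function_profile_enabled" debugfs file created
 * in ftrace_profile_debugfs() below. Assuming debugfs is mounted at
 * the usual /sys/kernel/debug, the profiler is driven as, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 */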

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent; even if
			 * something fails here, we do not free the memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 1;

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops *ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.notrace_hash		= EMPTY_HASH,
	.filter_hash		= EMPTY_HASH,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_REGEX_LOCK(global_ops)
};

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

static struct ftrace_page *ftrace_new_pgs;

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}
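
/*
 * Note the asymmetry above: free_hash_entry() frees the entry, while
 * remove_hash_entry() only unlinks it. ftrace_hash_move() below relies
 * on this to move live entries into a new hash without reallocating
 * them.
 */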

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->filter_hash);
	free_ftrace_hash(ops->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable(ops, enable);

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		free_ftrace_hash_rcu(*dst);
		rcu_assign_pointer(*dst, EMPTY_HASH);
		/* still need to update the function records */
		ret = 0;
		goto out;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;
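
	/*
	 * For example, src->count == 10 gives bits == 3, i.e. 8
	 * buckets: roughly half as many buckets as entries.
	 */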

	ret = -ENOMEM;
	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		goto out;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	old_hash = *dst;
	rcu_assign_pointer(*dst, new_hash);
	free_ftrace_hash_rcu(old_hash);

	ret = 0;
 out:
	/*
	 * Enable regardless of ret:
	 *  On success, we enable the new hash.
	 *  On failure, we re-enable the original hash.
	 */
	ftrace_hash_rec_enable(ops, enable);

	return ret;
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	struct ftrace_hash *filter_hash;
	struct ftrace_hash *notrace_hash;
	int ret;

	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);

	if ((ftrace_hash_empty(filter_hash) ||
	     ftrace_lookup_ip(filter_hash, ip)) &&
	    (ftrace_hash_empty(notrace_hash) ||
	     !ftrace_lookup_ip(notrace_hash, ip)))
		ret = 1;
	else
		ret = 0;

	return ret;
}
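
/*
 * Illustration (with hypothetical functions foo and bar): given
 * filter_hash = {foo} and an empty notrace_hash, only foo matches;
 * given an empty filter_hash and notrace_hash = {bar}, every function
 * except bar matches.
 */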

/*
 * These macros expand to a double for loop. Do not use 'break' to break
 * out of the loop; you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

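	/*
	 * The caller (ftrace_location_range() below) overloads
	 * key->flags to hold the end of the address range being
	 * searched, so key spans [key->ip, key->flags].
	 */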
	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return rec->ip if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if the @ip given is a pointer to an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contain an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(void *start, void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->filter_hash;
		other_hash = ops->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->notrace_hash;
		other_hash = ops->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * A record matches in the filter_hash case when
			 * it is in this hash but not in the notrace hash.
			 * In the notrace case, it matches when it is in
			 * this hash and either in the other (filter)
			 * hash or that hash is empty (trace everything).
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
				return;
			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
				return;
			rec->flags--;
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
1667  * EPERM - if the problem happens on writing to the @ip address
1668  */
1669 void ftrace_bug(int failed, unsigned long ip)
1670 {
1671         switch (failed) {
1672         case -EFAULT:
1673                 FTRACE_WARN_ON_ONCE(1);
1674                 pr_info("ftrace faulted on modifying ");
1675                 print_ip_sym(ip);
1676                 break;
1677         case -EINVAL:
1678                 FTRACE_WARN_ON_ONCE(1);
1679                 pr_info("ftrace failed to modify ");
1680                 print_ip_sym(ip);
1681                 print_ip_ins(" actual: ", (unsigned char *)ip);
1682                 printk(KERN_CONT "\n");
1683                 break;
1684         case -EPERM:
1685                 FTRACE_WARN_ON_ONCE(1);
1686                 pr_info("ftrace faulted on writing ");
1687                 print_ip_sym(ip);
1688                 break;
1689         default:
1690                 FTRACE_WARN_ON_ONCE(1);
1691                 pr_info("ftrace faulted on unknown error ");
1692                 print_ip_sym(ip);
1693         }
1694 }
1695
1696 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1697 {
1698         unsigned long flag = 0UL;
1699
1700         /*
1701          * If we are updating calls:
1702          *
1703          *   If the record has a ref count, then we need to enable it
1704          *   because someone is using it.
1705          *
1706          *   Otherwise we make sure it's disabled.
1707          *
1708          * If we are disabling calls, then disable all records that
1709          * are enabled.
1710          */
1711         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1712                 flag = FTRACE_FL_ENABLED;
1713
1714         /*
1715          * If enabling, and the REGS flag does not match REGS_EN, then
1716          * do not ignore this record: set flags so the compare against
1717          * ENABLED below fails.
1718          */
1719         if (flag &&
1720             (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1721                 flag |= FTRACE_FL_REGS;
1722
1723         /* If the state of this record hasn't changed, then do nothing */
1724         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1725                 return FTRACE_UPDATE_IGNORE;
1726
1727         if (flag) {
1728                 /* Save off if rec is being enabled (for return value) */
1729                 flag ^= rec->flags & FTRACE_FL_ENABLED;
1730
1731                 if (update) {
1732                         rec->flags |= FTRACE_FL_ENABLED;
1733                         if (flag & FTRACE_FL_REGS) {
1734                                 if (rec->flags & FTRACE_FL_REGS)
1735                                         rec->flags |= FTRACE_FL_REGS_EN;
1736                                 else
1737                                         rec->flags &= ~FTRACE_FL_REGS_EN;
1738                         }
1739                 }
1740
1741                 /*
1742                  * If this record is being updated from a nop, then
1743                  *   return UPDATE_MAKE_CALL.
1744                  * Otherwise, if the EN flag is set, then return
1745                  *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1746                  *   from the non-save regs, to a save regs function.
1747                  * Otherwise,
1748                  *   return UPDATE_MODIFY_CALL to tell the caller to convert
1749                  *   from the save regs, to a non-save regs function.
1750                  */
1751                 if (flag & FTRACE_FL_ENABLED)
1752                         return FTRACE_UPDATE_MAKE_CALL;
1753                 else if (rec->flags & FTRACE_FL_REGS_EN)
1754                         return FTRACE_UPDATE_MODIFY_CALL_REGS;
1755                 else
1756                         return FTRACE_UPDATE_MODIFY_CALL;
1757         }
1758
1759         if (update) {
1760                 /* If there's no more users, clear all flags */
1761                 if (!(rec->flags & ~FTRACE_FL_MASK))
1762                         rec->flags = 0;
1763                 else
1764                         /* Just disable the record (keep REGS state) */
1765                         rec->flags &= ~FTRACE_FL_ENABLED;
1766         }
1767
1768         return FTRACE_UPDATE_MAKE_NOP;
1769 }
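
/*
 * Summary of ftrace_check_record(), derived from the code above
 * (@update == 1; with @update == 0 the same value is returned but
 * rec->flags is left untouched, see ftrace_test_record() below):
 *
 *	refcount > 0, ENABLED clear        -> FTRACE_UPDATE_MAKE_CALL
 *	refcount > 0, REGS/REGS_EN differ  -> FTRACE_UPDATE_MODIFY_CALL[_REGS]
 *	refcount == 0, ENABLED set         -> FTRACE_UPDATE_MAKE_NOP
 *	state unchanged                    -> FTRACE_UPDATE_IGNORE
 */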
1770
1771 /**
1772  * ftrace_update_record - set a record that now is tracing or not
1773  * @rec: the record to update
1774  * @enable: set to 1 if the record is tracing, zero to force disable
1775  *
1776  * The records that represent all functions that can be traced need
1777  * to be updated when tracing has been enabled.
1778  */
1779 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1780 {
1781         return ftrace_check_record(rec, enable, 1);
1782 }
1783
1784 /**
1785  * ftrace_test_record - check if the record has been enabled or not
1786  * @rec: the record to test
1787  * @enable: set to 1 to check if enabled, 0 if it is disabled
1788  *
1789  * The arch code may need to test if a record is already set to
1790  * tracing to determine how to modify the function code that it
1791  * represents.
1792  */
1793 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1794 {
1795         return ftrace_check_record(rec, enable, 0);
1796 }
1797
1798 static int
1799 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1800 {
1801         unsigned long ftrace_old_addr;
1802         unsigned long ftrace_addr;
1803         int ret;
1804
1805         ret = ftrace_update_record(rec, enable);
1806
1807         if (rec->flags & FTRACE_FL_REGS)
1808                 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1809         else
1810                 ftrace_addr = (unsigned long)FTRACE_ADDR;
1811
1812         switch (ret) {
1813         case FTRACE_UPDATE_IGNORE:
1814                 return 0;
1815
1816         case FTRACE_UPDATE_MAKE_CALL:
1817                 return ftrace_make_call(rec, ftrace_addr);
1818
1819         case FTRACE_UPDATE_MAKE_NOP:
1820                 return ftrace_make_nop(NULL, rec, ftrace_addr);
1821
1822         case FTRACE_UPDATE_MODIFY_CALL_REGS:
1823         case FTRACE_UPDATE_MODIFY_CALL:
1824                 if (rec->flags & FTRACE_FL_REGS)
1825                         ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1826                 else
1827                         ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1828
1829                 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1830         }
1831
1832         return -1; /* unknown ftrace bug */
1833 }
1834
1835 void __weak ftrace_replace_code(int enable)
1836 {
1837         struct dyn_ftrace *rec;
1838         struct ftrace_page *pg;
1839         int failed;
1840
1841         if (unlikely(ftrace_disabled))
1842                 return;
1843
1844         do_for_each_ftrace_rec(pg, rec) {
1845                 failed = __ftrace_replace_code(rec, enable);
1846                 if (failed) {
1847                         ftrace_bug(failed, rec->ip);
1848                         /* Stop processing */
1849                         return;
1850                 }
1851         } while_for_each_ftrace_rec();
1852 }
1853
1854 struct ftrace_rec_iter {
1855         struct ftrace_page      *pg;
1856         int                     index;
1857 };
1858
1859 /**
1860  * ftrace_rec_iter_start - start up iterating over traced functions
1861  *
1862  * Returns an iterator handle that is used to iterate over all
1863  * the records that represent address locations where functions
1864  * are traced.
1865  *
1866  * May return NULL if no records are available.
1867  */
1868 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1869 {
1870         /*
1871          * We only use a single iterator.
1872          * Protected by the ftrace_lock mutex.
1873          */
1874         static struct ftrace_rec_iter ftrace_rec_iter;
1875         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1876
1877         iter->pg = ftrace_pages_start;
1878         iter->index = 0;
1879
1880         /* Could have empty pages */
1881         while (iter->pg && !iter->pg->index)
1882                 iter->pg = iter->pg->next;
1883
1884         if (!iter->pg)
1885                 return NULL;
1886
1887         return iter;
1888 }
1889
1890 /**
1891  * ftrace_rec_iter_next - get the next record to process.
1892  * @iter: The handle to the iterator.
1893  *
1894  * Returns the next iterator after the given iterator @iter.
1895  */
1896 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1897 {
1898         iter->index++;
1899
1900         if (iter->index >= iter->pg->index) {
1901                 iter->pg = iter->pg->next;
1902                 iter->index = 0;
1903
1904                 /* Could have empty pages */
1905                 while (iter->pg && !iter->pg->index)
1906                         iter->pg = iter->pg->next;
1907         }
1908
1909         if (!iter->pg)
1910                 return NULL;
1911
1912         return iter;
1913 }
1914
1915 /**
1916  * ftrace_rec_iter_record - get the record at the iterator location
1917  * @iter: The current iterator location
1918  *
1919  * Returns the record that the current @iter is at.
1920  */
1921 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1922 {
1923         return &iter->pg->records[iter->index];
1924 }
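
/*
 * Hedged usage sketch: arch code typically walks every record with the
 * three helpers above (linux/ftrace.h wraps this exact pattern in
 * for_ftrace_rec_iter()); patch_call_site() is a hypothetical helper:
 *
 *	struct ftrace_rec_iter *iter;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		struct dyn_ftrace *rec = ftrace_rec_iter_record(iter);
 *
 *		patch_call_site(rec->ip);
 *	}
 *
 * The caller must hold ftrace_lock: the single static iterator in
 * ftrace_rec_iter_start() is protected only by that mutex.
 */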
1925
1926 static int
1927 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1928 {
1929         unsigned long ip;
1930         int ret;
1931
1932         ip = rec->ip;
1933
1934         if (unlikely(ftrace_disabled))
1935                 return 0;
1936
1937         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1938         if (ret) {
1939                 ftrace_bug(ret, ip);
1940                 return 0;
1941         }
1942         return 1;
1943 }
1944
1945 /*
1946  * archs can override this function if they must do something
1947  * before the modifying code is performed.
1948  */
1949 int __weak ftrace_arch_code_modify_prepare(void)
1950 {
1951         return 0;
1952 }
1953
1954 /*
1955  * archs can override this function if they must do something
1956  * after the modifying code is performed.
1957  */
1958 int __weak ftrace_arch_code_modify_post_process(void)
1959 {
1960         return 0;
1961 }
1962
1963 void ftrace_modify_all_code(int command)
1964 {
1965         if (command & FTRACE_UPDATE_CALLS)
1966                 ftrace_replace_code(1);
1967         else if (command & FTRACE_DISABLE_CALLS)
1968                 ftrace_replace_code(0);
1969
1970         if (command & FTRACE_UPDATE_TRACE_FUNC)
1971                 ftrace_update_ftrace_func(ftrace_trace_function);
1972
1973         if (command & FTRACE_START_FUNC_RET)
1974                 ftrace_enable_ftrace_graph_caller();
1975         else if (command & FTRACE_STOP_FUNC_RET)
1976                 ftrace_disable_ftrace_graph_caller();
1977 }
1978
1979 static int __ftrace_modify_code(void *data)
1980 {
1981         int *command = data;
1982
1983         ftrace_modify_all_code(*command);
1984
1985         return 0;
1986 }
1987
1988 /**
1989  * ftrace_run_stop_machine - go back to the stop machine method
1990  * @command: The command to tell ftrace what to do
1991  *
1992  * If an arch needs to fall back to the stop machine method, then
1993  * it can call this function.
1994  */
1995 void ftrace_run_stop_machine(int command)
1996 {
1997         stop_machine(__ftrace_modify_code, &command, NULL);
1998 }
1999
2000 /**
2001  * arch_ftrace_update_code - modify the code to trace or not trace
2002  * @command: The command that needs to be done
2003  *
2004  * Archs can override this function if they do not need to
2005  * run stop_machine() to modify code.
2006  */
2007 void __weak arch_ftrace_update_code(int command)
2008 {
2009         ftrace_run_stop_machine(command);
2010 }
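
/*
 * Hedged example of overriding the weak hook: an arch with a safe
 * live-patching mechanism can skip stop_machine() entirely, e.g.
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);	// arch-safe patching
 *	}
 *
 * x86, for instance, rewrites the call sites with breakpoint-based
 * patching instead of stopping the machine.
 */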
2011
2012 static void ftrace_run_update_code(int command)
2013 {
2014         int ret;
2015
2016         ret = ftrace_arch_code_modify_prepare();
2017         FTRACE_WARN_ON(ret);
2018         if (ret)
2019                 return;
2020         /*
2021          * Do not call function tracer while we update the code.
2022          * We are in stop machine.
2023          */
2024         function_trace_stop++;
2025
2026         /*
2027          * By default we use stop_machine() to modify the code.
2028          * But archs can do whatever they want as long as it
2029          * is safe. The stop_machine() is the safest, but also
2030          * produces the most overhead.
2031          */
2032         arch_ftrace_update_code(command);
2033
2034         function_trace_stop--;
2035
2036         ret = ftrace_arch_code_modify_post_process();
2037         FTRACE_WARN_ON(ret);
2038 }
2039
2040 static ftrace_func_t saved_ftrace_func;
2041 static int ftrace_start_up;
2042 static int global_start_up;
2043
2044 static void ftrace_startup_enable(int command)
2045 {
2046         if (saved_ftrace_func != ftrace_trace_function) {
2047                 saved_ftrace_func = ftrace_trace_function;
2048                 command |= FTRACE_UPDATE_TRACE_FUNC;
2049         }
2050
2051         if (!command || !ftrace_enabled)
2052                 return;
2053
2054         ftrace_run_update_code(command);
2055 }
2056
2057 static int ftrace_startup(struct ftrace_ops *ops, int command)
2058 {
2059         bool hash_enable = true;
2060
2061         if (unlikely(ftrace_disabled))
2062                 return -ENODEV;
2063
2064         ftrace_start_up++;
2065         command |= FTRACE_UPDATE_CALLS;
2066
2067         /* ops marked global share the filter hashes */
2068         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2069                 ops = &global_ops;
2070                 /* Don't update hash if global is already set */
2071                 if (global_start_up)
2072                         hash_enable = false;
2073                 global_start_up++;
2074         }
2075
2076         ops->flags |= FTRACE_OPS_FL_ENABLED;
2077         if (hash_enable)
2078                 ftrace_hash_rec_enable(ops, 1);
2079
2080         ftrace_startup_enable(command);
2081
2082         return 0;
2083 }
2084
2085 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2086 {
2087         bool hash_disable = true;
2088
2089         if (unlikely(ftrace_disabled))
2090                 return;
2091
2092         ftrace_start_up--;
2093         /*
2094          * Just warn in case of imbalance; no need to kill ftrace, it's not
2095          * critical, but the ftrace_call callers may never be nopped again
2096          * after further ftrace uses.
2097          */
2098         WARN_ON_ONCE(ftrace_start_up < 0);
2099
2100         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2101                 ops = &global_ops;
2102                 global_start_up--;
2103                 WARN_ON_ONCE(global_start_up < 0);
2104                 /* Don't update hash if global still has users */
2105                 if (global_start_up) {
2106                         WARN_ON_ONCE(!ftrace_start_up);
2107                         hash_disable = false;
2108                 }
2109         }
2110
2111         if (hash_disable)
2112                 ftrace_hash_rec_disable(ops, 1);
2113
2114         if (ops != &global_ops || !global_start_up)
2115                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2116
2117         command |= FTRACE_UPDATE_CALLS;
2118
2119         if (saved_ftrace_func != ftrace_trace_function) {
2120                 saved_ftrace_func = ftrace_trace_function;
2121                 command |= FTRACE_UPDATE_TRACE_FUNC;
2122         }
2123
2124         if (!command || !ftrace_enabled)
2125                 return;
2126
2127         ftrace_run_update_code(command);
2128 }
2129
2130 static void ftrace_startup_sysctl(void)
2131 {
2132         if (unlikely(ftrace_disabled))
2133                 return;
2134
2135         /* Force update next time */
2136         saved_ftrace_func = NULL;
2137         /* ftrace_start_up is true if we want ftrace running */
2138         if (ftrace_start_up)
2139                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2140 }
2141
2142 static void ftrace_shutdown_sysctl(void)
2143 {
2144         if (unlikely(ftrace_disabled))
2145                 return;
2146
2147         /* ftrace_start_up is true if ftrace is running */
2148         if (ftrace_start_up)
2149                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2150 }
2151
2152 static cycle_t          ftrace_update_time;
2153 static unsigned long    ftrace_update_cnt;
2154 unsigned long           ftrace_update_tot_cnt;
2155
2156 static int ops_traces_mod(struct ftrace_ops *ops)
2157 {
2158         struct ftrace_hash *hash;
2159
2160         hash = ops->filter_hash;
2161         return ftrace_hash_empty(hash);
2162 }
2163
2164 static int ftrace_update_code(struct module *mod)
2165 {
2166         struct ftrace_page *pg;
2167         struct dyn_ftrace *p;
2168         cycle_t start, stop;
2169         unsigned long ref = 0;
2170         int i;
2171
2172         /*
2173          * When adding a module, we need to check if tracers are
2174          * currently enabled and if they are set to trace all functions.
2175          * If they are, we need to enable the module functions as well
2176          * as update the reference counts for those function records.
2177          */
2178         if (mod) {
2179                 struct ftrace_ops *ops;
2180
2181                 for (ops = ftrace_ops_list;
2182                      ops != &ftrace_list_end; ops = ops->next) {
2183                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2184                             ops_traces_mod(ops))
2185                                 ref++;
2186                 }
2187         }
2188
2189         start = ftrace_now(raw_smp_processor_id());
2190         ftrace_update_cnt = 0;
2191
2192         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2193
2194                 for (i = 0; i < pg->index; i++) {
2195                         /* If something went wrong, bail without enabling anything */
2196                         if (unlikely(ftrace_disabled))
2197                                 return -1;
2198
2199                         p = &pg->records[i];
2200                         p->flags = ref;
2201
2202                         /*
2203                          * Do the initial record conversion from mcount jump
2204                          * to the NOP instructions.
2205                          */
2206                         if (!ftrace_code_disable(mod, p))
2207                                 break;
2208
2209                         ftrace_update_cnt++;
2210
2211                         /*
2212                          * If the tracing is enabled, go ahead and enable the record.
2213                          *
2214                          * The reason not to enable the record immediatelly is the
2215                          * The reason not to enable the record immediately is the
2216                          * inherent check of ftrace_make_nop/ftrace_make_call for
2217                          * correct previous instructions.  Doing the NOP conversion
2218                          * first puts the module into the correct state, thus
2219                          * passing the ftrace_make_call check.
2220                         if (ftrace_start_up && ref) {
2221                                 int failed = __ftrace_replace_code(p, 1);
2222                                 if (failed)
2223                                         ftrace_bug(failed, p->ip);
2224                         }
2225                 }
2226         }
2227
2228         ftrace_new_pgs = NULL;
2229
2230         stop = ftrace_now(raw_smp_processor_id());
2231         ftrace_update_time = stop - start;
2232         ftrace_update_tot_cnt += ftrace_update_cnt;
2233
2234         return 0;
2235 }
2236
2237 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2238 {
2239         int order;
2240         int cnt;
2241
2242         if (WARN_ON(!count))
2243                 return -EINVAL;
2244
2245         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2246
2247         /*
2248          * We want to fill as much as possible; no more than one
2249          * page should be left empty.
2250          */
2251         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2252                 order--;
2253
2254  again:
2255         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2256
2257         if (!pg->records) {
2258                 /* if we can't allocate this size, try something smaller */
2259                 if (!order)
2260                         return -ENOMEM;
2261                 order >>= 1;
2262                 goto again;
2263         }
2264
2265         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2266         pg->size = cnt;
2267
2268         if (cnt > count)
2269                 cnt = count;
2270
2271         return cnt;
2272 }
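
/*
 * Worked example (assuming 4 KiB pages and a 32-byte struct dyn_ftrace,
 * i.e. ENTRIES_PER_PAGE == 128): for count == 5000,
 * DIV_ROUND_UP(5000, 128) == 40 gives an initial order of 6 (64 pages,
 * 8192 entries). The while loop drops it to 5, since 8192 >= 5128 but
 * 4096 is not, so this call returns cnt == 4096 and
 * ftrace_allocate_pages() below loops to place the remaining 904
 * records in a second, smaller allocation.
 */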
2273
2274 static struct ftrace_page *
2275 ftrace_allocate_pages(unsigned long num_to_init)
2276 {
2277         struct ftrace_page *start_pg;
2278         struct ftrace_page *pg;
2279         int order;
2280         int cnt;
2281
2282         if (!num_to_init)
2283                 return NULL;
2284
2285         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2286         if (!pg)
2287                 return NULL;
2288
2289         /*
2290          * Try to allocate as much as possible in one contiguous
2291          * location that fills in all of the space. We want to
2292          * waste as little space as possible.
2293          */
2294         for (;;) {
2295                 cnt = ftrace_allocate_records(pg, num_to_init);
2296                 if (cnt < 0)
2297                         goto free_pages;
2298
2299                 num_to_init -= cnt;
2300                 if (!num_to_init)
2301                         break;
2302
2303                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2304                 if (!pg->next)
2305                         goto free_pages;
2306
2307                 pg = pg->next;
2308         }
2309
2310         return start_pg;
2311
2312  free_pages:
2313         /* free the whole chain from the head, not just from pg */
2314         for (pg = start_pg; pg; pg = start_pg) {
2315                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2316                 free_pages((unsigned long)pg->records, order);
2317                 start_pg = pg->next;
2318                 kfree(pg);
2319         }
2320         pr_info("ftrace: FAILED to allocate memory for functions\n");
2321         return NULL;
2322 }
2323
2324 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2325 {
2326         int cnt;
2327
2328         if (!num_to_init) {
2329                 pr_info("ftrace: No functions to be traced?\n");
2330                 return -1;
2331         }
2332
2333         cnt = num_to_init / ENTRIES_PER_PAGE;
2334         pr_info("ftrace: allocating %ld entries in %d pages\n",
2335                 num_to_init, cnt + 1);
2336
2337         return 0;
2338 }
2339
2340 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2341
2342 struct ftrace_iterator {
2343         loff_t                          pos;
2344         loff_t                          func_pos;
2345         struct ftrace_page              *pg;
2346         struct dyn_ftrace               *func;
2347         struct ftrace_func_probe        *probe;
2348         struct trace_parser             parser;
2349         struct ftrace_hash              *hash;
2350         struct ftrace_ops               *ops;
2351         int                             hidx;
2352         int                             idx;
2353         unsigned                        flags;
2354 };
2355
2356 static void *
2357 t_hash_next(struct seq_file *m, loff_t *pos)
2358 {
2359         struct ftrace_iterator *iter = m->private;
2360         struct hlist_node *hnd = NULL;
2361         struct hlist_head *hhd;
2362
2363         (*pos)++;
2364         iter->pos = *pos;
2365
2366         if (iter->probe)
2367                 hnd = &iter->probe->node;
2368  retry:
2369         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2370                 return NULL;
2371
2372         hhd = &ftrace_func_hash[iter->hidx];
2373
2374         if (hlist_empty(hhd)) {
2375                 iter->hidx++;
2376                 hnd = NULL;
2377                 goto retry;
2378         }
2379
2380         if (!hnd)
2381                 hnd = hhd->first;
2382         else {
2383                 hnd = hnd->next;
2384                 if (!hnd) {
2385                         iter->hidx++;
2386                         goto retry;
2387                 }
2388         }
2389
2390         if (WARN_ON_ONCE(!hnd))
2391                 return NULL;
2392
2393         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2394
2395         return iter;
2396 }
2397
2398 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2399 {
2400         struct ftrace_iterator *iter = m->private;
2401         void *p = NULL;
2402         loff_t l;
2403
2404         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2405                 return NULL;
2406
2407         if (iter->func_pos > *pos)
2408                 return NULL;
2409
2410         iter->hidx = 0;
2411         for (l = 0; l <= (*pos - iter->func_pos); ) {
2412                 p = t_hash_next(m, &l);
2413                 if (!p)
2414                         break;
2415         }
2416         if (!p)
2417                 return NULL;
2418
2419         /* Only set this if we have an item */
2420         iter->flags |= FTRACE_ITER_HASH;
2421
2422         return iter;
2423 }
2424
2425 static int
2426 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2427 {
2428         struct ftrace_func_probe *rec;
2429
2430         rec = iter->probe;
2431         if (WARN_ON_ONCE(!rec))
2432                 return -EIO;
2433
2434         if (rec->ops->print)
2435                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2436
2437         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2438
2439         if (rec->data)
2440                 seq_printf(m, ":%p", rec->data);
2441         seq_putc(m, '\n');
2442
2443         return 0;
2444 }
2445
2446 static void *
2447 t_next(struct seq_file *m, void *v, loff_t *pos)
2448 {
2449         struct ftrace_iterator *iter = m->private;
2450         struct ftrace_ops *ops = iter->ops;
2451         struct dyn_ftrace *rec = NULL;
2452
2453         if (unlikely(ftrace_disabled))
2454                 return NULL;
2455
2456         if (iter->flags & FTRACE_ITER_HASH)
2457                 return t_hash_next(m, pos);
2458
2459         (*pos)++;
2460         iter->pos = iter->func_pos = *pos;
2461
2462         if (iter->flags & FTRACE_ITER_PRINTALL)
2463                 return t_hash_start(m, pos);
2464
2465  retry:
2466         if (iter->idx >= iter->pg->index) {
2467                 if (iter->pg->next) {
2468                         iter->pg = iter->pg->next;
2469                         iter->idx = 0;
2470                         goto retry;
2471                 }
2472         } else {
2473                 rec = &iter->pg->records[iter->idx++];
2474                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2475                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2476
2477                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2478                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2479
2480                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2481                      !(rec->flags & FTRACE_FL_ENABLED))) {
2482
2483                         rec = NULL;
2484                         goto retry;
2485                 }
2486         }
2487
2488         if (!rec)
2489                 return t_hash_start(m, pos);
2490
2491         iter->func = rec;
2492
2493         return iter;
2494 }
2495
2496 static void reset_iter_read(struct ftrace_iterator *iter)
2497 {
2498         iter->pos = 0;
2499         iter->func_pos = 0;
2500         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2501 }
2502
2503 static void *t_start(struct seq_file *m, loff_t *pos)
2504 {
2505         struct ftrace_iterator *iter = m->private;
2506         struct ftrace_ops *ops = iter->ops;
2507         void *p = NULL;
2508         loff_t l;
2509
2510         mutex_lock(&ftrace_lock);
2511
2512         if (unlikely(ftrace_disabled))
2513                 return NULL;
2514
2515         /*
2516          * If an lseek was done, then reset and start from beginning.
2517          */
2518         if (*pos < iter->pos)
2519                 reset_iter_read(iter);
2520
2521         /*
2522          * For set_ftrace_filter reading, if we have the filter
2523          * off, we can take a shortcut and just print out that all
2524          * functions are enabled.
2525          */
2526         if (iter->flags & FTRACE_ITER_FILTER &&
2527             ftrace_hash_empty(ops->filter_hash)) {
2528                 if (*pos > 0)
2529                         return t_hash_start(m, pos);
2530                 iter->flags |= FTRACE_ITER_PRINTALL;
2531                 /* reset in case of seek/pread */
2532                 iter->flags &= ~FTRACE_ITER_HASH;
2533                 return iter;
2534         }
2535
2536         if (iter->flags & FTRACE_ITER_HASH)
2537                 return t_hash_start(m, pos);
2538
2539         /*
2540          * Unfortunately, we need to restart at ftrace_pages_start
2541          * every time we let go of the ftrace_lock. This is because
2542          * those pointers can change without the lock.
2543          */
2544         iter->pg = ftrace_pages_start;
2545         iter->idx = 0;
2546         for (l = 0; l <= *pos; ) {
2547                 p = t_next(m, p, &l);
2548                 if (!p)
2549                         break;
2550         }
2551
2552         if (!p)
2553                 return t_hash_start(m, pos);
2554
2555         return iter;
2556 }
2557
2558 static void t_stop(struct seq_file *m, void *p)
2559 {
2560         mutex_unlock(&ftrace_lock);
2561 }
2562
2563 static int t_show(struct seq_file *m, void *v)
2564 {
2565         struct ftrace_iterator *iter = m->private;
2566         struct dyn_ftrace *rec;
2567
2568         if (iter->flags & FTRACE_ITER_HASH)
2569                 return t_hash_show(m, iter);
2570
2571         if (iter->flags & FTRACE_ITER_PRINTALL) {
2572                 seq_printf(m, "#### all functions enabled ####\n");
2573                 return 0;
2574         }
2575
2576         rec = iter->func;
2577
2578         if (!rec)
2579                 return 0;
2580
2581         seq_printf(m, "%ps", (void *)rec->ip);
2582         if (iter->flags & FTRACE_ITER_ENABLED)
2583                 seq_printf(m, " (%ld)%s",
2584                            rec->flags & ~FTRACE_FL_MASK,
2585                            rec->flags & FTRACE_FL_REGS ? " R" : "");
2586         seq_printf(m, "\n");
2587
2588         return 0;
2589 }
2590
2591 static const struct seq_operations show_ftrace_seq_ops = {
2592         .start = t_start,
2593         .next = t_next,
2594         .stop = t_stop,
2595         .show = t_show,
2596 };
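
/*
 * A hedged sample of what t_show() emits (symbol names and counts are
 * illustrative): set_ftrace_filter prints one symbol per line, while
 * enabled_functions appends the reference count and an "R" when the
 * record saves registers:
 *
 *	set_ftrace_filter:	schedule
 *	enabled_functions:	kthread_create_on_node (2) R
 */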
2597
2598 static int
2599 ftrace_avail_open(struct inode *inode, struct file *file)
2600 {
2601         struct ftrace_iterator *iter;
2602
2603         if (unlikely(ftrace_disabled))
2604                 return -ENODEV;
2605
2606         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2607         if (iter) {
2608                 iter->pg = ftrace_pages_start;
2609                 iter->ops = &global_ops;
2610         }
2611
2612         return iter ? 0 : -ENOMEM;
2613 }
2614
2615 static int
2616 ftrace_enabled_open(struct inode *inode, struct file *file)
2617 {
2618         struct ftrace_iterator *iter;
2619
2620         if (unlikely(ftrace_disabled))
2621                 return -ENODEV;
2622
2623         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2624         if (iter) {
2625                 iter->pg = ftrace_pages_start;
2626                 iter->flags = FTRACE_ITER_ENABLED;
2627                 iter->ops = &global_ops;
2628         }
2629
2630         return iter ? 0 : -ENOMEM;
2631 }
2632
2633 static void ftrace_filter_reset(struct ftrace_hash *hash)
2634 {
2635         mutex_lock(&ftrace_lock);
2636         ftrace_hash_clear(hash);
2637         mutex_unlock(&ftrace_lock);
2638 }
2639
2640 /**
2641  * ftrace_regex_open - initialize function tracer filter files
2642  * @ops: The ftrace_ops that hold the hash filters
2643  * @flag: The type of filter to process
2644  * @inode: The inode, usually passed in to your open routine
2645  * @file: The file, usually passed in to your open routine
2646  *
2647  * ftrace_regex_open() initializes the filter files for the
2648  * @ops. Depending on @flag it may process the filter hash or
2649  * the notrace hash of @ops. With this called from the open
2650  * routine, you can use ftrace_filter_write() for the write
2651  * routine if @flag has FTRACE_ITER_FILTER set, or
2652  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2653  * ftrace_filter_lseek() should be used as the lseek routine, and
2654  * release must call ftrace_regex_release().
2655  */
2656 int
2657 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2658                   struct inode *inode, struct file *file)
2659 {
2660         struct ftrace_iterator *iter;
2661         struct ftrace_hash *hash;
2662         int ret = 0;
2663
2664         ftrace_ops_init(ops);
2665
2666         if (unlikely(ftrace_disabled))
2667                 return -ENODEV;
2668
2669         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2670         if (!iter)
2671                 return -ENOMEM;
2672
2673         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2674                 kfree(iter);
2675                 return -ENOMEM;
2676         }
2677
2678         iter->ops = ops;
2679         iter->flags = flag;
2680
2681         mutex_lock(&ops->regex_lock);
2682
2683         if (flag & FTRACE_ITER_NOTRACE)
2684                 hash = ops->notrace_hash;
2685         else
2686                 hash = ops->filter_hash;
2687
2688         if (file->f_mode & FMODE_WRITE) {
2689                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2690                 if (!iter->hash) {
2691                         trace_parser_put(&iter->parser);
2692                         kfree(iter);
2693                         ret = -ENOMEM;
2694                         goto out_unlock;
2695                 }
2696         }
2697
2698         if ((file->f_mode & FMODE_WRITE) &&
2699             (file->f_flags & O_TRUNC))
2700                 ftrace_filter_reset(iter->hash);
2701
2702         if (file->f_mode & FMODE_READ) {
2703                 iter->pg = ftrace_pages_start;
2704
2705                 ret = seq_open(file, &show_ftrace_seq_ops);
2706                 if (!ret) {
2707                         struct seq_file *m = file->private_data;
2708                         m->private = iter;
2709                 } else {
2710                         /* Failed */
2711                         free_ftrace_hash(iter->hash);
2712                         trace_parser_put(&iter->parser);
2713                         kfree(iter);
2714                 }
2715         } else
2716                 file->private_data = iter;
2717
2718  out_unlock:
2719         mutex_unlock(&ops->regex_lock);
2720
2721         return ret;
2722 }
2723
2724 static int
2725 ftrace_filter_open(struct inode *inode, struct file *file)
2726 {
2727         return ftrace_regex_open(&global_ops,
2728                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2729                         inode, file);
2730 }
2731
2732 static int
2733 ftrace_notrace_open(struct inode *inode, struct file *file)
2734 {
2735         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2736                                  inode, file);
2737 }
2738
2739 static int ftrace_match(char *str, char *regex, int len, int type)
2740 {
2741         int matched = 0;
2742         int slen;
2743
2744         switch (type) {
2745         case MATCH_FULL:
2746                 if (strcmp(str, regex) == 0)
2747                         matched = 1;
2748                 break;
2749         case MATCH_FRONT_ONLY:
2750                 if (strncmp(str, regex, len) == 0)
2751                         matched = 1;
2752                 break;
2753         case MATCH_MIDDLE_ONLY:
2754                 if (strstr(str, regex))
2755                         matched = 1;
2756                 break;
2757         case MATCH_END_ONLY:
2758                 slen = strlen(str);
2759                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2760                         matched = 1;
2761                 break;
2762         }
2763
2764         return matched;
2765 }
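
/*
 * Examples of the four match types as filter_parse_regex() produces
 * them (patterns are illustrative):
 *
 *	"schedule"  -> MATCH_FULL         only "schedule"
 *	"sched*"    -> MATCH_FRONT_ONLY   "schedule", "sched_fork", ...
 *	"*lock"     -> MATCH_END_ONLY     "spin_lock", "read_lock", ...
 *	"*sched*"   -> MATCH_MIDDLE_ONLY  anything containing "sched"
 */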
2766
2767 static int
2768 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2769 {
2770         struct ftrace_func_entry *entry;
2771         int ret = 0;
2772
2773         entry = ftrace_lookup_ip(hash, rec->ip);
2774         if (not) {
2775                 /* Do nothing if it doesn't exist */
2776                 if (!entry)
2777                         return 0;
2778
2779                 free_hash_entry(hash, entry);
2780         } else {
2781                 /* Do nothing if it exists */
2782                 if (entry)
2783                         return 0;
2784
2785                 ret = add_hash_entry(hash, rec->ip);
2786         }
2787         return ret;
2788 }
2789
2790 static int
2791 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2792                     char *regex, int len, int type)
2793 {
2794         char str[KSYM_SYMBOL_LEN];
2795         char *modname;
2796
2797         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2798
2799         if (mod) {
2800                 /* module lookup requires matching the module */
2801                 if (!modname || strcmp(modname, mod))
2802                         return 0;
2803
2804                 /* blank search means to match all funcs in the mod */
2805                 if (!len)
2806                         return 1;
2807         }
2808
2809         return ftrace_match(str, regex, len, type);
2810 }
2811
2812 static int
2813 match_records(struct ftrace_hash *hash, char *buff,
2814               int len, char *mod, int not)
2815 {
2816         unsigned search_len = 0;
2817         struct ftrace_page *pg;
2818         struct dyn_ftrace *rec;
2819         int type = MATCH_FULL;
2820         char *search = buff;
2821         int found = 0;
2822         int ret;
2823
2824         if (len) {
2825                 type = filter_parse_regex(buff, len, &search, &not);
2826                 search_len = strlen(search);
2827         }
2828
2829         mutex_lock(&ftrace_lock);
2830
2831         if (unlikely(ftrace_disabled))
2832                 goto out_unlock;
2833
2834         do_for_each_ftrace_rec(pg, rec) {
2835                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2836                         ret = enter_record(hash, rec, not);
2837                         if (ret < 0) {
2838                                 found = ret;
2839                                 goto out_unlock;
2840                         }
2841                         found = 1;
2842                 }
2843         } while_for_each_ftrace_rec();
2844  out_unlock:
2845         mutex_unlock(&ftrace_lock);
2846
2847         return found;
2848 }
2849
2850 static int
2851 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2852 {
2853         return match_records(hash, buff, len, NULL, 0);
2854 }
2855
2856 static int
2857 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2858 {
2859         int not = 0;
2860
2861         /* blank or '*' mean the same */
2862         if (strcmp(buff, "*") == 0)
2863                 buff[0] = 0;
2864
2865         /* handle the case of 'dont filter this module' */
2866         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2867                 buff[0] = 0;
2868                 not = 1;
2869         }
2870
2871         return match_records(hash, buff, strlen(buff), mod, not);
2872 }
2873
2874 /*
2875  * We register the module command as a template to show others how
2876  * to register a command as well.
2877  */
2878
2879 static int
2880 ftrace_mod_callback(struct ftrace_hash *hash,
2881                     char *func, char *cmd, char *param, int enable)
2882 {
2883         char *mod;
2884         int ret = -EINVAL;
2885
2886         /*
2887          * cmd == 'mod' because we only registered this func
2888          * for the 'mod' ftrace_func_command.
2889          * But if you register one func with multiple commands,
2890          * you can tell which command was used by the cmd
2891          * parameter.
2892          */
2893
2894         /* we must have a module name */
2895         if (!param)
2896                 return ret;
2897
2898         mod = strsep(&param, ":");
2899         if (!strlen(mod))
2900                 return ret;
2901
2902         ret = ftrace_match_module_records(hash, func, mod);
2903         if (!ret)
2904                 ret = -EINVAL;
2905         if (ret < 0)
2906                 return ret;
2907
2908         return 0;
2909 }
2910
2911 static struct ftrace_func_command ftrace_mod_cmd = {
2912         .name                   = "mod",
2913         .func                   = ftrace_mod_callback,
2914 };
2915
2916 static int __init ftrace_mod_cmd_init(void)
2917 {
2918         return register_ftrace_command(&ftrace_mod_cmd);
2919 }
2920 core_initcall(ftrace_mod_cmd_init);
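
/*
 * The "mod" command is driven through set_ftrace_filter, e.g.:
 *
 *	echo 'write*:mod:ext3' > set_ftrace_filter
 *
 * ftrace_process_regex() below splits that into func == "write*",
 * cmd == "mod" and param == "ext3" before calling the callback. A
 * hedged sketch of registering your own command (name and callback
 * are hypothetical; the callback signature matches the one above):
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */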
2921
2922 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2923                                       struct ftrace_ops *op, struct pt_regs *pt_regs)
2924 {
2925         struct ftrace_func_probe *entry;
2926         struct hlist_head *hhd;
2927         unsigned long key;
2928
2929         key = hash_long(ip, FTRACE_HASH_BITS);
2930
2931         hhd = &ftrace_func_hash[key];
2932
2933         if (hlist_empty(hhd))
2934                 return;
2935
2936         /*
2937          * Disable preemption for these calls to prevent an RCU grace
2938          * period. This syncs the hash iteration and freeing of items
2939          * on the hash. rcu_read_lock is too dangerous here.
2940          */
2941         preempt_disable_notrace();
2942         hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
2943                 if (entry->ip == ip)
2944                         entry->ops->func(ip, parent_ip, &entry->data);
2945         }
2946         preempt_enable_notrace();
2947 }
2948
2949 static struct ftrace_ops trace_probe_ops __read_mostly =
2950 {
2951         .func           = function_trace_probe_call,
2952         .flags          = FTRACE_OPS_FL_INITIALIZED,
2953         INIT_REGEX_LOCK(trace_probe_ops)
2954 };
2955
2956 static int ftrace_probe_registered;
2957
2958 static void __enable_ftrace_function_probe(void)
2959 {
2960         int ret;
2961         int i;
2962
2963         if (ftrace_probe_registered) {
2964                 /* still need to update the function call sites */
2965                 if (ftrace_enabled)
2966                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2967                 return;
2968         }
2969
2970         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2971                 struct hlist_head *hhd = &ftrace_func_hash[i];
2972                 if (hhd->first)
2973                         break;
2974         }
2975         /* Nothing registered? */
2976         if (i == FTRACE_FUNC_HASHSIZE)
2977                 return;
2978
2979         ret = __register_ftrace_function(&trace_probe_ops);
2980         if (!ret)
2981                 ret = ftrace_startup(&trace_probe_ops, 0);
2982
2983         ftrace_probe_registered = 1;
2984 }
2985
2986 static void __disable_ftrace_function_probe(void)
2987 {
2988         int ret;
2989         int i;
2990
2991         if (!ftrace_probe_registered)
2992                 return;
2993
2994         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2995                 struct hlist_head *hhd = &ftrace_func_hash[i];
2996                 if (hhd->first)
2997                         return;
2998         }
2999
3000         /* no more funcs left */
3001         ret = __unregister_ftrace_function(&trace_probe_ops);
3002         if (!ret)
3003                 ftrace_shutdown(&trace_probe_ops, 0);
3004
3005         ftrace_probe_registered = 0;
3006 }
3007
3008
3009 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3010 {
3011         if (entry->ops->free)
3012                 entry->ops->free(entry->ops, entry->ip, &entry->data);
3013         kfree(entry);
3014 }
3015
3016 int
3017 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3018                               void *data)
3019 {
3020         struct ftrace_func_probe *entry;
3021         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3022         struct ftrace_hash *hash;
3023         struct ftrace_page *pg;
3024         struct dyn_ftrace *rec;
3025         int type, len, not;
3026         unsigned long key;
3027         int count = 0;
3028         char *search;
3029         int ret;
3030
3031         type = filter_parse_regex(glob, strlen(glob), &search, &not);
3032         len = strlen(search);
3033
3034         /* we do not support '!' for function probes */
3035         if (WARN_ON(not))
3036                 return -EINVAL;
3037
3038         mutex_lock(&trace_probe_ops.regex_lock);
3039
3040         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3041         if (!hash) {
3042                 count = -ENOMEM;
3043                 goto out;
3044         }
3045
3046         if (unlikely(ftrace_disabled)) {
3047                 count = -ENODEV;
3048                 goto out;
3049         }
3050
3051         mutex_lock(&ftrace_lock);
3052
3053         do_for_each_ftrace_rec(pg, rec) {
3054
3055                 if (!ftrace_match_record(rec, NULL, search, len, type))
3056                         continue;
3057
3058                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3059                 if (!entry) {
3060                         /* If we did not process any, then return error */
3061                         if (!count)
3062                                 count = -ENOMEM;
3063                         goto out_unlock;
3064                 }
3065
3066                 count++;
3067
3068                 entry->data = data;
3069
3070                 /*
3071                  * The caller might want to do something special
3072                  * for each function we find. We call the callback
3073                  * to give the caller an opportunity to do so.
3074                  */
3075                 if (ops->init) {
3076                         if (ops->init(ops, rec->ip, &entry->data) < 0) {
3077                                 /* caller does not like this func */
3078                                 kfree(entry);
3079                                 continue;
3080                         }
3081                 }
3082
3083                 ret = enter_record(hash, rec, 0);
3084                 if (ret < 0) {
3085                         kfree(entry);
3086                         count = ret;
3087                         goto out_unlock;
3088                 }
3089
3090                 entry->ops = ops;
3091                 entry->ip = rec->ip;
3092
3093                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3094                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3095
3096         } while_for_each_ftrace_rec();
3097
3098         ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3099         if (ret < 0)
3100                 count = ret;
3101
3102         __enable_ftrace_function_probe();
3103
3104  out_unlock:
3105         mutex_unlock(&ftrace_lock);
3106  out:
3107         mutex_unlock(&trace_probe_ops.regex_lock);
3108         free_ftrace_hash(hash);
3109
3110         return count;
3111 }
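
/*
 * Hedged usage sketch (probe name and callback are hypothetical): a
 * probe is a struct ftrace_probe_ops whose ->func fires from
 * function_trace_probe_call() above each time a matched function is
 * hit:
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		// called on every entry to a matched function
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe,
 *	};
 *
 *	register_ftrace_function_probe("sched*", &my_probe_ops, NULL);
 *
 * This is the mechanism behind commands such as 'schedule:traceoff'
 * written to set_ftrace_filter.
 */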
3112
3113 enum {
3114         PROBE_TEST_FUNC         = 1,
3115         PROBE_TEST_DATA         = 2
3116 };
3117
3118 static void
3119 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3120                                   void *data, int flags)
3121 {
3122         struct ftrace_func_entry *rec_entry;
3123         struct ftrace_func_probe *entry;
3124         struct ftrace_func_probe *p;
3125         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3126         struct list_head free_list;
3127         struct ftrace_hash *hash;
3128         struct hlist_node *tmp;
3129         char str[KSYM_SYMBOL_LEN];
3130         int type = MATCH_FULL;
3131         int i, len = 0;
3132         char *search;
3133
3134         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3135                 glob = NULL;
3136         else if (glob) {
3137                 int not;
3138
3139                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3140                 len = strlen(search);
3141
3142                 /* we do not support '!' for function probes */
3143                 if (WARN_ON(not))
3144                         return;
3145         }
3146
3147         mutex_lock(&trace_probe_ops.regex_lock);
3148
3149         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3150         if (!hash)
3151                 /* Hmm, should report this somehow */
3152                 goto out_unlock;
3153
3154         INIT_LIST_HEAD(&free_list);
3155
3156         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3157                 struct hlist_head *hhd = &ftrace_func_hash[i];
3158
3159                 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3160
3161                         /* break up if statements for readability */
3162                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3163                                 continue;
3164
3165                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3166                                 continue;
3167
3168                         /* do this last, since it is the most expensive */
3169                         if (glob) {
3170                                 kallsyms_lookup(entry->ip, NULL, NULL,
3171                                                 NULL, str);
3172                                 if (!ftrace_match(str, glob, len, type))
3173                                         continue;
3174                         }
3175
3176                         rec_entry = ftrace_lookup_ip(hash, entry->ip);
3177                         /* It is possible more than one entry had this ip */
3178                         if (rec_entry)
3179                                 free_hash_entry(hash, rec_entry);
3180
3181                         hlist_del_rcu(&entry->node);
3182                         list_add(&entry->free_list, &free_list);
3183                 }
3184         }
3185         mutex_lock(&ftrace_lock);
3186         __disable_ftrace_function_probe();
3187         /*
3188          * Remove after the disable is called. Otherwise, if the last
3189          * probe is removed, a null hash means *all enabled*.
3190          */
3191         ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3192         synchronize_sched();
3193         list_for_each_entry_safe(entry, p, &free_list, free_list) {
3194                 list_del(&entry->free_list);
3195                 ftrace_free_entry(entry);
3196         }
3197         mutex_unlock(&ftrace_lock);
3198
3199  out_unlock:
3200         mutex_unlock(&trace_probe_ops.regex_lock);
3201         free_ftrace_hash(hash);
3202 }
3203
3204 void
3205 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3206                                 void *data)
3207 {
3208         __unregister_ftrace_function_probe(glob, ops, data,
3209                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3210 }
3211
3212 void
3213 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3214 {
3215         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3216 }
3217
3218 void unregister_ftrace_function_probe_all(char *glob)
3219 {
3220         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3221 }
3222
3223 static LIST_HEAD(ftrace_commands);
3224 static DEFINE_MUTEX(ftrace_cmd_mutex);
3225
3226 int register_ftrace_command(struct ftrace_func_command *cmd)
3227 {
3228         struct ftrace_func_command *p;
3229         int ret = 0;
3230
3231         mutex_lock(&ftrace_cmd_mutex);
3232         list_for_each_entry(p, &ftrace_commands, list) {
3233                 if (strcmp(cmd->name, p->name) == 0) {
3234                         ret = -EBUSY;
3235                         goto out_unlock;
3236                 }
3237         }
3238         list_add(&cmd->list, &ftrace_commands);
3239  out_unlock:
3240         mutex_unlock(&ftrace_cmd_mutex);
3241
3242         return ret;
3243 }
3244
3245 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3246 {
3247         struct ftrace_func_command *p, *n;
3248         int ret = -ENODEV;
3249
3250         mutex_lock(&ftrace_cmd_mutex);
3251         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3252                 if (strcmp(cmd->name, p->name) == 0) {
3253                         ret = 0;
3254                         list_del_init(&p->list);
3255                         goto out_unlock;
3256                 }
3257         }
3258  out_unlock:
3259         mutex_unlock(&ftrace_cmd_mutex);
3260
3261         return ret;
3262 }
3263
3264 static int ftrace_process_regex(struct ftrace_hash *hash,
3265                                 char *buff, int len, int enable)
3266 {
3267         char *func, *command, *next = buff;
3268         struct ftrace_func_command *p;
3269         int ret = -EINVAL;
3270
3271         func = strsep(&next, ":");
3272
3273         if (!next) {
3274                 ret = ftrace_match_records(hash, func, len);
3275                 if (!ret)
3276                         ret = -EINVAL;
3277                 if (ret < 0)
3278                         return ret;
3279                 return 0;
3280         }
3281
3282         /* command found */
3283
3284         command = strsep(&next, ":");
3285
3286         mutex_lock(&ftrace_cmd_mutex);
3287         list_for_each_entry(p, &ftrace_commands, list) {
3288                 if (strcmp(p->name, command) == 0) {
3289                         ret = p->func(hash, func, command, next, enable);
3290                         goto out_unlock;
3291                 }
3292         }
3293  out_unlock:
3294         mutex_unlock(&ftrace_cmd_mutex);
3295
3296         return ret;
3297 }
3298
3299 static ssize_t
3300 ftrace_regex_write(struct file *file, const char __user *ubuf,
3301                    size_t cnt, loff_t *ppos, int enable)
3302 {
3303         struct ftrace_iterator *iter;
3304         struct trace_parser *parser;
3305         ssize_t ret, read;
3306
3307         if (!cnt)
3308                 return 0;
3309
3310         if (file->f_mode & FMODE_READ) {
3311                 struct seq_file *m = file->private_data;
3312                 iter = m->private;
3313         } else
3314                 iter = file->private_data;
3315
3316         if (unlikely(ftrace_disabled))
3317                 return -ENODEV;
3318
3319         /* iter->hash is a local copy, so we don't need regex_lock */
3320
3321         parser = &iter->parser;
3322         read = trace_get_user(parser, ubuf, cnt, ppos);
3323
3324         if (read >= 0 && trace_parser_loaded(parser) &&
3325             !trace_parser_cont(parser)) {
3326                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3327                                            parser->idx, enable);
3328                 trace_parser_clear(parser);
3329                 if (ret < 0)
3330                         goto out;
3331         }
3332
3333         ret = read;
3334  out:
3335         return ret;
3336 }
3337
3338 ssize_t
3339 ftrace_filter_write(struct file *file, const char __user *ubuf,
3340                     size_t cnt, loff_t *ppos)
3341 {
3342         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3343 }
3344
3345 ssize_t
3346 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3347                      size_t cnt, loff_t *ppos)
3348 {
3349         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3350 }
3351
3352 static int
3353 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3354 {
3355         struct ftrace_func_entry *entry;
3356
3357         if (!ftrace_location(ip))
3358                 return -EINVAL;
3359
3360         if (remove) {
3361                 entry = ftrace_lookup_ip(hash, ip);
3362                 if (!entry)
3363                         return -ENOENT;
3364                 free_hash_entry(hash, entry);
3365                 return 0;
3366         }
3367
3368         return add_hash_entry(hash, ip);
3369 }
3370
3371 static int
3372 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3373                 unsigned long ip, int remove, int reset, int enable)
3374 {
3375         struct ftrace_hash **orig_hash;
3376         struct ftrace_hash *hash;
3377         int ret;
3378
3379         /* All global ops use the global ops filters */
3380         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3381                 ops = &global_ops;
3382
3383         if (unlikely(ftrace_disabled))
3384                 return -ENODEV;
3385
3386         mutex_lock(&ops->regex_lock);
3387
3388         if (enable)
3389                 orig_hash = &ops->filter_hash;
3390         else
3391                 orig_hash = &ops->notrace_hash;
3392
3393         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3394         if (!hash) {
3395                 ret = -ENOMEM;
3396                 goto out_regex_unlock;
3397         }
3398
3399         if (reset)
3400                 ftrace_filter_reset(hash);
3401         if (buf && !ftrace_match_records(hash, buf, len)) {
3402                 ret = -EINVAL;
3403                 goto out_regex_unlock;
3404         }
3405         if (ip) {
3406                 ret = ftrace_match_addr(hash, ip, remove);
3407                 if (ret < 0)
3408                         goto out_regex_unlock;
3409         }
3410
3411         mutex_lock(&ftrace_lock);
3412         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3413         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3414             && ftrace_enabled)
3415                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3416
3417         mutex_unlock(&ftrace_lock);
3418
3419  out_regex_unlock:
3420         mutex_unlock(&ops->regex_lock);
3421
3422         free_ftrace_hash(hash);
3423         return ret;
3424 }
3425
3426 static int
3427 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3428                 int reset, int enable)
3429 {
3430         return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3431 }
3432
3433 /**
3434  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3435  * @ops - the ops to set the filter with
3436  * @ip - the address to add to or remove from the filter.
3437  * @remove - non zero to remove the ip from the filter
3438  * @reset - non zero to reset all filters before applying this filter.
3439  *
3440  * Filters denote which functions should be enabled when tracing is enabled.
3441  * If @ip is NULL, it fails to update the filter.
3442  */
3443 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3444                          int remove, int reset)
3445 {
3446         ftrace_ops_init(ops);
3447         return ftrace_set_addr(ops, ip, remove, reset, 1);
3448 }
3449 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
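
/*
 * Editor's sketch (illustrative, not compiled here): filtering an ops
 * to a single address. The ops "my_ops" and the use of
 * kallsyms_lookup_name() to obtain the address are assumptions for the
 * example.
 *
 *	unsigned long ip = kallsyms_lookup_name("schedule");
 *
 *	if (ip)
 *		ftrace_set_filter_ip(&my_ops, ip, 0, 1);
 */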
3450
3451 static int
3452 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3453                  int reset, int enable)
3454 {
3455         return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3456 }
3457
3458 /**
3459  * ftrace_set_filter - set a function to filter on in ftrace
3460  * @ops - the ops to set the filter with
3461  * @buf - the string that holds the function filter text.
3462  * @len - the length of the string.
3463  * @reset - non zero to reset all filters before applying this filter.
3464  *
3465  * Filters denote which functions should be enabled when tracing is enabled.
3466  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3467  */
3468 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3469                        int len, int reset)
3470 {
3471         ftrace_ops_init(ops);
3472         return ftrace_set_regex(ops, buf, len, reset, 1);
3473 }
3474 EXPORT_SYMBOL_GPL(ftrace_set_filter);
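
/*
 * Editor's sketch (illustrative, not compiled here): setting a glob
 * filter on an ops before it is registered. "my_ops" is an assumption;
 * the cast matches the unsigned char *buf parameter.
 *
 *	char *pat = "vfs_*";
 *
 *	ftrace_set_filter(&my_ops, (unsigned char *)pat, strlen(pat), 1);
 */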
3475
3476 /**
3477  * ftrace_set_notrace - set a function to not trace in ftrace
3478  * @ops - the ops to set the notrace filter with
3479  * @buf - the string that holds the function notrace text.
3480  * @len - the length of the string.
3481  * @reset - non zero to reset all filters before applying this filter.
3482  *
3483  * Notrace Filters denote which functions should not be enabled when tracing
3484  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3485  * for tracing.
3486  */
3487 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3488                         int len, int reset)
3489 {
3490         ftrace_ops_init(ops);
3491         return ftrace_set_regex(ops, buf, len, reset, 0);
3492 }
3493 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3494 /**
3495  * ftrace_set_global_filter - set a function to filter on with the global ops
3496  * @buf - the string that holds the function filter text.
3497  * @len - the length of the string.
3498  * @reset - non zero to reset all filters before applying this filter.
3499  *
3500  * Filters denote which functions should be enabled when tracing is enabled.
3501  * If @buf is NULL and reset is set, all functions will be enabled for
3502  * tracing.
3503  */
3504 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3505 {
3506         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3507 }
3508 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3509
3510 /**
3511  * ftrace_set_global_notrace - set a function to not trace with the global ops
3512  * @buf - the string that holds the function notrace text.
3513  * @len - the length of the string.
3514  * @reset - non zero to reset all filters before applying this filter.
3515  *
3516  * Notrace filters denote which functions should not be enabled when
3517  * tracing is enabled. If @buf is NULL and reset is set, all functions
3518  * will be enabled for tracing.
3519  */
3521 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3522 {
3523         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3524 }
3525 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
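
/*
 * Editor's sketch (illustrative): the global variants operate on the
 * shared global_ops used by the function tracer itself:
 *
 *	ftrace_set_global_filter((unsigned char *)"vfs_read", 8, 1);
 *	ftrace_set_global_notrace((unsigned char *)"*spin_lock*", 11, 0);
 */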
3526
3527 /*
3528  * command line interface to allow users to set filters on boot up.
3529  */
3530 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3531 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3532 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3533
3534 static int __init set_ftrace_notrace(char *str)
3535 {
3536         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3537         return 1;
3538 }
3539 __setup("ftrace_notrace=", set_ftrace_notrace);
3540
3541 static int __init set_ftrace_filter(char *str)
3542 {
3543         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3544         return 1;
3545 }
3546 __setup("ftrace_filter=", set_ftrace_filter);
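
/*
 * Editor's sketch: boot command line usage of the parameters above.
 * Lists are comma separated and handed to ftrace_set_early_filter()
 * at boot:
 *
 *	ftrace_filter=sys_open,sys_close ftrace_notrace=*spin_lock*
 *
 * With CONFIG_FUNCTION_GRAPH_TRACER, ftrace_graph_filter= accepts a
 * comma separated list the same way.
 */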
3547
3548 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3549 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3550 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3551
3552 static int __init set_graph_function(char *str)
3553 {
3554         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3555         return 1;
3556 }
3557 __setup("ftrace_graph_filter=", set_graph_function);
3558
3559 static void __init set_ftrace_early_graph(char *buf)
3560 {
3561         int ret;
3562         char *func;
3563
3564         while (buf) {
3565                 func = strsep(&buf, ",");
3566                 /* we allow only one expression at a time */
3567                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3568                                       func);
3569                 if (ret)
3570                         printk(KERN_DEBUG "ftrace: function %s not "
3571                                           "traceable\n", func);
3572         }
3573 }
3574 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3575
3576 void __init
3577 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3578 {
3579         char *func;
3580
3581         ftrace_ops_init(ops);
3582
3583         while (buf) {
3584                 func = strsep(&buf, ",");
3585                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3586         }
3587 }
3588
3589 static void __init set_ftrace_early_filters(void)
3590 {
3591         if (ftrace_filter_buf[0])
3592                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3593         if (ftrace_notrace_buf[0])
3594                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3595 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3596         if (ftrace_graph_buf[0])
3597                 set_ftrace_early_graph(ftrace_graph_buf);
3598 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3599 }
3600
3601 int ftrace_regex_release(struct inode *inode, struct file *file)
3602 {
3603         struct seq_file *m = (struct seq_file *)file->private_data;
3604         struct ftrace_iterator *iter;
3605         struct ftrace_hash **orig_hash;
3606         struct trace_parser *parser;
3607         int filter_hash;
3608         int ret;
3609
3610         if (file->f_mode & FMODE_READ) {
3611                 iter = m->private;
3612                 seq_release(inode, file);
3613         } else
3614                 iter = file->private_data;
3615
3616         parser = &iter->parser;
3617         if (trace_parser_loaded(parser)) {
3618                 parser->buffer[parser->idx] = 0;
3619                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3620         }
3621
3622         trace_parser_put(parser);
3623
3624         mutex_lock(&iter->ops->regex_lock);
3625
3626         if (file->f_mode & FMODE_WRITE) {
3627                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3628
3629                 if (filter_hash)
3630                         orig_hash = &iter->ops->filter_hash;
3631                 else
3632                         orig_hash = &iter->ops->notrace_hash;
3633
3634                 mutex_lock(&ftrace_lock);
3635                 ret = ftrace_hash_move(iter->ops, filter_hash,
3636                                        orig_hash, iter->hash);
3637                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3638                     && ftrace_enabled)
3639                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3640
3641                 mutex_unlock(&ftrace_lock);
3642         }
3643
3644         mutex_unlock(&iter->ops->regex_lock);
3645         free_ftrace_hash(iter->hash);
3646         kfree(iter);
3647
3648         return 0;
3649 }
3650
3651 static const struct file_operations ftrace_avail_fops = {
3652         .open = ftrace_avail_open,
3653         .read = seq_read,
3654         .llseek = seq_lseek,
3655         .release = seq_release_private,
3656 };
3657
3658 static const struct file_operations ftrace_enabled_fops = {
3659         .open = ftrace_enabled_open,
3660         .read = seq_read,
3661         .llseek = seq_lseek,
3662         .release = seq_release_private,
3663 };
3664
3665 static const struct file_operations ftrace_filter_fops = {
3666         .open = ftrace_filter_open,
3667         .read = seq_read,
3668         .write = ftrace_filter_write,
3669         .llseek = ftrace_filter_lseek,
3670         .release = ftrace_regex_release,
3671 };
3672
3673 static const struct file_operations ftrace_notrace_fops = {
3674         .open = ftrace_notrace_open,
3675         .read = seq_read,
3676         .write = ftrace_notrace_write,
3677         .llseek = ftrace_filter_lseek,
3678         .release = ftrace_regex_release,
3679 };
3680
3681 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3682
3683 static DEFINE_MUTEX(graph_lock);
3684
3685 int ftrace_graph_count;
3686 int ftrace_graph_filter_enabled;
3687 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3688
3689 static void *
3690 __g_next(struct seq_file *m, loff_t *pos)
3691 {
3692         if (*pos >= ftrace_graph_count)
3693                 return NULL;
3694         return &ftrace_graph_funcs[*pos];
3695 }
3696
3697 static void *
3698 g_next(struct seq_file *m, void *v, loff_t *pos)
3699 {
3700         (*pos)++;
3701         return __g_next(m, pos);
3702 }
3703
3704 static void *g_start(struct seq_file *m, loff_t *pos)
3705 {
3706         mutex_lock(&graph_lock);
3707
3708         /* Nothing set; tell g_show to print that all functions are enabled */
3709         if (!ftrace_graph_filter_enabled && !*pos)
3710                 return (void *)1;
3711
3712         return __g_next(m, pos);
3713 }
3714
3715 static void g_stop(struct seq_file *m, void *p)
3716 {
3717         mutex_unlock(&graph_lock);
3718 }
3719
3720 static int g_show(struct seq_file *m, void *v)
3721 {
3722         unsigned long *ptr = v;
3723
3724         if (!ptr)
3725                 return 0;
3726
3727         if (ptr == (unsigned long *)1) {
3728                 seq_printf(m, "#### all functions enabled ####\n");
3729                 return 0;
3730         }
3731
3732         seq_printf(m, "%ps\n", (void *)*ptr);
3733
3734         return 0;
3735 }
3736
3737 static const struct seq_operations ftrace_graph_seq_ops = {
3738         .start = g_start,
3739         .next = g_next,
3740         .stop = g_stop,
3741         .show = g_show,
3742 };
3743
3744 static int
3745 ftrace_graph_open(struct inode *inode, struct file *file)
3746 {
3747         int ret = 0;
3748
3749         if (unlikely(ftrace_disabled))
3750                 return -ENODEV;
3751
3752         mutex_lock(&graph_lock);
3753         if ((file->f_mode & FMODE_WRITE) &&
3754             (file->f_flags & O_TRUNC)) {
3755                 ftrace_graph_filter_enabled = 0;
3756                 ftrace_graph_count = 0;
3757                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3758         }
3759         mutex_unlock(&graph_lock);
3760
3761         if (file->f_mode & FMODE_READ)
3762                 ret = seq_open(file, &ftrace_graph_seq_ops);
3763
3764         return ret;
3765 }
3766
3767 static int
3768 ftrace_graph_release(struct inode *inode, struct file *file)
3769 {
3770         if (file->f_mode & FMODE_READ)
3771                 seq_release(inode, file);
3772         return 0;
3773 }
3774
3775 static int
3776 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3777 {
3778         struct dyn_ftrace *rec;
3779         struct ftrace_page *pg;
3780         int search_len;
3781         int fail = 1;
3782         int type, not;
3783         char *search;
3784         bool exists;
3785         int i;
3786
3787         /* decode regex */
3788         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3789         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3790                 return -EBUSY;
3791
3792         search_len = strlen(search);
3793
3794         mutex_lock(&ftrace_lock);
3795
3796         if (unlikely(ftrace_disabled)) {
3797                 mutex_unlock(&ftrace_lock);
3798                 return -ENODEV;
3799         }
3800
3801         do_for_each_ftrace_rec(pg, rec) {
3802
3803                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3804                         /* if it is in the array */
3805                         exists = false;
3806                         for (i = 0; i < *idx; i++) {
3807                                 if (array[i] == rec->ip) {
3808                                         exists = true;
3809                                         break;
3810                                 }
3811                         }
3812
3813                         if (!not) {
3814                                 fail = 0;
3815                                 if (!exists) {
3816                                         array[(*idx)++] = rec->ip;
3817                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3818                                                 goto out;
3819                                 }
3820                         } else {
3821                                 if (exists) {
3822                                         array[i] = array[--(*idx)];
3823                                         array[*idx] = 0;
3824                                         fail = 0;
3825                                 }
3826                         }
3827                 }
3828         } while_for_each_ftrace_rec();
3829 out:
3830         mutex_unlock(&ftrace_lock);
3831
3832         if (fail)
3833                 return -EINVAL;
3834
3835         ftrace_graph_filter_enabled = !!(*idx);
3836
3837         return 0;
3838 }
3839
3840 static ssize_t
3841 ftrace_graph_write(struct file *file, const char __user *ubuf,
3842                    size_t cnt, loff_t *ppos)
3843 {
3844         struct trace_parser parser;
3845         ssize_t read, ret;
3846
3847         if (!cnt)
3848                 return 0;
3849
3850         mutex_lock(&graph_lock);
3851
3852         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3853                 ret = -ENOMEM;
3854                 goto out_unlock;
3855         }
3856
3857         read = trace_get_user(&parser, ubuf, cnt, ppos);
3858
3859         if (read >= 0 && trace_parser_loaded(&parser)) {
3860                 parser.buffer[parser.idx] = 0;
3861
3862                 /* we allow only one expression at a time */
3863                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3864                                         parser.buffer);
3865                 if (ret)
3866                         goto out_free;
3867         }
3868
3869         ret = read;
3870
3871 out_free:
3872         trace_parser_put(&parser);
3873 out_unlock:
3874         mutex_unlock(&graph_lock);
3875
3876         return ret;
3877 }
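
/*
 * Editor's sketch: set_graph_function accepts one expression per write,
 * so multiple functions are added with repeated writes (paths assume
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	echo sys_open > /sys/kernel/debug/tracing/set_graph_function
 *	echo sys_close >> /sys/kernel/debug/tracing/set_graph_function
 */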
3878
3879 static const struct file_operations ftrace_graph_fops = {
3880         .open           = ftrace_graph_open,
3881         .read           = seq_read,
3882         .write          = ftrace_graph_write,
3883         .llseek         = ftrace_filter_lseek,
3884         .release        = ftrace_graph_release,
3885 };
3886 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3887
3888 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3889 {
3891         trace_create_file("available_filter_functions", 0444,
3892                         d_tracer, NULL, &ftrace_avail_fops);
3893
3894         trace_create_file("enabled_functions", 0444,
3895                         d_tracer, NULL, &ftrace_enabled_fops);
3896
3897         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3898                         NULL, &ftrace_filter_fops);
3899
3900         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3901                                     NULL, &ftrace_notrace_fops);
3902
3903 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3904         trace_create_file("set_graph_function", 0444, d_tracer,
3905                                     NULL,
3906                                     &ftrace_graph_fops);
3907 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3908
3909         return 0;
3910 }
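
/*
 * Editor's sketch: with debugfs mounted at /sys/kernel/debug (an
 * assumption), the files created above are used like:
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'vfs_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	cat /sys/kernel/debug/tracing/enabled_functions
 */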
3911
3912 static int ftrace_cmp_ips(const void *a, const void *b)
3913 {
3914         const unsigned long *ipa = a;
3915         const unsigned long *ipb = b;
3916
3917         if (*ipa > *ipb)
3918                 return 1;
3919         if (*ipa < *ipb)
3920                 return -1;
3921         return 0;
3922 }
3923
3924 static void ftrace_swap_ips(void *a, void *b, int size)
3925 {
3926         unsigned long *ipa = a;
3927         unsigned long *ipb = b;
3928         unsigned long t;
3929
3930         t = *ipa;
3931         *ipa = *ipb;
3932         *ipb = t;
3933 }
3934
3935 static int ftrace_process_locs(struct module *mod,
3936                                unsigned long *start,
3937                                unsigned long *end)
3938 {
3939         struct ftrace_page *start_pg;
3940         struct ftrace_page *pg;
3941         struct dyn_ftrace *rec;
3942         unsigned long count;
3943         unsigned long *p;
3944         unsigned long addr;
3945         unsigned long flags = 0; /* Shut up gcc */
3946         int ret = -ENOMEM;
3947
3948         count = end - start;
3949
3950         if (!count)
3951                 return 0;
3952
3953         sort(start, count, sizeof(*start),
3954              ftrace_cmp_ips, ftrace_swap_ips);
3955
3956         start_pg = ftrace_allocate_pages(count);
3957         if (!start_pg)
3958                 return -ENOMEM;
3959
3960         mutex_lock(&ftrace_lock);
3961
3962         /*
3963          * Core and each module needs their own pages, as
3964          * modules will free them when they are removed.
3965          * Force a new page to be allocated for modules.
3966          */
3967         if (!mod) {
3968                 WARN_ON(ftrace_pages || ftrace_pages_start);
3969                 /* First initialization */
3970                 ftrace_pages = ftrace_pages_start = start_pg;
3971         } else {
3972                 if (!ftrace_pages)
3973                         goto out;
3974
3975                 if (WARN_ON(ftrace_pages->next)) {
3976                         /* Hmm, we have free pages? */
3977                         while (ftrace_pages->next)
3978                                 ftrace_pages = ftrace_pages->next;
3979                 }
3980
3981                 ftrace_pages->next = start_pg;
3982         }
3983
3984         p = start;
3985         pg = start_pg;
3986         while (p < end) {
3987                 addr = ftrace_call_adjust(*p++);
3988                 /*
3989                  * Some architecture linkers will pad between
3990                  * the different mcount_loc sections of different
3991                  * object files to satisfy alignments.
3992                  * Skip any NULL pointers.
3993                  */
3994                 if (!addr)
3995                         continue;
3996
3997                 if (pg->index == pg->size) {
3998                         /* We should have allocated enough */
3999                         if (WARN_ON(!pg->next))
4000                                 break;
4001                         pg = pg->next;
4002                 }
4003
4004                 rec = &pg->records[pg->index++];
4005                 rec->ip = addr;
4006         }
4007
4008         /* We should have used all pages */
4009         WARN_ON(pg->next);
4010
4011         /* Assign the last page to ftrace_pages */
4012         ftrace_pages = pg;
4013
4014         /* These new locations need to be initialized */
4015         ftrace_new_pgs = start_pg;
4016
4017         /*
4018          * We only need to disable interrupts on start up
4019          * because we are modifying code that an interrupt
4020          * may execute, and the modification is not atomic.
4021          * But for modules, nothing runs the code we modify
4022          * until we are finished with it, and there's no
4023          * reason to cause large interrupt latencies while we do it.
4024          */
4025         if (!mod)
4026                 local_irq_save(flags);
4027         ftrace_update_code(mod);
4028         if (!mod)
4029                 local_irq_restore(flags);
4030         ret = 0;
4031  out:
4032         mutex_unlock(&ftrace_lock);
4033
4034         return ret;
4035 }
4036
4037 #ifdef CONFIG_MODULES
4038
4039 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4040
4041 void ftrace_release_mod(struct module *mod)
4042 {
4043         struct dyn_ftrace *rec;
4044         struct ftrace_page **last_pg;
4045         struct ftrace_page *pg;
4046         int order;
4047
4048         mutex_lock(&ftrace_lock);
4049
4050         if (ftrace_disabled)
4051                 goto out_unlock;
4052
4053         /*
4054          * Each module has its own ftrace_pages, remove
4055          * them from the list.
4056          */
4057         last_pg = &ftrace_pages_start;
4058         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4059                 rec = &pg->records[0];
4060                 if (within_module_core(rec->ip, mod)) {
4061                         /*
4062                          * As core pages are first, the first
4063                          * page should never be a module page.
4064                          */
4065                         if (WARN_ON(pg == ftrace_pages_start))
4066                                 goto out_unlock;
4067
4068                         /* Check if we are deleting the last page */
4069                         if (pg == ftrace_pages)
4070                                 ftrace_pages = next_to_ftrace_page(last_pg);
4071
4072                         *last_pg = pg->next;
4073                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4074                         free_pages((unsigned long)pg->records, order);
4075                         kfree(pg);
4076                 } else
4077                         last_pg = &pg->next;
4078         }
4079  out_unlock:
4080         mutex_unlock(&ftrace_lock);
4081 }
4082
4083 static void ftrace_init_module(struct module *mod,
4084                                unsigned long *start, unsigned long *end)
4085 {
4086         if (ftrace_disabled || start == end)
4087                 return;
4088         ftrace_process_locs(mod, start, end);
4089 }
4090
4091 static int ftrace_module_notify_enter(struct notifier_block *self,
4092                                       unsigned long val, void *data)
4093 {
4094         struct module *mod = data;
4095
4096         if (val == MODULE_STATE_COMING)
4097                 ftrace_init_module(mod, mod->ftrace_callsites,
4098                                    mod->ftrace_callsites +
4099                                    mod->num_ftrace_callsites);
4100         return 0;
4101 }
4102
4103 static int ftrace_module_notify_exit(struct notifier_block *self,
4104                                      unsigned long val, void *data)
4105 {
4106         struct module *mod = data;
4107
4108         if (val == MODULE_STATE_GOING)
4109                 ftrace_release_mod(mod);
4110
4111         return 0;
4112 }
4113 #else
4114 static int ftrace_module_notify_enter(struct notifier_block *self,
4115                                       unsigned long val, void *data)
4116 {
4117         return 0;
4118 }
4119 static int ftrace_module_notify_exit(struct notifier_block *self,
4120                                      unsigned long val, void *data)
4121 {
4122         return 0;
4123 }
4124 #endif /* CONFIG_MODULES */
4125
4126 struct notifier_block ftrace_module_enter_nb = {
4127         .notifier_call = ftrace_module_notify_enter,
4128         .priority = INT_MAX,    /* Run before anything that can use kprobes */
4129 };
4130
4131 struct notifier_block ftrace_module_exit_nb = {
4132         .notifier_call = ftrace_module_notify_exit,
4133         .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4134 };
4135
4136 extern unsigned long __start_mcount_loc[];
4137 extern unsigned long __stop_mcount_loc[];
4138
4139 void __init ftrace_init(void)
4140 {
4141         unsigned long count, addr, flags;
4142         int ret;
4143
4144         /* Keep the ftrace pointer to the stub */
4145         addr = (unsigned long)ftrace_stub;
4146
4147         local_irq_save(flags);
4148         ftrace_dyn_arch_init(&addr);
4149         local_irq_restore(flags);
4150
4151         /* ftrace_dyn_arch_init places the return code in addr */
4152         if (addr)
4153                 goto failed;
4154
4155         count = __stop_mcount_loc - __start_mcount_loc;
4156
4157         ret = ftrace_dyn_table_alloc(count);
4158         if (ret)
4159                 goto failed;
4160
4161         last_ftrace_enabled = ftrace_enabled = 1;
4162
4163         ret = ftrace_process_locs(NULL,
4164                                   __start_mcount_loc,
4165                                   __stop_mcount_loc);
4166
4167         ret = register_module_notifier(&ftrace_module_enter_nb);
4168         if (ret)
4169                 pr_warning("Failed to register trace ftrace module enter notifier\n");
4170
4171         ret = register_module_notifier(&ftrace_module_exit_nb);
4172         if (ret)
4173                 pr_warning("Failed to register trace ftrace module exit notifier\n");
4174
4175         set_ftrace_early_filters();
4176
4177         return;
4178  failed:
4179         ftrace_disabled = 1;
4180 }
4181
4182 #else
4183
4184 static struct ftrace_ops global_ops = {
4185         .func                   = ftrace_stub,
4186         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4187         INIT_REGEX_LOCK(global_ops)
4188 };
4189
4190 static int __init ftrace_nodyn_init(void)
4191 {
4192         ftrace_enabled = 1;
4193         return 0;
4194 }
4195 core_initcall(ftrace_nodyn_init);
4196
4197 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4198 static inline void ftrace_startup_enable(int command) { }
4199 /* Keep as macros so we do not need to define the commands */
4200 # define ftrace_startup(ops, command)                   \
4201         ({                                              \
4202                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
4203                 0;                                      \
4204         })
4205 # define ftrace_shutdown(ops, command)  do { } while (0)
4206 # define ftrace_startup_sysctl()        do { } while (0)
4207 # define ftrace_shutdown_sysctl()       do { } while (0)
4208
4209 static inline int
4210 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
4211 {
4212         return 1;
4213 }
4214
4215 #endif /* CONFIG_DYNAMIC_FTRACE */
4216
4217 static void
4218 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4219                         struct ftrace_ops *op, struct pt_regs *regs)
4220 {
4221         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4222                 return;
4223
4224         /*
4225          * Some of the ops may be dynamically allocated,
4226          * they must be freed after a synchronize_sched().
4227          */
4228         preempt_disable_notrace();
4229         trace_recursion_set(TRACE_CONTROL_BIT);
4230         do_for_each_ftrace_op(op, ftrace_control_list) {
4231                 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4232                     !ftrace_function_local_disabled(op) &&
4233                     ftrace_ops_test(op, ip))
4234                         op->func(ip, parent_ip, op, regs);
4235         } while_for_each_ftrace_op(op);
4236         trace_recursion_clear(TRACE_CONTROL_BIT);
4237         preempt_enable_notrace();
4238 }
4239
4240 static struct ftrace_ops control_ops = {
4241         .func   = ftrace_ops_control_func,
4242         .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4243         INIT_REGEX_LOCK(control_ops)
4244 };
4245
4246 static inline void
4247 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4248                        struct ftrace_ops *ignored, struct pt_regs *regs)
4249 {
4250         struct ftrace_ops *op;
4251         int bit;
4252
4253         if (function_trace_stop)
4254                 return;
4255
4256         bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4257         if (bit < 0)
4258                 return;
4259
4260         /*
4261          * Some of the ops may be dynamically allocated,
4262          * they must be freed after a synchronize_sched().
4263          */
4264         preempt_disable_notrace();
4265         do_for_each_ftrace_op(op, ftrace_ops_list) {
4266                 if (ftrace_ops_test(op, ip))
4267                         op->func(ip, parent_ip, op, regs);
4268         } while_for_each_ftrace_op(op);
4269         preempt_enable_notrace();
4270         trace_clear_recursion(bit);
4271 }
4272
4273 /*
4274  * Some archs only support passing ip and parent_ip. Even though
4275  * the list function ignores the op parameter, we do not want any
4276  * C side effects, where a function is called without the caller
4277  * sending a third parameter.
4278  * Archs must support both regs and ftrace_ops at the same time:
4279  * if they support ftrace_ops, it is assumed they support regs.
4280  * If callbacks want to use regs, they must either check for regs
4281  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4282  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4283  * An architecture can pass partial regs with ftrace_ops and still
4284  * set ARCH_SUPPORTS_FTRACE_OPS.
4285  */
4286 #if ARCH_SUPPORTS_FTRACE_OPS
4287 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4288                                  struct ftrace_ops *op, struct pt_regs *regs)
4289 {
4290         __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4291 }
4292 #else
4293 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4294 {
4295         __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4296 }
4297 #endif
4298
4299 static void clear_ftrace_swapper(void)
4300 {
4301         struct task_struct *p;
4302         int cpu;
4303
4304         get_online_cpus();
4305         for_each_online_cpu(cpu) {
4306                 p = idle_task(cpu);
4307                 clear_tsk_trace_trace(p);
4308         }
4309         put_online_cpus();
4310 }
4311
4312 static void set_ftrace_swapper(void)
4313 {
4314         struct task_struct *p;
4315         int cpu;
4316
4317         get_online_cpus();
4318         for_each_online_cpu(cpu) {
4319                 p = idle_task(cpu);
4320                 set_tsk_trace_trace(p);
4321         }
4322         put_online_cpus();
4323 }
4324
4325 static void clear_ftrace_pid(struct pid *pid)
4326 {
4327         struct task_struct *p;
4328
4329         rcu_read_lock();
4330         do_each_pid_task(pid, PIDTYPE_PID, p) {
4331                 clear_tsk_trace_trace(p);
4332         } while_each_pid_task(pid, PIDTYPE_PID, p);
4333         rcu_read_unlock();
4334
4335         put_pid(pid);
4336 }
4337
4338 static void set_ftrace_pid(struct pid *pid)
4339 {
4340         struct task_struct *p;
4341
4342         rcu_read_lock();
4343         do_each_pid_task(pid, PIDTYPE_PID, p) {
4344                 set_tsk_trace_trace(p);
4345         } while_each_pid_task(pid, PIDTYPE_PID, p);
4346         rcu_read_unlock();
4347 }
4348
4349 static void clear_ftrace_pid_task(struct pid *pid)
4350 {
4351         if (pid == ftrace_swapper_pid)
4352                 clear_ftrace_swapper();
4353         else
4354                 clear_ftrace_pid(pid);
4355 }
4356
4357 static void set_ftrace_pid_task(struct pid *pid)
4358 {
4359         if (pid == ftrace_swapper_pid)
4360                 set_ftrace_swapper();
4361         else
4362                 set_ftrace_pid(pid);
4363 }
4364
4365 static int ftrace_pid_add(int p)
4366 {
4367         struct pid *pid;
4368         struct ftrace_pid *fpid;
4369         int ret = -EINVAL;
4370
4371         mutex_lock(&ftrace_lock);
4372
4373         if (!p)
4374                 pid = ftrace_swapper_pid;
4375         else
4376                 pid = find_get_pid(p);
4377
4378         if (!pid)
4379                 goto out;
4380
4381         ret = 0;
4382
4383         list_for_each_entry(fpid, &ftrace_pids, list)
4384                 if (fpid->pid == pid)
4385                         goto out_put;
4386
4387         ret = -ENOMEM;
4388
4389         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4390         if (!fpid)
4391                 goto out_put;
4392
4393         list_add(&fpid->list, &ftrace_pids);
4394         fpid->pid = pid;
4395
4396         set_ftrace_pid_task(pid);
4397
4398         ftrace_update_pid_func();
4399         ftrace_startup_enable(0);
4400
4401         mutex_unlock(&ftrace_lock);
4402         return 0;
4403
4404 out_put:
4405         if (pid != ftrace_swapper_pid)
4406                 put_pid(pid);
4407
4408 out:
4409         mutex_unlock(&ftrace_lock);
4410         return ret;
4411 }
4412
4413 static void ftrace_pid_reset(void)
4414 {
4415         struct ftrace_pid *fpid, *safe;
4416
4417         mutex_lock(&ftrace_lock);
4418         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4419                 struct pid *pid = fpid->pid;
4420
4421                 clear_ftrace_pid_task(pid);
4422
4423                 list_del(&fpid->list);
4424                 kfree(fpid);
4425         }
4426
4427         ftrace_update_pid_func();
4428         ftrace_startup_enable(0);
4429
4430         mutex_unlock(&ftrace_lock);
4431 }
4432
4433 static void *fpid_start(struct seq_file *m, loff_t *pos)
4434 {
4435         mutex_lock(&ftrace_lock);
4436
4437         if (list_empty(&ftrace_pids) && (!*pos))
4438                 return (void *) 1;
4439
4440         return seq_list_start(&ftrace_pids, *pos);
4441 }
4442
4443 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4444 {
4445         if (v == (void *)1)
4446                 return NULL;
4447
4448         return seq_list_next(v, &ftrace_pids, pos);
4449 }
4450
4451 static void fpid_stop(struct seq_file *m, void *p)
4452 {
4453         mutex_unlock(&ftrace_lock);
4454 }
4455
4456 static int fpid_show(struct seq_file *m, void *v)
4457 {
4458         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4459
4460         if (v == (void *)1) {
4461                 seq_printf(m, "no pid\n");
4462                 return 0;
4463         }
4464
4465         if (fpid->pid == ftrace_swapper_pid)
4466                 seq_printf(m, "swapper tasks\n");
4467         else
4468                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4469
4470         return 0;
4471 }
4472
4473 static const struct seq_operations ftrace_pid_sops = {
4474         .start = fpid_start,
4475         .next = fpid_next,
4476         .stop = fpid_stop,
4477         .show = fpid_show,
4478 };
4479
4480 static int
4481 ftrace_pid_open(struct inode *inode, struct file *file)
4482 {
4483         int ret = 0;
4484
4485         if ((file->f_mode & FMODE_WRITE) &&
4486             (file->f_flags & O_TRUNC))
4487                 ftrace_pid_reset();
4488
4489         if (file->f_mode & FMODE_READ)
4490                 ret = seq_open(file, &ftrace_pid_sops);
4491
4492         return ret;
4493 }
4494
4495 static ssize_t
4496 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4497                    size_t cnt, loff_t *ppos)
4498 {
4499         char buf[64], *tmp;
4500         long val;
4501         int ret;
4502
4503         if (cnt >= sizeof(buf))
4504                 return -EINVAL;
4505
4506         if (copy_from_user(&buf, ubuf, cnt))
4507                 return -EFAULT;
4508
4509         buf[cnt] = 0;
4510
4511         /*
4512          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4513          * to clear the filter quietly.
4514          */
4515         tmp = strstrip(buf);
4516         if (strlen(tmp) == 0)
4517                 return 1;
4518
4519         ret = kstrtol(tmp, 10, &val);
4520         if (ret < 0)
4521                 return ret;
4522
4523         ret = ftrace_pid_add(val);
4524
4525         return ret ? ret : cnt;
4526 }
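
/*
 * Editor's sketch of the writes accepted by the parsing above:
 *
 *	echo 1234 > set_ftrace_pid   (trace only PID 1234; repeat to add)
 *	echo 0 > set_ftrace_pid      (trace the per-cpu swapper tasks)
 *	echo > set_ftrace_pid        (clear the filter quietly)
 */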
4527
4528 static int
4529 ftrace_pid_release(struct inode *inode, struct file *file)
4530 {
4531         if (file->f_mode & FMODE_READ)
4532                 seq_release(inode, file);
4533
4534         return 0;
4535 }
4536
4537 static const struct file_operations ftrace_pid_fops = {
4538         .open           = ftrace_pid_open,
4539         .write          = ftrace_pid_write,
4540         .read           = seq_read,
4541         .llseek         = ftrace_filter_lseek,
4542         .release        = ftrace_pid_release,
4543 };
4544
4545 static __init int ftrace_init_debugfs(void)
4546 {
4547         struct dentry *d_tracer;
4548
4549         d_tracer = tracing_init_dentry();
4550         if (!d_tracer)
4551                 return 0;
4552
4553         ftrace_init_dyn_debugfs(d_tracer);
4554
4555         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4556                             NULL, &ftrace_pid_fops);
4557
4558         ftrace_profile_debugfs(d_tracer);
4559
4560         return 0;
4561 }
4562 fs_initcall(ftrace_init_debugfs);
4563
4564 /**
4565  * ftrace_kill - kill ftrace
4566  *
4567  * This function should be used by panic code. It stops ftrace
4568  * but in a not so nice way. If you need to disable ftrace cleanly
4569  * from a non-atomic section, unregister the ops with
4570  * unregister_ftrace_function() instead.
4570  */
4571 void ftrace_kill(void)
4572 {
4573         ftrace_disabled = 1;
4574         ftrace_enabled = 0;
4575         clear_ftrace_function();
4576 }
4577
4578 /**
4579  * ftrace_is_dead - Test if ftrace is dead or not.
4580  */
4581 int ftrace_is_dead(void)
4582 {
4583         return ftrace_disabled;
4584 }
4585
4586 /**
4587  * register_ftrace_function - register a function for profiling
4588  * @ops - ops structure that holds the function for profiling.
4589  *
4590  * Register a function to be called by all functions in the
4591  * kernel.
4592  *
4593  * Note: @ops->func and all the functions it calls must be labeled
4594  *       with "notrace", otherwise it will go into a
4595  *       recursive loop.
4596  */
4597 int register_ftrace_function(struct ftrace_ops *ops)
4598 {
4599         int ret = -1;
4600
4601         ftrace_ops_init(ops);
4602
4603         mutex_lock(&ftrace_lock);
4604
4605         ret = __register_ftrace_function(ops);
4606         if (!ret)
4607                 ret = ftrace_startup(ops, 0);
4608
4609         mutex_unlock(&ftrace_lock);
4610
4611         return ret;
4612 }
4613 EXPORT_SYMBOL_GPL(register_ftrace_function);
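
/*
 * Editor's sketch (illustrative, not compiled here): a minimal callback
 * and ops registration. All names are assumptions. As documented above,
 * the callback and everything it calls must be notrace.
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */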
4614
4615 /**
4616  * unregister_ftrace_function - unregister a function for profiling.
4617  * @ops - ops structure that holds the function to unregister
4618  *
4619  * Unregister a function that was added to be called by ftrace profiling.
4620  */
4621 int unregister_ftrace_function(struct ftrace_ops *ops)
4622 {
4623         int ret;
4624
4625         mutex_lock(&ftrace_lock);
4626         ret = __unregister_ftrace_function(ops);
4627         if (!ret)
4628                 ftrace_shutdown(ops, 0);
4629         mutex_unlock(&ftrace_lock);
4630
4631         return ret;
4632 }
4633 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4634
4635 int
4636 ftrace_enable_sysctl(struct ctl_table *table, int write,
4637                      void __user *buffer, size_t *lenp,
4638                      loff_t *ppos)
4639 {
4640         int ret = -ENODEV;
4641
4642         mutex_lock(&ftrace_lock);
4643
4644         if (unlikely(ftrace_disabled))
4645                 goto out;
4646
4647         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4648
4649         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4650                 goto out;
4651
4652         last_ftrace_enabled = !!ftrace_enabled;
4653
4654         if (ftrace_enabled) {
4655
4656                 ftrace_startup_sysctl();
4657
4658                 /* we are starting ftrace again */
4659                 if (ftrace_ops_list != &ftrace_list_end)
4660                         update_ftrace_function();
4661
4662         } else {
4663                 /* stopping ftrace calls (just send to ftrace_stub) */
4664                 ftrace_trace_function = ftrace_stub;
4665
4666                 ftrace_shutdown_sysctl();
4667         }
4668
4669  out:
4670         mutex_unlock(&ftrace_lock);
4671         return ret;
4672 }
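
/*
 * Editor's sketch: this handler backs the kernel.ftrace_enabled sysctl,
 * so function tracing can be toggled at run time with:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	sysctl -w kernel.ftrace_enabled=1
 */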
4673
4674 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4675
4676 static int ftrace_graph_active;
4677 static struct notifier_block ftrace_suspend_notifier;
4678
4679 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4680 {
4681         return 0;
4682 }
4683
4684 /* The callbacks that hook a function */
4685 trace_func_graph_ret_t ftrace_graph_return =
4686                         (trace_func_graph_ret_t)ftrace_stub;
4687 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4688
4689 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4690 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4691 {
4692         int i;
4693         int ret = 0;
4694         unsigned long flags;
4695         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4696         struct task_struct *g, *t;
4697
4698         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4699                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4700                                         * sizeof(struct ftrace_ret_stack),
4701                                         GFP_KERNEL);
4702                 if (!ret_stack_list[i]) {
4703                         start = 0;
4704                         end = i;
4705                         ret = -ENOMEM;
4706                         goto free;
4707                 }
4708         }
4709
4710         read_lock_irqsave(&tasklist_lock, flags);
4711         do_each_thread(g, t) {
4712                 if (start == end) {
4713                         ret = -EAGAIN;
4714                         goto unlock;
4715                 }
4716
4717                 if (t->ret_stack == NULL) {
4718                         atomic_set(&t->tracing_graph_pause, 0);
4719                         atomic_set(&t->trace_overrun, 0);
4720                         t->curr_ret_stack = -1;
4721                         /* Make sure the tasks see the -1 first: */
4722                         smp_wmb();
4723                         t->ret_stack = ret_stack_list[start++];
4724                 }
4725         } while_each_thread(g, t);
4726
4727 unlock:
4728         read_unlock_irqrestore(&tasklist_lock, flags);
4729 free:
4730         for (i = start; i < end; i++)
4731                 kfree(ret_stack_list[i]);
4732         return ret;
4733 }
4734
4735 static void
4736 ftrace_graph_probe_sched_switch(void *ignore,
4737                         struct task_struct *prev, struct task_struct *next)
4738 {
4739         unsigned long long timestamp;
4740         int index;
4741
4742         /*
4743          * Does the user want to count the time a function was asleep?
4744          * If so, do not update the time stamps.
4745          */
4746         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4747                 return;
4748
4749         timestamp = trace_clock_local();
4750
4751         prev->ftrace_timestamp = timestamp;
4752
4753         /* only process tasks that we timestamped */
4754         if (!next->ftrace_timestamp)
4755                 return;
4756
4757         /*
4758          * Update all the counters in next to make up for the
4759          * time next was sleeping.
4760          */
4761         timestamp -= next->ftrace_timestamp;
4762
4763         for (index = next->curr_ret_stack; index >= 0; index--)
4764                 next->ret_stack[index].calltime += timestamp;
4765 }
4766
4767 /* Allocate a return stack for each task */
4768 static int start_graph_tracing(void)
4769 {
4770         struct ftrace_ret_stack **ret_stack_list;
4771         int ret, cpu;
4772
4773         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4774                                 sizeof(struct ftrace_ret_stack *),
4775                                 GFP_KERNEL);
4776
4777         if (!ret_stack_list)
4778                 return -ENOMEM;
4779
4780         /* The cpu_boot init_task->ret_stack will never be freed */
4781         for_each_online_cpu(cpu) {
4782                 if (!idle_task(cpu)->ret_stack)
4783                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4784         }
4785
4786         do {
4787                 ret = alloc_retstack_tasklist(ret_stack_list);
4788         } while (ret == -EAGAIN);
4789
4790         if (!ret) {
4791                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4792                 if (ret)
4793                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4794                                 " probe to kernel_sched_switch\n");
4795         }
4796
4797         kfree(ret_stack_list);
4798         return ret;
4799 }
4800
4801 /*
4802  * Hibernation protection.
4803  * The state of the current task is too unstable during
4804  * suspend/restore to disk. We want to protect against that.
4805  */
4806 static int
4807 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4808                                                         void *unused)
4809 {
4810         switch (state) {
4811         case PM_HIBERNATION_PREPARE:
4812                 pause_graph_tracing();
4813                 break;
4814
4815         case PM_POST_HIBERNATION:
4816                 unpause_graph_tracing();
4817                 break;
4818         }
4819         return NOTIFY_DONE;
4820 }
4821
4822 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4823                         trace_func_graph_ent_t entryfunc)
4824 {
4825         int ret = 0;
4826
4827         mutex_lock(&ftrace_lock);
4828
4829         /* we currently allow only one tracer registered at a time */
4830         if (ftrace_graph_active) {
4831                 ret = -EBUSY;
4832                 goto out;
4833         }
4834
4835         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4836         register_pm_notifier(&ftrace_suspend_notifier);
4837
4838         ftrace_graph_active++;
4839         ret = start_graph_tracing();
4840         if (ret) {
4841                 ftrace_graph_active--;
4842                 goto out;
4843         }
4844
4845         ftrace_graph_return = retfunc;
4846         ftrace_graph_entry = entryfunc;
4847
4848         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4849
4850 out:
4851         mutex_unlock(&ftrace_lock);
4852         return ret;
4853 }
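
/*
 * Editor's sketch (illustrative, not compiled here): minimal graph
 * callbacks. The names are assumptions; the entry callback returns
 * nonzero to trace the function, zero to skip it.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */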
4854
4855 void unregister_ftrace_graph(void)
4856 {
4857         mutex_lock(&ftrace_lock);
4858
4859         if (unlikely(!ftrace_graph_active))
4860                 goto out;
4861
4862         ftrace_graph_active--;
4863         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4864         ftrace_graph_entry = ftrace_graph_entry_stub;
4865         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4866         unregister_pm_notifier(&ftrace_suspend_notifier);
4867         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4868
4869  out:
4870         mutex_unlock(&ftrace_lock);
4871 }
4872
4873 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4874
4875 static void
4876 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4877 {
4878         atomic_set(&t->tracing_graph_pause, 0);
4879         atomic_set(&t->trace_overrun, 0);
4880         t->ftrace_timestamp = 0;
4881         /* make curr_ret_stack visible before we add the ret_stack */
4882         smp_wmb();
4883         t->ret_stack = ret_stack;
4884 }
4885
4886 /*
4887  * Allocate a return stack for the idle task. May be the first
4888  * time through, or it may be done by CPU hotplug online.
4889  */
4890 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4891 {
4892         t->curr_ret_stack = -1;
4893         /*
4894          * The idle task has no parent, it either has its own
4895          * stack or no stack at all.
4896          */
4897         if (t->ret_stack)
4898                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4899
4900         if (ftrace_graph_active) {
4901                 struct ftrace_ret_stack *ret_stack;
4902
4903                 ret_stack = per_cpu(idle_ret_stack, cpu);
4904                 if (!ret_stack) {
4905                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4906                                             * sizeof(struct ftrace_ret_stack),
4907                                             GFP_KERNEL);
4908                         if (!ret_stack)
4909                                 return;
4910                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4911                 }
4912                 graph_init_task(t, ret_stack);
4913         }
4914 }
4915
4916 /* Allocate a return stack for newly created task */
4917 void ftrace_graph_init_task(struct task_struct *t)
4918 {
4919         /* Make sure we do not use the parent ret_stack */
4920         t->ret_stack = NULL;
4921         t->curr_ret_stack = -1;
4922
4923         if (ftrace_graph_active) {
4924                 struct ftrace_ret_stack *ret_stack;
4925
4926                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4927                                 * sizeof(struct ftrace_ret_stack),
4928                                 GFP_KERNEL);
4929                 if (!ret_stack)
4930                         return;
4931                 graph_init_task(t, ret_stack);
4932         }
4933 }
4934
4935 void ftrace_graph_exit_task(struct task_struct *t)
4936 {
4937         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4938
4939         t->ret_stack = NULL;
4940         /* NULL must become visible to IRQs before we free it: */
4941         barrier();
4942
4943         kfree(ret_stack);
4944 }
4945
4946 void ftrace_graph_stop(void)
4947 {
4948         ftrace_stop();
4949 }
4950 #endif