/*
 * ftrace: Pass ftrace_ops as third parameter to function trace callback
 * [profile/ivi/kernel-adaptation-intel-automotive.git] kernel/trace/trace_functions.c
 */
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Based on code from the latency_tracer, that is:
8  *
9  *  Copyright (C) 2004-2006 Ingo Molnar
10  *  Copyright (C) 2004 William Lee Irwin III
11  */
12 #include <linux/ring_buffer.h>
13 #include <linux/debugfs.h>
14 #include <linux/uaccess.h>
15 #include <linux/ftrace.h>
16 #include <linux/fs.h>
17
18 #include "trace.h"
19
/* Non-zero while the function tracer is actively recording events. */
static int ftrace_function_enabled;

/* Trace array this tracer records into; set by function_trace_init(). */
static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
28 static int function_trace_init(struct trace_array *tr)
29 {
30         func_trace = tr;
31         tr->cpu = get_cpu();
32         put_cpu();
33
34         tracing_start_cmdline_record();
35         tracing_start_function_trace();
36         return 0;
37 }
38
/*
 * Tracer reset hook: tear down in the reverse order of init --
 * stop the function callback first, then cmdline recording.
 */
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}
44
/* Tracer start hook: clear out stale events on every online CPU. */
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
49
50 static void
51 function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
52                                  struct ftrace_ops *op)
53 {
54         struct trace_array *tr = func_trace;
55         struct trace_array_cpu *data;
56         unsigned long flags;
57         long disabled;
58         int cpu;
59         int pc;
60
61         if (unlikely(!ftrace_function_enabled))
62                 return;
63
64         pc = preempt_count();
65         preempt_disable_notrace();
66         local_save_flags(flags);
67         cpu = raw_smp_processor_id();
68         data = tr->data[cpu];
69         disabled = atomic_inc_return(&data->disabled);
70
71         if (likely(disabled == 1))
72                 trace_function(tr, ip, parent_ip, flags, pc);
73
74         atomic_dec(&data->disabled);
75         preempt_enable_notrace();
76 }
77
78 static void
79 function_trace_call(unsigned long ip, unsigned long parent_ip,
80                     struct ftrace_ops *op)
81 {
82         struct trace_array *tr = func_trace;
83         struct trace_array_cpu *data;
84         unsigned long flags;
85         long disabled;
86         int cpu;
87         int pc;
88
89         if (unlikely(!ftrace_function_enabled))
90                 return;
91
92         /*
93          * Need to use raw, since this must be called before the
94          * recursive protection is performed.
95          */
96         local_irq_save(flags);
97         cpu = raw_smp_processor_id();
98         data = tr->data[cpu];
99         disabled = atomic_inc_return(&data->disabled);
100
101         if (likely(disabled == 1)) {
102                 pc = preempt_count();
103                 trace_function(tr, ip, parent_ip, flags, pc);
104         }
105
106         atomic_dec(&data->disabled);
107         local_irq_restore(flags);
108 }
109
110 static void
111 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
112                           struct ftrace_ops *op)
113 {
114         struct trace_array *tr = func_trace;
115         struct trace_array_cpu *data;
116         unsigned long flags;
117         long disabled;
118         int cpu;
119         int pc;
120
121         if (unlikely(!ftrace_function_enabled))
122                 return;
123
124         /*
125          * Need to use raw, since this must be called before the
126          * recursive protection is performed.
127          */
128         local_irq_save(flags);
129         cpu = raw_smp_processor_id();
130         data = tr->data[cpu];
131         disabled = atomic_inc_return(&data->disabled);
132
133         if (likely(disabled == 1)) {
134                 pc = preempt_count();
135                 trace_function(tr, ip, parent_ip, flags, pc);
136                 /*
137                  * skip over 5 funcs:
138                  *    __ftrace_trace_stack,
139                  *    __trace_stack,
140                  *    function_stack_trace_call
141                  *    ftrace_list_func
142                  *    ftrace_call
143                  */
144                 __trace_stack(tr, flags, 5, pc);
145         }
146
147         atomic_dec(&data->disabled);
148         local_irq_restore(flags);
149 }
150
151
152 static struct ftrace_ops trace_ops __read_mostly =
153 {
154         .func = function_trace_call,
155         .flags = FTRACE_OPS_FL_GLOBAL,
156 };
157
158 static struct ftrace_ops trace_stack_ops __read_mostly =
159 {
160         .func = function_stack_trace_call,
161         .flags = FTRACE_OPS_FL_GLOBAL,
162 };
163
/* Tracer option bits for the function tracer. */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,	/* also record a stack trace */
};
168
169 static struct tracer_opt func_opts[] = {
170 #ifdef CONFIG_STACKTRACE
171         { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
172 #endif
173         { } /* Always set a last empty entry */
174 };
175
176 static struct tracer_flags func_flags = {
177         .val = 0, /* By default: all flags disabled */
178         .opts = func_opts
179 };
180
181 static void tracing_start_function_trace(void)
182 {
183         ftrace_function_enabled = 0;
184
185         if (trace_flags & TRACE_ITER_PREEMPTONLY)
186                 trace_ops.func = function_trace_call_preempt_only;
187         else
188                 trace_ops.func = function_trace_call;
189
190         if (func_flags.val & TRACE_FUNC_OPT_STACK)
191                 register_ftrace_function(&trace_stack_ops);
192         else
193                 register_ftrace_function(&trace_ops);
194
195         ftrace_function_enabled = 1;
196 }
197
198 static void tracing_stop_function_trace(void)
199 {
200         ftrace_function_enabled = 0;
201
202         if (func_flags.val & TRACE_FUNC_OPT_STACK)
203                 unregister_ftrace_function(&trace_stack_ops);
204         else
205                 unregister_ftrace_function(&trace_ops);
206 }
207
208 static int func_set_flag(u32 old_flags, u32 bit, int set)
209 {
210         if (bit == TRACE_FUNC_OPT_STACK) {
211                 /* do nothing if already set */
212                 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
213                         return 0;
214
215                 if (set) {
216                         unregister_ftrace_function(&trace_ops);
217                         register_ftrace_function(&trace_stack_ops);
218                 } else {
219                         unregister_ftrace_function(&trace_stack_ops);
220                         register_ftrace_function(&trace_ops);
221                 }
222
223                 return 0;
224         }
225
226         return -EINVAL;
227 }
228
229 static struct tracer function_trace __read_mostly =
230 {
231         .name           = "function",
232         .init           = function_trace_init,
233         .reset          = function_trace_reset,
234         .start          = function_trace_start,
235         .wait_pipe      = poll_wait_pipe,
236         .flags          = &func_flags,
237         .set_flag       = func_set_flag,
238 #ifdef CONFIG_FTRACE_SELFTEST
239         .selftest       = trace_selftest_startup_function,
240 #endif
241 };
242
243 #ifdef CONFIG_DYNAMIC_FTRACE
244 static void
245 ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
246 {
247         long *count = (long *)data;
248
249         if (tracing_is_on())
250                 return;
251
252         if (!*count)
253                 return;
254
255         if (*count != -1)
256                 (*count)--;
257
258         tracing_on();
259 }
260
/*
 * Probe callback for the "traceoff" filter command: mirror image of
 * ftrace_traceon() -- turns the ring buffer off.
 */
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *shots = (long *)data;

	/* Already off: nothing to do. */
	if (!tracing_is_on())
		return;

	if (!*shots)
		return;

	/* -1 means unlimited; otherwise consume one shot. */
	if (*shots != -1)
		(*shots)--;

	tracing_off();
}
277
278 static int
279 ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
280                          struct ftrace_probe_ops *ops, void *data);
281
282 static struct ftrace_probe_ops traceon_probe_ops = {
283         .func                   = ftrace_traceon,
284         .print                  = ftrace_trace_onoff_print,
285 };
286
287 static struct ftrace_probe_ops traceoff_probe_ops = {
288         .func                   = ftrace_traceoff,
289         .print                  = ftrace_trace_onoff_print,
290 };
291
292 static int
293 ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
294                          struct ftrace_probe_ops *ops, void *data)
295 {
296         long count = (long)data;
297
298         seq_printf(m, "%ps:", (void *)ip);
299
300         if (ops == &traceon_probe_ops)
301                 seq_printf(m, "traceon");
302         else
303                 seq_printf(m, "traceoff");
304
305         if (count == -1)
306                 seq_printf(m, ":unlimited\n");
307         else
308                 seq_printf(m, ":count=%ld\n", count);
309
310         return 0;
311 }
312
313 static int
314 ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
315 {
316         struct ftrace_probe_ops *ops;
317
318         /* we register both traceon and traceoff to this callback */
319         if (strcmp(cmd, "traceon") == 0)
320                 ops = &traceon_probe_ops;
321         else
322                 ops = &traceoff_probe_ops;
323
324         unregister_ftrace_function_probe_func(glob, ops);
325
326         return 0;
327 }
328
329 static int
330 ftrace_trace_onoff_callback(struct ftrace_hash *hash,
331                             char *glob, char *cmd, char *param, int enable)
332 {
333         struct ftrace_probe_ops *ops;
334         void *count = (void *)-1;
335         char *number;
336         int ret;
337
338         /* hash funcs only work with set_ftrace_filter */
339         if (!enable)
340                 return -EINVAL;
341
342         if (glob[0] == '!')
343                 return ftrace_trace_onoff_unreg(glob+1, cmd, param);
344
345         /* we register both traceon and traceoff to this callback */
346         if (strcmp(cmd, "traceon") == 0)
347                 ops = &traceon_probe_ops;
348         else
349                 ops = &traceoff_probe_ops;
350
351         if (!param)
352                 goto out_reg;
353
354         number = strsep(&param, ":");
355
356         if (!strlen(number))
357                 goto out_reg;
358
359         /*
360          * We use the callback data field (which is a pointer)
361          * as our counter.
362          */
363         ret = strict_strtoul(number, 0, (unsigned long *)&count);
364         if (ret)
365                 return ret;
366
367  out_reg:
368         ret = register_ftrace_function_probe(glob, ops, count);
369
370         return ret < 0 ? ret : 0;
371 }
372
373 static struct ftrace_func_command ftrace_traceon_cmd = {
374         .name                   = "traceon",
375         .func                   = ftrace_trace_onoff_callback,
376 };
377
378 static struct ftrace_func_command ftrace_traceoff_cmd = {
379         .name                   = "traceoff",
380         .func                   = ftrace_trace_onoff_callback,
381 };
382
383 static int __init init_func_cmd_traceon(void)
384 {
385         int ret;
386
387         ret = register_ftrace_command(&ftrace_traceoff_cmd);
388         if (ret)
389                 return ret;
390
391         ret = register_ftrace_command(&ftrace_traceon_cmd);
392         if (ret)
393                 unregister_ftrace_command(&ftrace_traceoff_cmd);
394         return ret;
395 }
396 #else
397 static inline int init_func_cmd_traceon(void)
398 {
399         return 0;
400 }
401 #endif /* CONFIG_DYNAMIC_FTRACE */
402
403 static __init int init_function_trace(void)
404 {
405         init_func_cmd_traceon();
406         return register_tracer(&function_trace);
407 }
408 device_initcall(init_function_trace);
409