/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
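
/*
 * Tracer lifecycle: init/reset run when this tracer is selected or
 * deselected through the 'current_tracer' tracefs file; start resets
 * the online-CPU ring buffers so a restarted trace begins clean.
 */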
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
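
/*
 * The callbacks below are attached to every traced function. Each one
 * bumps a per-CPU 'disabled' counter before writing to the ring buffer,
 * so a recursive entry on the same CPU (e.g. from an interrupt arriving
 * mid-trace) is dropped instead of recursing into the tracer forever.
 */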
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
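
/*
 * The variant above only disables preemption, so it can itself be
 * interrupted and re-entered from irq context (those nested hits are
 * dropped by the 'disabled' counter). The default callback below
 * disables interrupts outright around the ring-buffer write.
 */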
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};
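
/*
 * FTRACE_OPS_FL_GLOBAL puts these ops on the global function-tracing
 * list, so they honor the filtering configured through the
 * set_ftrace_filter/set_ftrace_notrace files.
 */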

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
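
/*
 * Example (assumes tracefs is mounted at /sys/kernel/debug/tracing and
 * the function tracer is the current tracer):
 *
 *   echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 *
 * flips TRACE_FUNC_OPT_STACK via func_set_flag() below, swapping in the
 * stack-tracing callback.
 */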
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.wait_pipe = poll_wait_pipe,
	.flags = &func_flags,
	.set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};
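
/*
 * Selecting and reading the tracer (assumes the usual tracefs mount):
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 */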
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}
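
/*
 * Both probes above treat the callback data as a countdown: -1 means
 * fire on every hit ("unlimited"), otherwise each effective toggle of
 * the ring buffer decrements the count, and a count of zero disarms
 * the probe. Hits that would not change the buffer state (the early
 * tracing_is_on() returns) are not counted.
 */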
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
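
/*
 * Example (written to set_ftrace_filter in the tracefs directory):
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 *
 * turns the ring buffer off the first five times schedule() is hit;
 * 'echo !schedule:traceoff' removes the probe again via the '!' branch
 * above.
 */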
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);