/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

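/*
 * stack_dump_trace[] holds the call chain of the deepest stack seen so
 * far, terminated by ULONG_MAX.  stack_dump_index[i] records how many
 * bytes of stack lie between the location where entry i's return
 * address was found and the top of the thread stack (the "Depth"
 * column of the stack_trace output).
 */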
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

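/*
 * check_stack() is called from the function tracer callback on every
 * traced function entry.  It measures how much of the current thread
 * stack is in use and, when a new maximum is seen, saves a stack trace
 * and records the depth at which each function in that trace sits.
 */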
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

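/*
 * Function tracer callback.  The per-cpu trace_active counter guards
 * against recursion: if it is already non-zero we are being called from
 * within the stack checking code itself, so bail out.
 */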
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

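/*
 * "stack_max_size" debugfs file: reading returns the largest stack
 * usage recorded so far (in bytes); writing stores a new value.
 */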
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);

	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

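/*
 * Writing to "stack_max_size" is typically used to reset the recorded
 * maximum (e.g. writing 0) so that a new deepest stack can be captured.
 */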
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

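/*
 * seq_file iterator for the "stack_trace" file.  Positions index into
 * stack_dump_trace[]/stack_dump_index[]; iteration stops at the first
 * ULONG_MAX terminator.
 */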
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "# Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

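/*
 * "stack_trace_filter" reuses the ftrace regex filter code so that the
 * stack tracer callback can be restricted to a chosen set of functions.
 */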
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};

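/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback only when the value actually changes.
 */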
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

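/*
 * Boot-time setup: "stacktrace" on the kernel command line enables the
 * tracer early; "stacktrace_filter=<funcs>" additionally seeds the
 * function filter applied in stack_trace_init().
 */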
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

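/*
 * Create the debugfs control files and, if requested on the command
 * line, apply the early filter and start tracing immediately.
 */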
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);