/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include "trace.h"
#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
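
/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far (terminated by ULONG_MAX, hence the extra slot), and
 * stack_dump_index[] holds, for each of those entries, how many bytes
 * of stack were in use at that frame.
 */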

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;
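
	/*
	 * The thread stack is THREAD_SIZE bytes, THREAD_SIZE aligned,
	 * and grows down.  Masking the address of a local variable with
	 * THREAD_SIZE-1 gives its offset into the stack area, so
	 * THREAD_SIZE minus that offset is the stack currently in use.
	 * Illustrative numbers: with THREAD_SIZE = 8192 and a local
	 * whose address has low bits 0x1e40 (7744), the stack in use is
	 * 8192 - 7744 = 448 bytes.
	 */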
	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
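	/*
	 * Illustrative walk (made-up layout): if the unwinder reported
	 * entries {funcA, funcB} and the raw stack words between 'start'
	 * and 'top' contain funcA's address at p1 and funcB's at some
	 * p2 > p1, the scan below records (top - p1) and (top - p2),
	 * scaled to bytes, as the stack depth of each frame, resuming
	 * each search just past the previous match.
	 */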
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
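
/*
 * Note on the guard above: stack_trace_call() runs from the ftrace
 * handler on potentially every traced function, including functions
 * called from within check_stack() itself.  The per-cpu trace_active
 * counter turns any such nested invocation into a no-op, and preemption
 * stays disabled so the counter is only touched from one CPU.
 */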

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);

	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
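
/*
 * Illustrative usage (assuming debugfs mounted at /sys/kernel/debug;
 * the value shown is made up):
 *
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 *	2928
 *
 * This reports the deepest stack usage, in bytes, observed so far.
 */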

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;
	int cpu;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
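
/*
 * Illustrative usage: writing 0 rearms the watermark, so the next
 * traced function records a fresh maximum:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/stack_max_size
 */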

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
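
/*
 * Note: the seq_file core guarantees t_stop() runs after every
 * t_start(), so the spinlock, the per-cpu trace_active bump, and the
 * disabled interrupts taken in t_start() are always unwound here, even
 * if a read terminates early.
 */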

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
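
/*
 * Illustrative output (entry count, depths, and sizes are made up;
 * the symbols are just examples):
 *
 *	        Depth    Size   Location    (10 entries)
 *	        -----    ----   --------
 *	  0)     2928     224   update_curr+0x9e/0x1a0
 *	  1)     2704     160   pick_next_task_fair+0x2f/0xf0
 */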

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);

	return ret;
}
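
/*
 * Illustrative runtime toggling via the sysctl (equivalent to booting
 * with the 'stacktrace' parameter handled below):
 *
 *	# echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	# echo 0 > /proc/sys/kernel/stack_tracer_enabled
 */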

static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
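
/*
 * Illustrative end-to-end usage (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	(let the workload run for a while)
 *	# cat /sys/kernel/debug/tracing/stack_trace
 */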