/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"
#define STACK_TRACE_ENTRIES 500

#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};
static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
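
/*
 * Compare the current stack footprint against the recorded maximum.
 * On a new maximum, save the stack trace and work out how much stack
 * each entry in the trace consumes, all under max_stack_lock.
 */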
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;
	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;
	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);
	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;
	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);
	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;
	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;
		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}
 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
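
/*
 * The function tracer callback. Uses the per-cpu trace_active counter
 * to keep check_stack() from recursing into itself, since the functions
 * it calls are themselves traceable.
 */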
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	unsigned long stack;
	int cpu;
	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed; this variable is only modified by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;
	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);
 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};
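
/* debugfs read handler: report the deepest stack usage seen so far */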
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;
	int cpu;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;
	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
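
/*
 * seq_file iterators for the stack_trace file. The index of the current
 * entry in stack_dump_trace[] is stashed in m->private; iteration ends
 * at nr_entries or at the first ULONG_MAX terminator.
 */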
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
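
/*
 * Show one entry of the stack_trace file: the header (with entry count)
 * for the SEQ_START_TOKEN, otherwise the remaining depth at this frame,
 * the size the frame itself consumes, and its symbol.
 */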
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}
	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
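
/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: registers or
 * unregisters the function tracer callback only when the value
 * actually changes.
 */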
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
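
/*
 * Create the debugfs control files and register the callback if the
 * tracer was enabled on the command line.
 */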
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);