// SPDX-License-Identifier: GPL-2.0-only
/*
 * stacktrace.c : stacktracing APIs needed by rest of kernel
 * (wrappers over ARC dwarf based unwinder)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
 *  for displaying task's kernel mode call stack in /proc/<pid>/stack
 * -Iterator based approach to have single copy of unwinding core and APIs
 *  needing unwinding, implement the logic in iterator regarding:
 *     = which frame onwards to start capture
 *     = which frame to stop capturing (wchan)
 *     = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
 *
 * -Implemented correct versions of thread_saved_pc() and __get_wchan()
 *
 * -Initial implementation
 */
24 #include <linux/ptrace.h>
25 #include <linux/export.h>
26 #include <linux/stacktrace.h>
27 #include <linux/kallsyms.h>
28 #include <linux/sched/debug.h>
30 #include <asm/arcregs.h>
31 #include <asm/unwind.h>
32 #include <asm/stacktrace.h>
33 #include <asm/switch_to.h>
35 /*-------------------------------------------------------------------------
37 *-------------------------------------------------------------------------
40 #ifdef CONFIG_ARC_DW2_UNWIND
43 seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
44 struct unwind_frame_info *frame_info)
48 * Asynchronous unwinding of intr/exception
49 * - Just uses the pt_regs passed
51 frame_info->task = tsk;
53 frame_info->regs.r27 = regs->fp;
54 frame_info->regs.r28 = regs->sp;
55 frame_info->regs.r31 = regs->blink;
56 frame_info->regs.r63 = regs->ret;
57 frame_info->call_frame = 0;
58 } else if (tsk == NULL || tsk == current) {
60 * synchronous unwinding (e.g. dump_stack)
61 * - uses current values of SP and friends
63 unsigned long fp, sp, blink, ret;
64 frame_info->task = current;
71 : "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
74 frame_info->regs.r27 = fp;
75 frame_info->regs.r28 = sp;
76 frame_info->regs.r31 = blink;
77 frame_info->regs.r63 = ret;
78 frame_info->call_frame = 0;
81 * Asynchronous unwinding of a likely sleeping task
82 * - first ensure it is actually sleeping
83 * - if so, it will be in __switch_to, kernel mode SP of task
84 * is safe-kept and BLINK at a well known location in there
87 if (task_is_running(tsk))
90 frame_info->task = tsk;
92 frame_info->regs.r27 = TSK_K_FP(tsk);
93 frame_info->regs.r28 = TSK_K_ESP(tsk);
94 frame_info->regs.r31 = TSK_K_BLINK(tsk);
95 frame_info->regs.r63 = (unsigned int)__switch_to;
97 /* In the prologue of __switch_to, first FP is saved on stack
98 * and then SP is copied to FP. Dwarf assumes cfa as FP based
99 * but we didn't save FP. The value retrieved above is FP's
100 * state in previous frame.
101 * As a work around for this, we unwind from __switch_to start
102 * and adjust SP accordingly. The other limitation is that
103 * __switch_to macro is dwarf rules are not generated for inline
106 frame_info->regs.r27 = 0;
107 frame_info->regs.r28 += 60;
108 frame_info->call_frame = 0;
116 notrace noinline unsigned int
117 arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
118 int (*consumer_fn) (unsigned int, void *), void *arg)
120 #ifdef CONFIG_ARC_DW2_UNWIND
121 int ret = 0, cnt = 0;
122 unsigned int address;
123 struct unwind_frame_info frame_info;
125 if (seed_unwind_frame_info(tsk, regs, &frame_info))
129 address = UNW_PC(&frame_info);
131 if (!address || !__kernel_text_address(address))
134 if (consumer_fn(address, arg) == -1)
137 ret = arc_unwind(&frame_info);
141 frame_info.regs.r63 = frame_info.regs.r31;
144 printk("unwinder looping too long, aborting !\n");
149 return address; /* return the last address it saw */
151 /* On ARC, only Dward based unwinder works. fp based backtracing is
152 * not possible (-fno-omit-frame-pointer) because of the way function
153 * prologue is setup (callee regs saved and then fp set and not other
156 pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
162 /*-------------------------------------------------------------------------
163 * callbacks called by unwinder iterator to implement kernel APIs
165 * The callback can return -1 to force the iterator to stop, which by default
166 * keeps going till the bottom-most frame.
167 *-------------------------------------------------------------------------
/* Call-back which plugs into unwinding core to dump the stack in
 * case of panic/OOPs/BUG etc
 */
static int __print_sym(unsigned int address, void *arg)
{
	const char *loglvl = arg;

	printk("%s %pS\n", loglvl, (void *)address);
	return 0;	/* never stops the iterator: dump every frame */
}
181 #ifdef CONFIG_STACKTRACE
183 /* Call-back which plugs into unwinding core to capture the
184 * traces needed by kernel on /proc/<pid>/stack
186 static int __collect_all(unsigned int address, void *arg)
188 struct stack_trace *trace = arg;
193 trace->entries[trace->nr_entries++] = address;
195 if (trace->nr_entries >= trace->max_entries)
201 static int __collect_all_but_sched(unsigned int address, void *arg)
203 struct stack_trace *trace = arg;
205 if (in_sched_functions(address))
211 trace->entries[trace->nr_entries++] = address;
213 if (trace->nr_entries >= trace->max_entries)
/* Call-back which plugs into unwinding core to find the wait channel:
 * the first frame NOT in scheduler code.  Returning -1 there stops the
 * iterator, making arc_unwind_core() return that address.
 */
static int __get_first_nonsched(unsigned int address, void *unused)
{
	if (in_sched_functions(address))
		return 0;

	return -1;
}
229 /*-------------------------------------------------------------------------
230 * APIs expected by various kernel sub-systems
231 *-------------------------------------------------------------------------
234 noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
237 printk("%s\nStack Trace:\n", loglvl);
238 arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
240 EXPORT_SYMBOL(show_stacktrace);
242 /* Expected by sched Code */
243 void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
245 show_stacktrace(tsk, NULL, loglvl);
248 /* Another API expected by schedular, shows up in "ps" as Wait Channel
249 * Of course just returning schedule( ) would be pointless so unwind until
250 * the function is not in schedular code
252 unsigned int __get_wchan(struct task_struct *tsk)
254 return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
257 #ifdef CONFIG_STACKTRACE
260 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
261 * A typical use is when /proc/<pid>/stack is queried by userland
263 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
265 /* Assumes @tsk is sleeping so unwinds from __switch_to */
266 arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
269 void save_stack_trace(struct stack_trace *trace)
271 /* Pass NULL for task so it unwinds the current call frame */
272 arc_unwind_core(NULL, NULL, __collect_all, trace);
274 EXPORT_SYMBOL_GPL(save_stack_trace);