// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>
#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

#define orc_warn_current(args...)		\
({						\
	if (state->task == current)		\
		orc_warn(args);			\
})
extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;
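
/*
 * Each .orc_unwind_ip entry is a 32-bit offset relative to the entry's own
 * address; orc_ip() below converts it back to an absolute text address.
 * Self-relative offsets keep the table position-independent and half the
 * size of an array of 64-bit pointers.
 */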
static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}
static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address. Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps. They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}
#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across a ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long caller;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		caller = (unsigned long)ftrace_regs_call;
	else
		caller = (unsigned long)ftrace_call;

	/* Prevent unlikely recursion */
	if (caller == ip)
		return NULL;

	return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif
/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = UNWIND_HINT_TYPE_CALL
};
/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
	.type		= UNWIND_HINT_TYPE_CALL,
	.sp_reg		= ORC_REG_BP,
	.sp_offset	= 16,
	.bp_reg		= ORC_REG_PREV_SP,
	.bp_offset	= -16,
	.end		= 0,
};
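
/*
 * Note: the offsets above encode the standard frame-pointer layout: after
 * "push %rbp; mov %rsp, %rbp", the caller's stack pointer is RBP + 16
 * (saved RBP plus return address), and the saved RBP lives 16 bytes below
 * that recovered SP.
 */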
static struct orc_entry *orc_find(unsigned long ip)
{
	static struct orc_entry *orc;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
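		/*
		 * orc_lookup[] maps each LOOKUP_BLOCK_SIZE-sized block of
		 * kernel text to a starting index in the ORC tables, so the
		 * binary search below only scans the entries between two
		 * adjacent lookup values instead of the whole table.
		 */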
		if (unlikely((idx >= lookup_num_blocks-1))) {
			orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (init_kernel_text(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}
#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;

static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;
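	/*
	 * The ip table entries are self-relative (see orc_ip()), so moving
	 * an entry 'delta' bytes to a new slot requires adjusting its
	 * offset by the same amount to keep it pointing at the same
	 * absolute address.
	 */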
	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}
static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif /* CONFIG_MODULES */
void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n");
		return;
	}

	/*
	 * Note, the orc_unwind and orc_unwind_ip tables were already
	 * sorted at build time via the 'sorttable' tool.
	 * It's ready for binary search straight away, no need to sort it.
	 */

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}
unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}
static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (!on_stack(info, addr, len) &&
	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
		return false;

	return true;
}
static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}
static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}
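
/*
 * An IRET frame holds only the top portion of pt_regs (ip, cs, flags, sp,
 * ss), so deref_stack_iret_regs() backs the pointer up by IRET_FRAME_OFFSET
 * to overlay a pt_regs layout whose lower fields must not be touched.
 */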
static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}
/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs. This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
		    unsigned long *val)
{
	unsigned int reg = reg_off/8;
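	/*
	 * reg_off is a byte offset into pt_regs, which on x86-64 is laid
	 * out as consecutive 8-byte registers, so reg_off/8 indexes it
	 * like an array of longs.
	 */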
	if (!state->regs)
		return false;

	if (state->full_regs) {
		*val = ((unsigned long *)state->regs)[reg];
		return true;
	}

	if (state->prev_regs) {
		*val = ((unsigned long *)state->prev_regs)[reg];
		return true;
	}

	return false;
}
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;
	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * For a call frame (as opposed to a signal frame), state->ip points to
	 * the instruction after the call. That instruction's stack layout
	 * could be different from the call instruction's layout, for example
	 * if the call was to a noreturn function. So get the ORC data for the
	 * call instruction itself.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about. This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	}

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		if (!orc->end)
			goto err;

		goto the_end;
	}
	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
			orc_warn_current("missing R10 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_R13:
		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
			orc_warn_current("missing R13 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DI:
		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
			orc_warn_current("missing RDI value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DX:
		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
			orc_warn_current("missing DX value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	default:
		orc_warn("unknown SP base reg %d at %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}
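	/*
	 * For the *_INDIRECT cases, the address computed above holds a
	 * pointer to the previous stack pointer rather than the stack
	 * pointer itself, so one more dereference is needed.
	 */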
	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;
	}
	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case UNWIND_HINT_TYPE_CALL:
		ip_p = sp - sizeof(long);
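		/*
		 * The previous frame's SP points just past the return
		 * address pushed by the call, so the return address itself
		 * sits one word below it.
		 */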
		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						  state->ip, (void *)ip_p);

		state->sp = sp;
		state->regs = NULL;
		state->prev_regs = NULL;
		state->signal = false;
		break;
	case UNWIND_HINT_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}

		state->regs = (struct pt_regs *)sp;
		state->prev_regs = NULL;
		state->full_regs = true;
		state->signal = true;
		break;
	case UNWIND_HINT_TYPE_REGS_PARTIAL:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access iret registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}

		if (state->full_regs)
			state->prev_regs = state->regs;
		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d at %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}
	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
			state->bp = tmp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}
	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn_current("stack going in the wrong direction? at %pB\n",
				 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	if (!orc_init)
		goto err;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU. This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto err;
	if (regs) {
		if (user_mode(regs))
			goto the_end;

		state->ip = regs->ip;
		state->sp = regs->sp;
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));
	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp + sizeof(*frame);
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
		state->signal = (void *)state->ip == ret_from_fork;
	}
	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack. It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		state->error = true;
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}
	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at. Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp < (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

err:
	state->error = true;
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);