// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */
7 #include <linux/kernel.h>
8 #include <linux/init.h>
9 #include <linux/sched.h>
10 #include <linux/sched/debug.h>
11 #include <linux/sched/signal.h>
12 #include <linux/signal.h>
13 #include <linux/kdebug.h>
14 #include <linux/uaccess.h>
15 #include <linux/kprobes.h>
16 #include <linux/uprobes.h>
17 #include <asm/uprobes.h>
19 #include <linux/module.h>
20 #include <linux/irq.h>
21 #include <linux/kexec.h>
22 #include <linux/entry-common.h>
24 #include <asm/asm-prototypes.h>
28 #include <asm/processor.h>
29 #include <asm/ptrace.h>
30 #include <asm/syscall.h>
31 #include <asm/thread_info.h>
32 #include <asm/vector.h>
33 #include <asm/irq_stack.h>
/* When non-zero, log a rate-limited diagnostic for fatal user signals. */
int show_unhandled_signals = 1;

/* Serializes oops/die() output when multiple harts crash concurrently. */
static DEFINE_SPINLOCK(die_lock);
39 static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
41 char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
42 const u16 *insns = (u16 *)instruction_pointer(regs);
47 for (i = -10; i < 2; i++) {
48 bad = get_kernel_nofault(val, &insns[i]);
50 p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
52 printk("%sCode: Unable to access instruction at 0x%px.\n",
57 printk("%sCode: %s\n", loglvl, str);
60 void die(struct pt_regs *regs, const char *str)
62 static int die_counter;
69 spin_lock_irqsave(&die_lock, flags);
73 pr_emerg("%s [#%d]\n", str, ++die_counter);
77 dump_kernel_instr(KERN_EMERG, regs);
80 cause = regs ? regs->cause : -1;
81 ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);
83 if (kexec_should_crash(current))
87 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
88 spin_unlock_irqrestore(&die_lock, flags);
92 panic("Fatal exception in interrupt");
94 panic("Fatal exception");
95 if (ret != NOTIFY_STOP)
96 make_task_dead(SIGSEGV);
99 void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
101 struct task_struct *tsk = current;
103 if (show_unhandled_signals && unhandled_signal(tsk, signo)
104 && printk_ratelimit()) {
105 pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
106 tsk->comm, task_pid_nr(tsk), signo, code, addr);
107 print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
112 force_sig_fault(signo, code, (void __user *)addr);
115 static void do_trap_error(struct pt_regs *regs, int signo, int code,
116 unsigned long addr, const char *str)
118 current->thread.bad_cause = regs->cause;
120 if (user_mode(regs)) {
121 do_trap(regs, signo, code, addr);
123 if (!fixup_exception(regs))
/*
 * On XIP kernels with alternatives, trap handlers must live in a RAM
 * section (".xip.traps") so they can be patched; otherwise plain noinstr.
 */
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __noinstr_section(".xip.traps")
#else
#define __trap_section noinstr
#endif

/*
 * Boilerplate generator for simple trap handlers: wrap do_trap_error() in
 * the proper generic-entry bracketing for user mode vs. kernel (NMI-like).
 */
#define DO_ERROR_INFO(name, signo, code, str)					\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
{										\
	if (user_mode(regs)) {							\
		irqentry_enter_from_user_mode(regs);				\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_exit_to_user_mode(regs);				\
	} else {								\
		irqentry_state_t state = irqentry_nmi_enter(regs);		\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_nmi_exit(regs, state);					\
	}									\
}
/* Simple trap handlers: signal number / si_code per exception cause. */
DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");
154 asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
158 if (user_mode(regs)) {
159 irqentry_enter_from_user_mode(regs);
163 handled = riscv_v_first_use_handler(regs);
168 do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
169 "Oops - illegal instruction");
171 irqentry_exit_to_user_mode(regs);
173 irqentry_state_t state = irqentry_nmi_enter(regs);
175 do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
176 "Oops - illegal instruction");
178 irqentry_nmi_exit(regs, state);
182 DO_ERROR_INFO(do_trap_load_fault,
183 SIGSEGV, SEGV_ACCERR, "load access fault");
184 #ifndef CONFIG_RISCV_M_MODE
185 DO_ERROR_INFO(do_trap_load_misaligned,
186 SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
187 DO_ERROR_INFO(do_trap_store_misaligned,
188 SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
190 int handle_misaligned_load(struct pt_regs *regs);
191 int handle_misaligned_store(struct pt_regs *regs);
193 asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
195 if (user_mode(regs)) {
196 irqentry_enter_from_user_mode(regs);
198 if (handle_misaligned_load(regs))
199 do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
200 "Oops - load address misaligned");
202 irqentry_exit_to_user_mode(regs);
204 irqentry_state_t state = irqentry_nmi_enter(regs);
206 if (handle_misaligned_load(regs))
207 do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
208 "Oops - load address misaligned");
210 irqentry_nmi_exit(regs, state);
214 asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
216 if (user_mode(regs)) {
217 irqentry_enter_from_user_mode(regs);
219 if (handle_misaligned_store(regs))
220 do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
221 "Oops - store (or AMO) address misaligned");
223 irqentry_exit_to_user_mode(regs);
225 irqentry_state_t state = irqentry_nmi_enter(regs);
227 if (handle_misaligned_store(regs))
228 do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
229 "Oops - store (or AMO) address misaligned");
231 irqentry_nmi_exit(regs, state);
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
/* ecall from S-/M-mode should never reach the kernel; treat as illegal. */
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");
242 static inline unsigned long get_break_insn_length(unsigned long pc)
246 if (get_kernel_nofault(insn, (bug_insn_t *)pc))
249 return GET_INSN_LENGTH(insn);
252 static bool probe_single_step_handler(struct pt_regs *regs)
254 bool user = user_mode(regs);
256 return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
259 static bool probe_breakpoint_handler(struct pt_regs *regs)
261 bool user = user_mode(regs);
263 return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
266 void handle_break(struct pt_regs *regs)
268 if (probe_single_step_handler(regs))
271 if (probe_breakpoint_handler(regs))
274 current->thread.bad_cause = regs->cause;
277 force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
279 else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
283 else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
284 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
285 regs->epc += get_break_insn_length(regs->epc);
287 die(regs, "Kernel BUG");
290 asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
292 if (user_mode(regs)) {
293 irqentry_enter_from_user_mode(regs);
297 irqentry_exit_to_user_mode(regs);
299 irqentry_state_t state = irqentry_nmi_enter(regs);
303 irqentry_nmi_exit(regs, state);
307 asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
309 if (user_mode(regs)) {
310 long syscall = regs->a7;
313 regs->orig_a0 = regs->a0;
315 riscv_v_vstate_discard(regs);
317 syscall = syscall_enter_from_user_mode(regs, syscall);
319 if (syscall >= 0 && syscall < NR_syscalls)
320 syscall_handler(regs, syscall);
321 else if (syscall != -1)
324 syscall_exit_to_user_mode(regs);
326 irqentry_state_t state = irqentry_nmi_enter(regs);
328 do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
329 "Oops - environment call from U-mode");
331 irqentry_nmi_exit(regs, state);
#ifdef CONFIG_MMU
/* Page-fault entry: generic-entry bracketing around the MM fault handler. */
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	handle_page_fault(regs);

	local_flush_icache_all();

	irqentry_exit(regs, state);
}
#endif
349 static void noinstr handle_riscv_irq(struct pt_regs *regs)
351 struct pt_regs *old_regs;
354 old_regs = set_irq_regs(regs);
355 handle_arch_irq(regs);
356 set_irq_regs(old_regs);
360 asmlinkage void noinstr do_irq(struct pt_regs *regs)
362 irqentry_state_t state = irqentry_enter(regs);
363 #ifdef CONFIG_IRQ_STACKS
364 if (on_thread_stack()) {
365 ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
366 + IRQ_STACK_SIZE/sizeof(ulong);
368 "addi sp, sp, -"RISCV_SZPTR "\n"
370 "addi sp, sp, -"RISCV_SZPTR "\n"
372 "addi s0, sp, 2*"RISCV_SZPTR "\n"
374 "move a0, %[regs] \n"
375 "call handle_riscv_irq \n"
376 "addi sp, s0, -2*"RISCV_SZPTR"\n"
378 "addi sp, sp, "RISCV_SZPTR "\n"
380 "addi sp, sp, "RISCV_SZPTR "\n"
382 : [sp] "r" (sp), [regs] "r" (regs)
383 : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
384 "t0", "t1", "t2", "t3", "t4", "t5", "t6",
385 #ifndef CONFIG_FRAME_POINTER
391 handle_riscv_irq(regs);
393 irqentry_exit(regs, state);
#ifdef CONFIG_GENERIC_BUG
/*
 * Decide whether @pc points at a BUG() trap instruction (32-bit ebreak or
 * compressed c.ebreak).  Used by the generic BUG machinery from handle_break.
 */
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */
412 #ifdef CONFIG_VMAP_STACK
414 * Extra stack space that allows us to provide panic messages when the kernel
415 * has overflowed its stack.
417 static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
418 overflow_stack)__aligned(16);
420 * A temporary stack for use by handle_kernel_stack_overflow. This is used so
421 * we can call into C code to get the per-hart overflow stack. Usage of this
422 * stack must be protected by spin_shadow_stack.
424 long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
427 * A pseudo spinlock to protect the shadow stack from being used by multiple
428 * harts concurrently. This isn't a real spinlock because the lock side must
429 * be taken without a valid stack and only a single register, it's only taken
430 * while in the process of panicing anyway so the performance and error
431 * checking a proper spinlock gives us doesn't matter.
433 unsigned long spin_shadow_stack;
435 asmlinkage unsigned long get_overflow_stack(void)
437 return (unsigned long)this_cpu_ptr(overflow_stack) +
441 asmlinkage void handle_bad_stack(struct pt_regs *regs)
443 unsigned long tsk_stk = (unsigned long)current->stack;
444 unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
447 * We're done with the shadow stack by this point, as we're on the
448 * overflow stack. Tell any other concurrent overflowing harts that
449 * they can proceed with panicing by releasing the pseudo-spinlock.
451 * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
453 smp_store_release(&spin_shadow_stack, 0);
457 pr_emerg("Insufficient stack space to handle exception!\n");
458 pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
459 tsk_stk, tsk_stk + THREAD_SIZE);
460 pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
461 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
464 panic("Kernel stack overflow");
467 wait_for_interrupt();