// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>
#include <linux/ubsan.h>
#include <linux/cfi.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/efi.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

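/*
 * In the NZCV layout used by PSTATE, N is bit 31, Z is bit 30, C is bit 29
 * and V is bit 28. Shifting pstate right by one therefore moves Z onto C's
 * bit position, so "pstate &= ~(pstate >> 1)" leaves PSR_C_BIT set only when
 * C == 1 && Z == 0, which is exactly the "hi" condition (and, inverted,
 * "ls").
 */
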
static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

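/*
 * The same trick in the other direction: "pstate << 3" moves V (bit 28) onto
 * N's bit position (bit 31), so the XOR leaves bit 31 holding N ^ V, and
 * "ge" holds when that bit is clear. For "gt"/"le", "pstate << 1" also moves
 * Z onto bit 31, so the tested bit is (N ^ V) | Z, which is zero exactly
 * when Z == 0 && N == V.
 */
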
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

int show_unhandled_signals = 0;

static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else
			p += sprintf(p, i == 0 ? "(????????) " : "???????? ");
	}

	printk("%sCode: %s\n", lvl, str);
}

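/*
 * The resulting oops line has the form "Code: %08x %08x %08x %08x (%08x)":
 * the four instructions preceding the faulting one, then the faulting
 * instruction itself in parentheses, with "????????" standing in for any
 * word that could not be read.
 */
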
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

static int __die(const char *str, long err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned long esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, unsigned long far,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, (void __user *)far);
}

void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, (void __user *)far, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, (void __user *)far);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, unsigned long far,
		      unsigned long err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, far, str);
	} else {
		die(str, regs, err);
	}
}

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)

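/*
 * The AArch32 ITSTATE field is 8 bits wide but is split in the saved PSTATE:
 * IT[1:0] live in bits [26:25] and IT[7:2] in bits [15:10], matching the
 * SPSR layout. The helpers below reassemble and re-split that value.
 */
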
static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
static void advance_itstate(struct pt_regs *regs)
{
}
#endif

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}

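/*
 * Skipping an instruction stands in for it having executed and retired, so
 * the usual PSTATE side effects are emulated as well: the IT state is
 * advanced for AArch32 Thumb code, and PSTATE.BTYPE is cleared for native
 * tasks, just as the hardware clears it once the instruction at a branch
 * target has completed.
 */
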
static int user_insn_read(struct pt_regs *regs, u32 *insnp)
{
	u32 instr;
	unsigned long pc = instruction_pointer(regs);

	if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;

		if (get_user(instr_le, (__le16 __user *)pc))
			return -EFAULT;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				return -EFAULT;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;

		if (get_user(instr_le, (__le32 __user *)pc))
			return -EFAULT;
		instr = le32_to_cpu(instr_le);
	}

	*insnp = instr;
	return 0;
}

void force_signal_inject(int signal, int code, unsigned long address, unsigned long err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, address, err);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	mmap_read_lock(current->mm);
	if (find_vma(current->mm, untagged_addr(addr)) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	mmap_read_unlock(current->mm);

	force_signal_inject(SIGSEGV, code, addr, 0);
}

void do_el0_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (user_insn_read(regs, &insn))
		goto out_err;

	if (try_emulate_mrs(regs, insn))
		return;

	if (try_emulate_armv8_deprecated(regs, insn))
		return;

out_err:
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	if (aarch64_insn_read((void *)regs->pc, &insn))
		goto out_err;

	if (try_emulate_el1_ssbs(regs, insn))
		return;

out_err:
	die("Oops - Undefined instruction", regs, esr);
}

void do_el0_bti(struct pt_regs *regs)
{
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_bti(struct pt_regs *regs, unsigned long esr)
{
	if (efi_runtime_fixup_exception(regs, "BTI violation")) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		return;
	}
	die("Oops - BTI", regs, esr);
}

void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}

void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	/*
	 * Unexpected FPAC exception in the kernel: kill the task before it
	 * does any more harm.
	 */
	die("Oops - FPAC", regs, esr);
}

#define __user_cache_maint(insn, address, res)			\
	if (address >= TASK_SIZE_MAX) {				\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)	\
			: "=r" (res)				\
			: "r" (address));			\
		uaccess_ttbr0_disable();			\
	}

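/*
 * The extable entry emitted by _ASM_EXTABLE_UACCESS_ERR() makes a fault on
 * the cache-maintenance instruction at label 1 resume at label 2 with the
 * error register (%w0, i.e. "res") set to -EFAULT instead of oopsing, so the
 * caller can turn the failure into a SIGSEGV. Addresses at or above
 * TASK_SIZE_MAX are rejected up front without touching memory.
 */
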
static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs)
{
	unsigned long tagged_address, address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	tagged_address = pt_regs_read_reg(regs, rt);
	address = untagged_addr(tagged_address);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(tagged_address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

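/*
 * DC CVAU and DC CVAC trapped from EL0 are carried out as DC CIVAC ("gets
 * promoted" above): clean+invalidate is a strict superset of clean, so the
 * substitution is always architecturally safe. DC CVAP and DC CVADP are
 * written as the raw encodings "sys 3, c7, c12, 1" and "sys 3, c7, c13, 1",
 * which are the same operations spelled in a form every assembler accepts.
 */
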
static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance...*/
		val &= ~BIT(CTR_EL0_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_EL0_IminLine_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

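/*
 * For the erratum 1542419 workaround: clearing CTR_EL0.DIC makes userspace
 * keep issuing IC IVAU (which is trapped and handled above), and reporting
 * IminLine as PAGE_SHIFT - 2 words advertises a minimum line size of a whole
 * page, so a user cache-maintenance loop traps once per page rather than
 * once per real cache line.
 */
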
static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned long esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

static void wfi_handler(unsigned long esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
	unsigned long esr_mask;
	unsigned long esr_val;
	void (*handler)(unsigned long esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTVCTSS_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCTSS,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};

#ifdef CONFIG_COMPAT
static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS,
		.handler = compat_cntvct_read_handler,
	},
	{},
};

void do_el0_cp15(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_el0_undef(regs, esr);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_el0_undef(regs, esr);
}
#endif

void do_el0_sys(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_el0_undef(regs, esr);
}

static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]		= "BTI",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]		= "FPAC",
	[ESR_ELx_EC_SME]		= "SME",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};

const char *esr_get_class_string(unsigned long esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
{
	unsigned long pc = instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
{
	unsigned long aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

void do_serror(struct pt_regs *regs, unsigned long esr)
{
	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() is only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, esr);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

#ifdef CONFIG_CFI_CLANG
static int cfi_handler(struct pt_regs *regs, unsigned long esr)
{
	unsigned long target;
	u32 type;

	target = pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TARGET, esr));
	type = (u32)pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TYPE, esr));

	switch (report_cfi_failure(regs, regs->pc, &target, type)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - CFI", regs, esr);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		return DBG_HOOK_ERROR;
	}

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook cfi_break_hook = {
	.fn = cfi_handler,
	.imm = CFI_BRK_IMM_BASE,
	.mask = CFI_BRK_IMM_MASK,
};
#endif /* CONFIG_CFI_CLANG */

static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
		"Kernel text patching",
		(void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}

static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE		0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

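/*
 * Layout of the BRK immediate used by software tag-based KASAN: the low four
 * bits encode log2 of the access size (KASAN_ESR_SIZE() turns it back into
 * bytes), bit 4 flags a write access, and bit 5 is set when the compiler
 * emitted a recoverable check (the -recover mode described below).
 */
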
static int kasan_handler(struct pt_regs *regs, unsigned long esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows us to control whether we can proceed
	 * after a crash is detected. This is done by passing the -recover
	 * flag to the compiler. Disabling recovery allows the compiler to
	 * generate more compact code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, esr);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn = kasan_handler,
	.imm = KASAN_BRK_IMM,
	.mask = KASAN_BRK_MASK,
};
#endif

#ifdef CONFIG_UBSAN_TRAP
static int ubsan_handler(struct pt_regs *regs, unsigned long esr)
{
	die(report_ubsan_failure(regs, esr & UBSAN_BRK_MASK), regs, esr);
	return DBG_HOOK_HANDLED;
}

static struct break_hook ubsan_break_hook = {
	.fn = ubsan_handler,
	.imm = UBSAN_BRK_IMM,
	.mask = UBSAN_BRK_MASK,
};
#endif

#define esr_comment(esr) ((esr) & ESR_ELx_BRK64_ISS_COMMENT_MASK)

/*
 * Initial handler for AArch64 BRK exceptions
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned long esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_CFI_CLANG
	if ((esr_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE)
		return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_KASAN_SW_TAGS
	if ((esr_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_UBSAN_TRAP
	if ((esr_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
		return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

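/*
 * Until debug_traps_init() has registered the break hooks above, BRK
 * exceptions funnel through early_brk64(), which decodes the BRK immediate
 * from the ESR comment field directly and dispatches using the same imm/mask
 * matching the hooks use later.
 */
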
void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_CFI_CLANG
	register_kernel_break_hook(&cfi_break_hook);
#endif
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
#ifdef CONFIG_UBSAN_TRAP
	register_kernel_break_hook(&ubsan_break_hook);
#endif
	debug_traps_init();
}