For various reasons we'd like to convert the bulk of arm64's exception
triage logic to C. As a step towards that, this patch converts the EL1
and EL0 SError triage logic to C.

Separate C functions are added for the native and compat cases so that
in subsequent patches we can handle native/compat differences in C.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-4-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
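
(Illustrative note, not part of the patch itself: the work removed from the
assembly stubs below maps onto the new C handlers roughly as sketched here.
Every function named appears in this patch; the trailing comments indicate
which removed assembly step each line replaces.)

  /*
   * Rough sketch of the EL0 SError path after this patch; the EL1 path has
   * the same shape minus the user-mode bookkeeping and the final
   * DAIF_PROCCTX restore.
   */
  static void __el0_error_handler_common(struct pt_regs *regs)
  {
          unsigned long esr = read_sysreg(esr_el1); /* was: mrs x25, esr_el1 */

          enter_from_user_mode();                   /* was: user_exit_irqoff */
          local_daif_restore(DAIF_ERRCTX);          /* was: enable_dbg */
          arm64_enter_nmi(regs);                    /* moved out of do_serror() */
          do_serror(regs, esr);
          arm64_exit_nmi(regs);                     /* moved out of do_serror() */
          local_daif_restore(DAIF_PROCCTX);         /* was: enable_daif */
  }

Keeping el0_error_handler and el0_error_compat_handler separate does not
change behaviour here; it only gives later patches a C-level hook for any
native/compat differences.
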
}
asmlinkage void el1_sync_handler(struct pt_regs *regs);
+asmlinkage void el1_error_handler(struct pt_regs *regs);
asmlinkage void el0_sync_handler(struct pt_regs *regs);
+asmlinkage void el0_error_handler(struct pt_regs *regs);
asmlinkage void el0_sync_compat_handler(struct pt_regs *regs);
+asmlinkage void el0_error_compat_handler(struct pt_regs *regs);
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
+void do_serror(struct pt_regs *regs, unsigned int esr);
#endif /* __ASM_EXCEPTION_H */
}
}
+asmlinkage void noinstr el1_error_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ local_daif_restore(DAIF_ERRCTX);
+ arm64_enter_nmi(regs);
+ do_serror(regs, esr);
+ arm64_exit_nmi(regs);
+}
+
asmlinkage void noinstr enter_from_user_mode(void)
{
lockdep_hardirqs_off(CALLER_ADDR0);
}
}
+static void __el0_error_handler_common(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ enter_from_user_mode();
+ local_daif_restore(DAIF_ERRCTX);
+ arm64_enter_nmi(regs);
+ do_serror(regs, esr);
+ arm64_exit_nmi(regs);
+ local_daif_restore(DAIF_PROCCTX);
+}
+
+asmlinkage void noinstr el0_error_handler(struct pt_regs *regs)
+{
+ __el0_error_handler_common(regs);
+}
+
#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
el0_inv(regs, esr);
}
}
+
+asmlinkage void noinstr el0_error_compat_handler(struct pt_regs *regs)
+{
+ __el0_error_handler_common(regs);
+}
#endif /* CONFIG_COMPAT */
SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
kernel_entry 0, 32
- b el0_error_naked
+ mov x0, sp
+ bl el0_error_compat_handler
+ b ret_to_user
SYM_CODE_END(el0_error_compat)
#endif
SYM_CODE_START_LOCAL(el1_error)
kernel_entry 1
- mrs x1, esr_el1
- enable_dbg
mov x0, sp
- bl do_serror
+ bl el1_error_handler
kernel_exit 1
SYM_CODE_END(el1_error)
SYM_CODE_START_LOCAL(el0_error)
kernel_entry 0
-el0_error_naked:
- mrs x25, esr_el1
- user_exit_irqoff
- enable_dbg
mov x0, sp
- mov x1, x25
- bl do_serror
- enable_daif
+ bl el0_error_handler
b ret_to_user
SYM_CODE_END(el0_error)
}
}
-asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
+void do_serror(struct pt_regs *regs, unsigned int esr)
{
- arm64_enter_nmi(regs);
-
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
-
- arm64_exit_nmi(regs);
}
/* GENERIC_BUG traps */