// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

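/*
 * Kernel-mode entry wrapper: after the generic entry logic has run, check
 * TFSR_EL1 for asynchronous MTE tag check faults and clear PSTATE.TCO so
 * that subsequent kernel accesses are tag checked again.
 */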
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}

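/*
 * Kernel-mode exit wrapper: the TFSR check must happen before
 * __exit_to_kernel_mode(), as afterwards it is no longer safe to call
 * instrumentable code.
 */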
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	prepare_exit_to_user_mode(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

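/*
 * Entry point for the return-to-user path; the asm_ prefix marks it as
 * being called from assembly (the EL0 return path in entry.S) rather than
 * from C.
 */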
asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif

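/*
 * Decide whether to reschedule on return from an IRQ taken in kernel mode.
 * With CONFIG_PREEMPT_DYNAMIC this is gated by a static key toggled when
 * the preemption model is selected (e.g. via the "preempt=" boot
 * parameter); otherwise it is fixed by CONFIG_PREEMPTION.
 */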
static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * detected and enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

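/*
 * Run an IRQ/FIQ handler with this pt_regs published as the per-CPU "IRQ
 * regs". If we are still on the task stack, switch to the per-CPU IRQ
 * stack first; otherwise we must already be on an IRQ/exception stack, so
 * it is safe to call the handler directly.
 */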
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

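/*
 * The root IRQ/FIQ handlers, installed by the irqchip driver at boot;
 * for example the GICv3 driver registers itself with
 * set_handle_irq(gic_handle_irq).
 */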
extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)						\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \
{										\
	const char *desc = #regsize "-bit " #el " " #vector;			\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));			\
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

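/*
 * Workaround for Cortex-A76 erratum 1463225, where software single-step
 * can misbehave around an SVC. On syscall entry we deliberately take a
 * dummy single-step exception with MDSCR_EL1.SS set, using the per-CPU
 * flag above so the debug handler can recognise and swallow it.
 */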
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;
	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}

#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

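/*
 * Synchronous exceptions taken from EL1 while using SP_EL1 (the "h" in
 * el1h) are dispatched on the exception class (EC) field of ESR_EL1;
 * anything unrecognised is fatal.
 */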
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

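/*
 * With pseudo-NMIs enabled, an interrupt taken while the interrupted
 * context had IRQs masked can only be an NMI, and is framed with the full
 * NMI entry/exit accounting.
 */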
static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}

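/*
 * Unmask Debug and SError while keeping IRQ/FIQ masked, then route the
 * interrupt down either the pseudo-NMI or the normal IRQ path.
 */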
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

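/*
 * Bit 55 of an address selects the TTBR1 (kernel) half of the address
 * space, so a user PC with that bit set cannot be legitimate; apply
 * branch predictor hardening before handling such an interrupt.
 */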
static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */