// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

struct kvm_exception_table_entry {
	int insn, fixup;
};

extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;

/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1 when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}
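
/*
 * The fine-grained traps (HFGRTR_EL2/HFGWTR_EL2) only need to be reprogrammed
 * around guest entry/exit when SME must be hidden from the guest, or when the
 * Ampere AC03_CPU_38 erratum workaround needs to trap TCR_EL1 writes.
 */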
static inline bool __hfgxtr_traps_required(void)
{
	if (cpus_have_final_cap(ARM64_SME))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		return true;

	return false;
}
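
/*
 * Program the fine-grained traps for guest entry: clear the nSMPRI_EL1 and
 * nTPIDR2_EL0 bits so that guest accesses to the SME registers trap to EL2,
 * and, on affected Ampere parts, trap guest writes to TCR_EL1.
 */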
static inline void __activate_traps_hfgxtr(void)
{
	u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;

	if (cpus_have_final_cap(ARM64_SME)) {
		tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;

		r_clr |= tmp;
		w_clr |= tmp;
	}

	/*
	 * Trap guest writes to TCR_EL1 to prevent the guest from enabling
	 * HA or HD.
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		w_set |= HFGxTR_EL2_TCR_EL1_MASK;

	sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
	sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
}
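
/*
 * Undo __activate_traps_hfgxtr() on the way back to the host: set the
 * negative SME trap bits again and stop trapping TCR_EL1 writes.
 */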
static inline void __deactivate_traps_hfgxtr(void)
{
	u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;

	if (cpus_have_final_cap(ARM64_SME)) {
		tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;

		r_set |= tmp;
		w_set |= tmp;
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		w_clr |= HFGxTR_EL2_TCR_EL1_MASK;

	sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
	sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
}
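
/*
 * Activate the traps that are handled the same way on VHE and nVHE: AArch32
 * CP15 c15 accesses, EL0 PMU accesses, the debug traps described by
 * vcpu->arch.mdcr_el2 and, where required, the fine-grained traps. The host
 * state that gets clobbered (PMUSERENR_EL0, MDCR_EL2) is saved first.
 */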
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		write_sysreg(0, pmselr_el0);

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
	}

	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	if (__hfgxtr_traps_required())
		__activate_traps_hfgxtr();
}
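
/*
 * Undo __activate_traps_common(): restore the host's MDCR_EL2 and
 * PMUSERENR_EL0 values and stop trapping AArch32 cp15 c15 accesses.
 */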
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);

	write_sysreg(0, hstr_el2);
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (__hfgxtr_traps_required())
		__deactivate_traps_hfgxtr();
}
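
/*
 * Install the guest's HCR_EL2 configuration and, when the RAS extension is
 * present, the syndrome to report for a pending virtual SError.
 */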
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}
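
/* Restore the guest's SVE state, vector length and ZCR_EL1. */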
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}

/*
 * We trap the first access to the FP/SIMD to save the host context and
 * restore the guest context lazily.
 * If FP/SIMD is not implemented, handle the trap and inject an undefined
 * instruction exception to the guest. Similarly for trapped SVE accesses.
 */
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;
	u64 reg;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Only handle traps the vCPU can support here: */
	switch (esr_ec) {
	case ESR_ELx_EC_FP_ASIMD:
		break;
	case ESR_ELx_EC_SVE:
		if (!sve_guest)
			return false;
		break;
	default:
		return false;
	}

	/* Valid trap. Switch the context: */

	/* First disable enough traps to allow us to update the registers */
	if (has_vhe() || has_hvhe()) {
		reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;

		sysreg_clear_set(cpacr_el1, 0, reg);
	} else {
		reg = CPTR_EL2_TFP;
		if (sve_guest)
			reg |= CPTR_EL2_TZ;

		sysreg_clear_set(cptr_el2, reg, 0);
	}
	isb();

	/* Write out the host state if it's in the registers */
	if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);

	/* Restore the guest state */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;

	return true;
}

static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}
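
/* Was this trap caused by an access to one of the ptrauth key registers? */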
static inline bool esr_is_ptrauth_trap(u64 esr)
{
	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}

#define __ptrauth_save_key(ctxt, key)					\
	do {								\
		u64 __val;						\
		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;		\
	} while (0)

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
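
/*
 * Handle the guest's first use of pointer authentication: save the host keys
 * into the per-CPU hyp context, mark ptrauth as enabled for this vCPU, and
 * set HCR_EL2.{API,APK} so that further key and instruction accesses no
 * longer trap.
 */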
static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}
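
/*
 * Handle a trapped read of the physical counter (CNTPCT_EL0/CNTPCTSS_EL0) by
 * returning the hardware count adjusted by the VM and vCPU counter offsets,
 * unless a nested guest hypervisor is supposed to see the trap itself.
 */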
static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctxt;
	u32 sysreg;
	u64 val;

	/*
	 * We only get here for 64bit guests, 32bit guests will hit
	 * the long and winding road all the way to the standard
	 * handling. Yes, it sucks to be irrelevant.
	 */
	sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	switch (sysreg) {
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			if (is_hyp_ctxt(vcpu)) {
				ctxt = vcpu_hptimer(vcpu);
				break;
			}

			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
			if (!vcpu_el2_e2h_is_set(vcpu))
				val = (val & CNTHCTL_EL1PCTEN) << 10;

			if (!(val & (CNTHCTL_EL1PCTEN << 10)))
				return false;
		}

		ctxt = vcpu_ptimer(vcpu);
		break;
	default:
		return false;
	}

	val = arch_timer_read_cntpct_el0();

	if (ctxt->offset.vm_offset)
		val -= *kern_hyp_va(ctxt->offset.vm_offset);
	if (ctxt->offset.vcpu_offset)
		val -= *kern_hyp_va(ctxt->offset.vcpu_offset);

	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
	__kvm_skip_instr(vcpu);
	return true;
}

static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	if (sysreg != SYS_TCR_EL1)
		return false;

	/*
	 * Affected parts do not advertise support for hardware Access Flag /
	 * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
	 * control bits are still functional. The architecture requires these be
	 * RES0 on systems that do not implement FEAT_HAFDBS.
	 *
	 * Uphold the requirements of the architecture by masking guest writes
	 * to TCR_EL1.{HA,HD} here.
	 */
	val &= ~(TCR_HD | TCR_HA);
	write_sysreg_el1(val, SYS_TCR);
	__kvm_skip_instr(vcpu);
	return true;
}
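
/*
 * Fast path for trapped AArch64 sysreg accesses that can be handled without
 * leaving hyp: errata workarounds, GICv3 CPU interface emulation, lazy
 * ptrauth key switching and physical counter reads.
 */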
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
	    handle_ampere1_tcr(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return kvm_hyp_handle_ptrauth(vcpu, exit_code);

	if (kvm_hyp_handle_cntpct(vcpu))
		return true;

	return false;
}

static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);
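
/*
 * Handle guest data aborts: decode the fault for the host and, when the GICv2
 * CPU interface is trapped, emulate valid GICV MMIO accesses directly.
 */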
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}

typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);

static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);

/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
	exit_handler_fn fn;

	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	if (fn)
		return fn(vcpu, exit_code);

	return false;
}

static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Check for the conditions of Cortex-A510's #2077057. When these occur
	 * SPSR_EL2 can't be trusted, but isn't needed either as it is
	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
	 * Are we single-stepping the guest, and took a PAC exception from the
	 * active-not-pending state?
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Save PSTATE early so that we can evaluate the vcpu mode
	 * early on.
	 */
	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * Check whether we want to repaint the state one way or
	 * another.
	 */
	early_exit_filter(vcpu, exit_code);

	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC already has an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* Check if there's an exit handler and allow it to handle the exit. */
	if (kvm_hyp_handle_exit(vcpu, exit_code))
		goto guest;

exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}
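
/*
 * An unexpected exception was taken from EL2 itself: look up ELR_EL2 in the
 * hyp exception table and redirect to the matching fixup handler, or arrange
 * for a panic once the hyp context has been restored.
 */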
static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct kvm_exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */