arm64: kvm: hyp: use cpus_have_final_cap()
author Mark Rutland <mark.rutland@arm.com>
Fri, 21 Feb 2020 14:50:22 +0000 (14:50 +0000)
committer Catalin Marinas <catalin.marinas@arm.com>
Fri, 13 Mar 2020 17:34:28 +0000 (17:34 +0000)
The KVM hyp code is only run after system capabilities have been
finalized, and thus all const cap checks have been patched. This is
noted in __cpu_init_hyp_mode(), where we BUG() if called too early:

| /*
|  * Call initialization code, and switch to the full blown HYP code.
|  * If the cpucaps haven't been finalized yet, something has gone very
|  * wrong, and hyp will crash and burn when it uses any
|  * cpus_have_const_cap() wrapper.
|  */
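
The check that this comment guards is the BUG_ON() against
system_capabilities_finalized() (a sketch, paraphrasing
arch/arm64/include/asm/kvm_host.h as of this series):

| BUG_ON(!system_capabilities_finalized());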

Given this, the hyp code can use cpus_have_final_cap() and avoid
generating code to check the cpu_hwcaps array, which would be unsafe to
run in hyp context.

This patch migrates the KVM hyp code to cpus_have_final_cap(), avoiding
this redundant code generation, and making it possible to detect if we
accidentally invoke this code too early. In the latter case, the BUG()
in cpus_have_final_cap() will cause a hyp panic.
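
For reference, a simplified sketch of the two helpers (paraphrasing
arch/arm64/include/asm/cpufeature.h as of this series; details elided):

| static __always_inline bool cpus_have_const_cap(int num)
| {
| 	if (system_capabilities_finalized())
| 		return __cpus_have_const_cap(num);	/* patched test */
| 	else
| 		return cpus_have_cap(num);		/* reads cpu_hwcaps */
| }
|
| static __always_inline bool cpus_have_final_cap(int num)
| {
| 	if (system_capabilities_finalized())
| 		return __cpus_have_const_cap(num);	/* patched test */
| 	else
| 		BUG();					/* called too early */
| }

The fallback cpu_hwcaps read in cpus_have_const_cap() is the code that
is unsafe to run in hyp context; cpus_have_final_cap() never generates
it.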

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/kvm/hyp/tlb.c

arch/arm64/kvm/hyp/switch.c
index dfe8dd1..27fcdff 100644 (file)
@@ -127,7 +127,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
        write_sysreg(val, cptr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
                isb();
@@ -146,12 +146,12 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
        u64 hcr = vcpu->arch.hcr_el2;
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+       if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
                hcr |= HCR_TVM;
 
        write_sysreg(hcr, hcr_el2);
 
-       if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+       if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
                write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 
        if (has_vhe())
@@ -181,7 +181,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
        u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;
 
                /*
@@ -328,7 +328,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
         * resolve the IPA using the AT instruction.
         */
        if (!(esr & ESR_ELx_S1PTW) &&
-           (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+           (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
             (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
@@ -498,7 +498,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (*exit_code != ARM_EXCEPTION_TRAP)
                goto exit;
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+       if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
            kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
            handle_tx2_tvm(vcpu))
                return true;
@@ -555,7 +555,7 @@ exit:
 
 static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
 {
-       if (!cpus_have_const_cap(ARM64_SSBD))
+       if (!cpus_have_final_cap(ARM64_SSBD))
                return false;
 
        return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
arch/arm64/kvm/hyp/sysreg-sr.c
index 7672a97..75b1925 100644 (file)
@@ -71,7 +71,7 @@ static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ct
        ctxt->gp_regs.regs.pc           = read_sysreg_el2(SYS_ELR);
        ctxt->gp_regs.regs.pstate       = read_sysreg_el2(SYS_SPSR);
 
-       if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+       if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
                ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
 }
 
@@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg(ctxt->sys_regs[MPIDR_EL1],         vmpidr_el2);
        write_sysreg(ctxt->sys_regs[CSSELR_EL1],        csselr_el1);
 
-       if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+       if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     SYS_SCTLR);
                write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       SYS_TCR);
        } else  if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg(ctxt->sys_regs[PAR_EL1],           par_el1);
        write_sysreg(ctxt->sys_regs[TPIDR_EL1],         tpidr_el1);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
+       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
            ctxt->__hyp_running_vcpu) {
                /*
                 * Must only be done for host registers, hence the context
@@ -194,7 +194,7 @@ __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
        write_sysreg_el2(ctxt->gp_regs.regs.pc,         SYS_ELR);
        write_sysreg_el2(pstate,                        SYS_SPSR);
 
-       if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+       if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
                write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
 }
 
arch/arm64/kvm/hyp/tlb.c
index 92f560e..ceaddbe 100644 (file)
@@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 
        local_irq_save(cxt->flags);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
                /*
                 * For CPUs that are affected by ARM errata 1165522 or 1530923,
                 * we cannot trust stage-1 to be in a correct state at that
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
                                                  struct tlb_inv_context *cxt)
 {
-       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;
 
                /*
@@ -103,7 +103,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        isb();
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
                /* Restore the registers to what they were */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
                write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
@@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 {
        write_sysreg(0, vttbr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+       if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                /* Ensure write of the host VMID */
                isb();
                /* Restore the host's TCR_EL1 */