arm64: KVM: Prevent speculative S1 PTW when restoring vcpu context
author: Marc Zyngier <maz@kernel.org>
Tue, 30 Jul 2019 10:15:31 +0000 (11:15 +0100)
committer: Marc Zyngier <maz@kernel.org>
Sat, 26 Oct 2019 09:44:49 +0000 (10:44 +0100)
When handling erratum 1319367, we must ensure that the page table
walker cannot parse the S1 page tables while the guest is in an
inconsistent state. This is done as follows:

On guest entry:
- TCR_EL1.EPD{0,1} are set, ensuring that no PTW can occur
- all system registers are restored, except for TCR_EL1 and SCTLR_EL1
- stage-2 is restored
- SCTLR_EL1 and TCR_EL1 are restored

On guest exit:
- SCTLR_EL1.M and TCR_EL1.EPD{0,1} are set, ensuring that no PTW can occur
- stage-2 is disabled
- All host system registers are restored

Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/sysreg-sr.c

index 69e10b2..5765b17 100644 (file)
@@ -118,6 +118,20 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
        }
 
        write_sysreg(val, cptr_el2);
+
+       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+               struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
+
+               isb();
+               /*
+                * At this stage, and thanks to the above isb(), S2 is
+                * configured and enabled. We can now restore the guest's S1
+                * configuration: SCTLR, and only then TCR.
+                */
+               write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     SYS_SCTLR);
+               isb();
+               write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       SYS_TCR);
+       }
 }
 
 static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
@@ -156,6 +170,23 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
        u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
+       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+               u64 val;
+
+               /*
+                * Set the TCR and SCTLR registers in the exact opposite
+                * sequence as __activate_traps_nvhe (first prevent walks,
+                * then force the MMU on). A generous sprinkling of isb()
+                * ensures that things happen in this exact order.
+                */
+               val = read_sysreg_el1(SYS_TCR);
+               write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
+               isb();
+               val = read_sysreg_el1(SYS_SCTLR);
+               write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
+               isb();
+       }
+
        __deactivate_traps_common();
 
        mdcr_el2 &= MDCR_EL2_HPMN_MASK;
index 7ddbc84..22b8128 100644 (file)
@@ -117,12 +117,26 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 {
        write_sysreg(ctxt->sys_regs[MPIDR_EL1],         vmpidr_el2);
        write_sysreg(ctxt->sys_regs[CSSELR_EL1],        csselr_el1);
-       write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     SYS_SCTLR);
+
+       if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+               write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     SYS_SCTLR);
+               write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       SYS_TCR);
+       } else  if (!ctxt->__hyp_running_vcpu) {
+               /*
+                * Must only be done for guest registers, hence the context
+                * test. We're coming from the host, so SCTLR.M is already
+                * set. Pairs with __activate_traps_nvhe().
+                */
+               write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
+                                 TCR_EPD1_MASK | TCR_EPD0_MASK),
+                                SYS_TCR);
+               isb();
+       }
+
        write_sysreg(ctxt->sys_regs[ACTLR_EL1],         actlr_el1);
        write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],     SYS_CPACR);
        write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],     SYS_TTBR0);
        write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],     SYS_TTBR1);
-       write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       SYS_TCR);
        write_sysreg_el1(ctxt->sys_regs[ESR_EL1],       SYS_ESR);
        write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],     SYS_AFSR0);
        write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],     SYS_AFSR1);
@@ -135,6 +149,23 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg(ctxt->sys_regs[PAR_EL1],           par_el1);
        write_sysreg(ctxt->sys_regs[TPIDR_EL1],         tpidr_el1);
 
+       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) &&
+           ctxt->__hyp_running_vcpu) {
+               /*
+                * Must only be done for host registers, hence the context
+                * test. Pairs with __deactivate_traps_nvhe().
+                */
+               isb();
+               /*
+                * At this stage, and thanks to the above isb(), S2 is
+                * deconfigured and disabled. We can now restore the host's
+                * S1 configuration: SCTLR, and only then TCR.
+                */
+               write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     SYS_SCTLR);
+               isb();
+               write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       SYS_TCR);
+       }
+
        write_sysreg(ctxt->gp_regs.sp_el1,              sp_el1);
        write_sysreg_el1(ctxt->gp_regs.elr_el1,         SYS_ELR);
        write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);