KVM: arm64: Set host stage 2 using kvm_nvhe_init_params
author	Quentin Perret <qperret@google.com>
Fri, 19 Mar 2021 10:01:29 +0000 (10:01 +0000)
committer	Marc Zyngier <maz@kernel.org>
Fri, 19 Mar 2021 12:01:21 +0000 (12:01 +0000)
Move the registers relevant to host stage 2 enablement to
kvm_nvhe_init_params to prepare the ground for enabling it in later
patches.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-22-qperret@google.com
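
The diff below moves the HCR_EL2 value and the (still zeroed) VTTBR/VTCR into
the per-CPU kvm_nvhe_init_params block, so the EL2 init code programs them from
memory rather than hard-coding them. As a minimal, self-contained sketch of
that pattern (not the kernel code itself; names such as HCR_HOST_FLAGS_DEMO and
prepare_params_demo() are illustrative placeholders):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder values standing in for HCR_HOST_NVHE_FLAGS and the
 * HCR_HOST_NVHE_PROTECTED_FLAGS variant used in the real patch. */
#define HCR_HOST_FLAGS_DEMO            0x80000000ull
#define HCR_HOST_PROTECTED_FLAGS_DEMO  0x80000002ull

struct nvhe_init_params_demo {
	uint64_t hcr_el2;
	uint64_t vttbr;
	uint64_t vtcr;
};

/* Mirrors what cpu_prepare_hyp_mode() does in the patch: pick the host
 * HCR_EL2 flags per mode, and leave host stage 2 disabled for now. */
static void prepare_params_demo(struct nvhe_init_params_demo *p, bool protected_mode)
{
	p->hcr_el2 = protected_mode ? HCR_HOST_PROTECTED_FLAGS_DEMO
				    : HCR_HOST_FLAGS_DEMO;
	/* vttbr/vtcr stay 0 until host stage 2 is enabled by later patches. */
	p->vttbr = 0;
	p->vtcr = 0;
}

int main(void)
{
	struct nvhe_init_params_demo params;

	prepare_params_demo(&params, true);
	printf("hcr_el2=%#llx vttbr=%#llx vtcr=%#llx\n",
	       (unsigned long long)params.hcr_el2,
	       (unsigned long long)params.vttbr,
	       (unsigned long long)params.vtcr);
	return 0;
}

In the real code the consumer is ___kvm_hyp_init at EL2, which loads these
fields via the NVHE_INIT_* asm-offsets and writes them straight into the
corresponding system registers.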
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/hyp-init.S
arch/arm64/kvm/hyp/nvhe/switch.c

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ebe7007..08f63c0 100644
@@ -158,6 +158,9 @@ struct kvm_nvhe_init_params {
        unsigned long tpidr_el2;
        unsigned long stack_hyp_va;
        phys_addr_t pgd_pa;
+       unsigned long hcr_el2;
+       unsigned long vttbr;
+       unsigned long vtcr;
 };
 
 /* Translate a kernel address @ptr into its equivalent linear mapping */
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a36e2fc..8930b42 100644
@@ -120,6 +120,9 @@ int main(void)
   DEFINE(NVHE_INIT_TPIDR_EL2,  offsetof(struct kvm_nvhe_init_params, tpidr_el2));
   DEFINE(NVHE_INIT_STACK_HYP_VA,       offsetof(struct kvm_nvhe_init_params, stack_hyp_va));
   DEFINE(NVHE_INIT_PGD_PA,     offsetof(struct kvm_nvhe_init_params, pgd_pa));
+  DEFINE(NVHE_INIT_HCR_EL2,    offsetof(struct kvm_nvhe_init_params, hcr_el2));
+  DEFINE(NVHE_INIT_VTTBR,      offsetof(struct kvm_nvhe_init_params, vttbr));
+  DEFINE(NVHE_INIT_VTCR,       offsetof(struct kvm_nvhe_init_params, vtcr));
 #endif
 #ifdef CONFIG_CPU_PM
   DEFINE(CPU_CTX_SP,           offsetof(struct cpu_suspend_ctx, sp));
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index d93ea0b..a6b5ba1 100644
@@ -1418,6 +1418,11 @@ static void cpu_prepare_hyp_mode(int cpu)
 
        params->stack_hyp_va = kern_hyp_va(per_cpu(kvm_arm_hyp_stack_page, cpu) + PAGE_SIZE);
        params->pgd_pa = kvm_mmu_get_httbr();
+       if (is_protected_kvm_enabled())
+               params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
+       else
+               params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
+       params->vttbr = params->vtcr = 0;
 
        /*
         * Flush the init params from the data cache because the struct will
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index a2b8b6a..a50ad9e 100644
@@ -83,11 +83,6 @@ SYM_CODE_END(__kvm_hyp_init)
  * x0: struct kvm_nvhe_init_params PA
  */
 SYM_CODE_START_LOCAL(___kvm_hyp_init)
-alternative_if ARM64_KVM_PROTECTED_MODE
-       mov_q   x1, HCR_HOST_NVHE_PROTECTED_FLAGS
-       msr     hcr_el2, x1
-alternative_else_nop_endif
-
        ldr     x1, [x0, #NVHE_INIT_TPIDR_EL2]
        msr     tpidr_el2, x1
 
@@ -97,6 +92,15 @@ alternative_else_nop_endif
        ldr     x1, [x0, #NVHE_INIT_MAIR_EL2]
        msr     mair_el2, x1
 
+       ldr     x1, [x0, #NVHE_INIT_HCR_EL2]
+       msr     hcr_el2, x1
+
+       ldr     x1, [x0, #NVHE_INIT_VTTBR]
+       msr     vttbr_el2, x1
+
+       ldr     x1, [x0, #NVHE_INIT_VTCR]
+       msr     vtcr_el2, x1
+
        ldr     x1, [x0, #NVHE_INIT_PGD_PA]
        phys_to_ttbr x2, x1
 alternative_if ARM64_HAS_CNP
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index f6d542e..9932356 100644
@@ -97,10 +97,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
        mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 
        write_sysreg(mdcr_el2, mdcr_el2);
-       if (is_protected_kvm_enabled())
-               write_sysreg(HCR_HOST_NVHE_PROTECTED_FLAGS, hcr_el2);
-       else
-               write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
+       write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
        cptr = CPTR_EL2_DEFAULT;
        if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))