KVM: arm64: Allow patching EL2 vectors even when KASLR is not enabled
author Will Deacon <will@kernel.org>
Mon, 28 Sep 2020 10:45:24 +0000 (11:45 +0100)
committer Will Deacon <will@kernel.org>
Tue, 29 Sep 2020 15:08:17 +0000 (16:08 +0100)
Patching the EL2 exception vectors is integral to the Spectre-v2
workaround, where it can be necessary to execute CPU-specific sequences
to nobble the branch predictor before running the hypervisor text proper.

Remove the dependency on CONFIG_RANDOMIZE_BASE and allow the EL2 vectors
to be patched even when KASLR is not enabled.

Fixes: 7a132017e7a5 ("KVM: arm64: Replace CONFIG_KVM_INDIRECT_VECTORS with CONFIG_RANDOMIZE_BASE")
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/r/202009221053.Jv1XsQUZ%lkp@intel.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kernel/proton-pack.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/Makefile
arch/arm64/kvm/hyp/hyp-entry.S

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index abe02cf..7f7072f 100644
@@ -99,11 +99,9 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init         CHOOSE_NVHE_SYM(__kvm_hyp_init)
 #define __kvm_hyp_vector       CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-#ifdef CONFIG_RANDOMIZE_BASE
 extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs   CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 36606ef..cff1ceb 100644
@@ -9,6 +9,7 @@
 
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/cpufeature.h>
 
 /*
@@ -430,7 +431,6 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
        return ret;
 }
 
-#ifdef CONFIG_RANDOMIZE_BASE
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
@@ -451,12 +451,9 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
-#include <asm/mmu.h>
-
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
-/*  This is called on both VHE and !VHE systems */
 static inline void *kvm_get_hyp_vector(void)
 {
        struct bp_hardening_data *data = arm64_get_bp_hardening_data();
@@ -480,52 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
        return vect;
 }
 
-/*  This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
-       /*
-        * SV2  = ARM64_SPECTRE_V2
-        * HEL2 = ARM64_HARDEN_EL2_VECTORS
-        *
-        * !SV2 + !HEL2 -> use direct vectors
-        *  SV2 + !HEL2 -> use hardened vectors in place
-        * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
-        *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
-        */
-       if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
-               __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-               __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
-       }
-
-       if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-               phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-               unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
-               /*
-                * Always allocate a spare vector slot, as we don't
-                * know yet which CPUs have a BP hardening slot that
-                * we can reuse.
-                */
-               __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-               BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-               return create_hyp_exec_mappings(vect_pa, size,
-                                               &__kvm_bp_vect_base);
-       }
-
-       return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
-       return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-
-static inline int kvm_map_vectors(void)
-{
-       return 0;
-}
-#endif
-
 #define kvm_phys_to_vttbr(addr)                phys_to_ttbr(addr)
 
 /*
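For orientation: the hunk above elides the body of kvm_get_hyp_vector(), which this patch leaves untouched. A sketch of that body, reconstructed from the declarations above rather than copied verbatim, shows how the slot selection works on both VHE and !VHE systems:

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	/* Spectre-v2 mitigation: reroute to the hardened vectors. */
	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
		slot = data->hyp_vectors_slot;
	}

	/* EL2 hardening (!VHE only): use the exec-only mapping. */
	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	/* Each slot is a 2KiB copy of the vector table. */
	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}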
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index b1ea935..1fbaa02 100644
@@ -177,7 +177,6 @@ enum mitigation_state arm64_get_spectre_v2_state(void)
 }
 
 #ifdef CONFIG_KVM
-#ifdef CONFIG_RANDOMIZE_BASE
 #include <asm/cacheflush.h>
 #include <asm/kvm_asm.h>
 
@@ -235,7 +234,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 {
        __this_cpu_write(bp_hardening_data.fn, fn);
 }
-#endif /* CONFIG_RANDOMIZE_BASE */
 #endif /* CONFIG_KVM */
 
 static void call_smc_arch_workaround_1(void)
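For reference, bp_hardening_data is a per-CPU structure shared with the EL2 vector machinery. The relevant declarations (a sketch of what asm/mmu.h provided at the time; check the tree for the authoritative definitions) look like:

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int			hyp_vectors_slot;
	bp_hardening_cb_t	fn;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}

install_bp_hardening_cb() therefore only updates the callback for the calling CPU, and kvm_get_hyp_vector() reads the same per-CPU data to pick the matching vector slot.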
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9b90a70..13e559a 100644
@@ -1256,6 +1256,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
 }
 
+static int kvm_map_vectors(void)
+{
+       /*
+        * SV2  = ARM64_SPECTRE_V2
+        * HEL2 = ARM64_HARDEN_EL2_VECTORS
+        *
+        * !SV2 + !HEL2 -> use direct vectors
+        *  SV2 + !HEL2 -> use hardened vectors in place
+        * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+        *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+        */
+       if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+               __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+               __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+       }
+
+       if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+               phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+               unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+               /*
+                * Always allocate a spare vector slot, as we don't
+                * know yet which CPUs have a BP hardening slot that
+                * we can reuse.
+                */
+               __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+               BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+               return create_hyp_exec_mappings(vect_pa, size,
+                                               &__kvm_bp_vect_base);
+       }
+
+       return 0;
+}
+
 static void cpu_init_hyp_mode(void)
 {
        phys_addr_t pgd_ptr;
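For context, kvm_map_vectors() keeps its single call site in init_hyp_mode(), which runs once during hypervisor bring-up on a !VHE system. The call site looks roughly like this (a sketch; the exact error string and label are as remembered from mainline, not quoted from this patch):

	/* In init_hyp_mode(), after the hyp text and data are mapped: */
	err = kvm_map_vectors();
	if (err) {
		kvm_err("Cannot map vectors\n");
		goto out_err;
	}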
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 89d2bf7..d898f0d 100644
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir)                               \
                    -DDISABLE_BRANCH_PROFILING          \
                    $(DISABLE_STACKLEAK_PLUGIN)
 
-obj-$(CONFIG_KVM) += vhe/ nvhe/
-obj-$(CONFIG_RANDOMIZE_BASE) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 3cca756..7ea277b 100644
@@ -259,7 +259,6 @@ SYM_CODE_START(__kvm_hyp_vector)
        valid_vect      el1_error               // Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_vector)
 
-#ifdef CONFIG_RANDOMIZE_BASE
 .macro hyp_ventry
        .align 7
 1:     esb
@@ -309,4 +308,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
 1:     .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
        .org 1b
 SYM_CODE_END(__bp_harden_hyp_vecs)
-#endif
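The .org directives above pad __bp_harden_hyp_vecs to a fixed size: one 2KiB vector table per hardening slot. The sizing constants come from asm/mmu.h and are roughly:

#define BP_HARDEN_EL2_SLOTS	4
#define __BP_HARDEN_HYP_VECS_SZ	(BP_HARDEN_EL2_SLOTS * SZ_2K)

A CPU then finds its vectors at __bp_harden_hyp_vecs + slot * SZ_2K, which is exactly the arithmetic kvm_get_hyp_vector() performs.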