KVM: arm64: Tidy up kvm_map_vectors()
authorWill Deacon <will@kernel.org>
Fri, 13 Nov 2020 11:38:39 +0000 (11:38 +0000)
committerMarc Zyngier <maz@kernel.org>
Mon, 16 Nov 2020 10:40:17 +0000 (10:40 +0000)
The bulk of the work in kvm_map_vectors() is conditional on the
ARM64_HARDEN_EL2_VECTORS capability, so return early if that is not set
and make the code a bit easier to read.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20201113113847.21619-3-will@kernel.org
arch/arm64/kvm/arm.c

index b43b637..476bc61 100644 (file)
@@ -1297,6 +1297,8 @@ static unsigned long nvhe_percpu_order(void)
 
 static int kvm_map_vectors(void)
 {
+       int slot;
+
        /*
         * SV2  = ARM64_SPECTRE_V2
         * HEL2 = ARM64_HARDEN_EL2_VECTORS
@@ -1306,22 +1308,20 @@ static int kvm_map_vectors(void)
         * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
         *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
         */
-       if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-               phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-               unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+       if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS))
+               return 0;
 
-               /*
-                * Always allocate a spare vector slot, as we don't
-                * know yet which CPUs have a BP hardening slot that
-                * we can reuse.
-                */
-               __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-               BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-               return create_hyp_exec_mappings(vect_pa, size,
-                                               &__kvm_bp_vect_base);
-       }
+       /*
+        * Always allocate a spare vector slot, as we don't know yet which CPUs
+        * have a BP hardening slot that we can reuse.
+        */
+       slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+       BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
+       __kvm_harden_el2_vector_slot = slot;
 
-       return 0;
+       return create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
+                                       __BP_HARDEN_HYP_VECS_SZ,
+                                       &__kvm_bp_vect_base);
 }
 
 static void cpu_init_hyp_mode(void)