arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
author Marc Zyngier <marc.zyngier@arm.com>
Fri, 20 Jul 2018 09:56:32 +0000 (10:56 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 22 Jul 2018 12:27:42 +0000 (14:27 +0200)
commit 55e3748e8902ff641e334226bdcb432f9a5d78d3 upstream.

In order to offer ARCH_WORKAROUND_2 support to guests, we need
a bit of infrastructure.

Let's add a flag indicating whether or not the guest uses the
SSBD mitigation. Depending on the state of this flag, KVM can
disable ARCH_WORKAROUND_2 before entering the guest and re-enable
it on exit (a sketch of the guest-side call that drives this flag
follows the file list below).

Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/arm.c
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/hyp/switch.c
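
For context: the flag added by this patch mirrors the state a guest
requests through the SMCCC 1.1 ARCH_WORKAROUND_2 call; the hypercall
handling itself lands in a companion patch in this series. Below is a
minimal sketch of the calling side, modelled on the arm64 mitigation
code in cpu_errata.c (the function name here is illustrative):

    #include <linux/arm-smccc.h>
    #include <linux/psci.h>

    /*
     * Ask the higher exception level to enable (state == 1) or
     * disable (state == 0) the SSBD mitigation. A guest kernel uses
     * the HVC conduit, which is exactly what KVM traps.
     */
    static void set_ssbd_mitigation(bool state)
    {
    	switch (psci_ops.conduit) {
    	case PSCI_CONDUIT_HVC:
    		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
    		break;
    	case PSCI_CONDUIT_SMC:
    		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
    		break;
    	default:
    		break;
    	}
    }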

index 6a0d496c4145021ef2eb057289554e82d75d980b..e2f05cedaf97837ea97d44636d13e89b3c3f91a8 100644 (file)
@@ -256,6 +256,11 @@ static inline int kvm_map_vectors(void)
        return 0;
 }
 
+static inline int hyp_map_aux_data(void)
+{
+       return 0;
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
index d7cd5410ab5c5310cac34aeb11a268e071f5c710..20436972537f896df1e3cdadc3e31a4adf504d0a 100644 (file)
@@ -1367,6 +1367,12 @@ static int init_hyp_mode(void)
                }
        }
 
+       err = hyp_map_aux_data();
+       if (err) {
+               kvm_err("Cannot map host auxilary data: %d\n", err);
+               goto out_err;
+       }
+
        kvm_info("Hyp mode initialized successfully\n");
 
        return 0;
index b3b4a03f2d2f8636d8be58904271bf23420ba10f..8f5cf83b23396d91bc9e6c56ce6a688503813dbb 100644 (file)
@@ -33,6 +33,9 @@
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT    0
 #define KVM_ARM64_DEBUG_DIRTY          (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
 
+#define        VCPU_WORKAROUND_2_FLAG_SHIFT    0
+#define        VCPU_WORKAROUND_2_FLAG          (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
+
 /* Translate a kernel address of @sym into its equivalent linear mapping */
 #define kvm_ksym_ref(sym)                                              \
        ({                                                              \
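
A note on the VCPU_WORKAROUND_2_FLAG definition above: the _AC() macro
lets the same constant be used from both C and assembly. _AC(1, UL)
expands to 1UL when compiled as C, and to a plain 1 under __ASSEMBLY__,
per include/uapi/linux/const.h:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)	X
    #else
    #define __AC(X,Y)	(X##Y)
    #define _AC(X,Y)	__AC(X,Y)
    #endif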
index 834ae734db4172011aeeb89d565a6d4394af4c98..beeafda8956710492c49cd7ec7f9ec243865e6e4 100644 (file)
@@ -213,6 +213,9 @@ struct kvm_vcpu_arch {
        /* Exception Information */
        struct kvm_vcpu_fault_info fault;
 
+       /* State of various workarounds, see kvm_asm.h for bit assignment */
+       u64 workaround_flags;
+
        /* Guest debug state */
        u64 debug_flags;
 
index cd7c9a3e0a03430c894cbf919193c459fbcff5a4..547519abc751c43925c5fe47e7120cdc5185a08c 100644 (file)
@@ -387,5 +387,29 @@ static inline int kvm_map_vectors(void)
 }
 #endif
 
+#ifdef CONFIG_ARM64_SSBD
+DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+static inline int hyp_map_aux_data(void)
+{
+       int cpu, err;
+
+       for_each_possible_cpu(cpu) {
+               u64 *ptr;
+
+               ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
+               err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+#else
+static inline int hyp_map_aux_data(void)
+{
+       return 0;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
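
The loop above maps each CPU's copy of arm64_ssbd_callback_required
into the EL2 address space: EL2 runs on its own set of page tables, so
without this mapping the __hyp_this_cpu_read() in the switch code below
would fault. The accessor itself comes from a sibling patch in this
series ("arm64: KVM: Add HYP per-cpu accessors") and is roughly:

    /* Per-cpu access at EL2: the symbol's base address plus this
     * CPU's offset, which the host stashed in tpidr_el2. */
    #define __hyp_this_cpu_ptr(sym)				\
    	({							\
    		void *__ptr = hyp_symbol_addr(sym);		\
    		__ptr += read_sysreg(tpidr_el2);		\
    		(typeof(&sym))__ptr;				\
    	})

    #define __hyp_this_cpu_read(sym)	(*__hyp_this_cpu_ptr(sym))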
index 57f4e61456647f35bca684cb98f5e940aa89314e..12f9d1ecdf4ceccd767a2de0827dd2e9d43403b0 100644 (file)
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/types.h>
 #include <linux/jump_label.h>
 #include <uapi/linux/psci.h>
@@ -267,6 +268,39 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
        write_sysreg_el2(*vcpu_pc(vcpu), elr);
 }
 
+static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
+{
+       if (!cpus_have_cap(ARM64_SSBD))
+               return false;
+
+       return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
+}
+
+static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ARM64_SSBD
+       /*
+        * The host runs with the workaround always present. If the
+        * guest wants it disabled, so be it...
+        */
+       if (__needs_ssbd_off(vcpu) &&
+           __hyp_this_cpu_read(arm64_ssbd_callback_required))
+               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
+#endif
+}
+
+static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ARM64_SSBD
+       /*
+        * If the guest has disabled the workaround, bring it back on.
+        */
+       if (__needs_ssbd_off(vcpu) &&
+           __hyp_this_cpu_read(arm64_ssbd_callback_required))
+               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
+#endif
+}
+
 int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpu_context *host_ctxt;
@@ -297,6 +331,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
        __sysreg_restore_guest_state(guest_ctxt);
        __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
 
+       __set_guest_arch_workaround_state(vcpu);
+
        /* Jump in the fire! */
 again:
        exit_code = __guest_enter(vcpu, host_ctxt);
@@ -339,6 +375,8 @@ again:
                }
        }
 
+       __set_host_arch_workaround_state(vcpu);
+
        fp_enabled = __fpsimd_enabled();
 
        __sysreg_save_guest_state(guest_ctxt);
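
What the diff deliberately leaves out is how the guest flips the flag:
a companion patch ("arm64: KVM: Handle guest's ARCH_WORKAROUND_2
requests") wires the trapped SMCCC call to workaround_flags. The helper
below is a hypothetical sketch of that plumbing, not the companion
patch's actual code:

    /*
     * Hypothetical sketch: a guest HVC with function ID
     * ARM_SMCCC_ARCH_WORKAROUND_2 and x1 = enable updates the
     * per-vcpu flag consumed by the switch code above.
     */
    static void handle_wa2_request(struct kvm_vcpu *vcpu, bool enable)
    {
    	if (enable)
    		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
    	else
    		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
    }

With that in place, the overall sequence is: the host always runs with
the workaround applied; on guest entry, if the guest asked for it off
and this CPU requires the dynamic workaround, KVM issues the SMC to
disable it, and symmetrically re-enables it before handing control
back to the host.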