KVM: arm64: Remove eager host SVE state saving
author    Dave Martin <Dave.Martin@arm.com>      Fri, 20 Apr 2018 16:39:16 +0000 (17:39 +0100)
committer Marc Zyngier <marc.zyngier@arm.com>    Fri, 25 May 2018 11:28:30 +0000 (12:28 +0100)
Now that the host SVE context can be saved on demand from Hyp,
there is no longer any need to save this state in advance before
entering the guest.

This patch removes the relevant call to
kvm_fpsimd_flush_cpu_state().

Since the problem that function was intended to solve no longer
exists, the function and its dependencies are also deleted.
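
For reference, the on-demand path that makes the eager save redundant is
the lazy host-state save performed in the Hyp FP/SIMD trap handler added
earlier in this series. The sketch below is a simplified illustration of
that scheme, not part of this patch; the helper and flag names
(__hyp_switch_fpsimd(), KVM_ARM64_FP_HOST, __fpsimd_save_state()) follow
the preceding patches and the body is abridged.

	/*
	 * Simplified sketch (not part of this patch): host FP/SIMD/SVE
	 * state is saved lazily, the first time the guest traps on an
	 * FP/SIMD access, instead of eagerly before every guest entry.
	 */
	static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
	{
		if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
			/* Save the host's registers only when actually needed */
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
			vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
		}

		/* Load the guest's FP/SIMD registers and resume the guest */
		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
		vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

		return true;
	}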

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/fpsimd.c
virt/kvm/arm/arm.c

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 3b85bbb..f079a20 100644
@@ -312,9 +312,6 @@ static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
 
-/* All host FP/SIMD state is restored on guest exit, so nothing to save: */
-static inline void kvm_fpsimd_flush_cpu_state(void) {}
-
 static inline void kvm_arm_vhe_guest_enter(void) {}
 static inline void kvm_arm_vhe_guest_exit(void) {}
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fda9289..a4ca202 100644
@@ -457,16 +457,6 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 }
 #endif
 
-/*
- * All host FP/SIMD state is restored on guest exit, so nothing needs
- * doing here except in the SVE case:
-*/
-static inline void kvm_fpsimd_flush_cpu_state(void)
-{
-       if (system_supports_sve())
-               sve_flush_cpu_state();
-}
-
 static inline void kvm_arm_vhe_guest_enter(void)
 {
        local_daif_mask();
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index e60c3a2..7074c4c 100644
  */
 struct fpsimd_last_state_struct {
        struct user_fpsimd_state *st;
-       bool sve_in_use;
 };
 
 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
@@ -1008,7 +1007,6 @@ void fpsimd_bind_task_to_cpu(void)
                this_cpu_ptr(&fpsimd_last_state);
 
        last->st = &current->thread.uw.fpsimd_state;
-       last->sve_in_use = test_thread_flag(TIF_SVE);
        current->thread.fpsimd_cpu = smp_processor_id();
 
        if (system_supports_sve()) {
@@ -1030,7 +1028,6 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
        WARN_ON(!in_softirq() && !irqs_disabled());
 
        last->st = st;
-       last->sve_in_use = false;
 }
 
 /*
@@ -1091,24 +1088,6 @@ void fpsimd_flush_cpu_state(void)
        set_thread_flag(TIF_FOREIGN_FPSTATE);
 }
 
-/*
- * Invalidate any task SVE state currently held in this CPU's regs.
- *
- * This is used to prevent the kernel from trying to reuse SVE register data
- * that is detroyed by KVM guest enter/exit.  This function should go away when
- * KVM SVE support is implemented.  Don't use it for anything else.
- */
-#ifdef CONFIG_ARM64_SVE
-void sve_flush_cpu_state(void)
-{
-       struct fpsimd_last_state_struct const *last =
-               this_cpu_ptr(&fpsimd_last_state);
-
-       if (last->st && last->sve_in_use)
-               fpsimd_flush_cpu_state();
-}
-#endif /* CONFIG_ARM64_SVE */
-
 #ifdef CONFIG_KERNEL_MODE_NEON
 
 DEFINE_PER_CPU(bool, kernel_neon_busy);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index ce7c6f3..39e7771 100644
@@ -682,9 +682,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 */
                preempt_disable();
 
-               /* Flush FP/SIMD state that can't survive guest entry/exit */
-               kvm_fpsimd_flush_cpu_state();
-
                kvm_pmu_flush_hwstate(vcpu);
 
                local_irq_disable();