From: Marc Zyngier
Date: Thu, 11 Mar 2021 18:29:55 +0000 (+0000)
Subject: KVM: arm64: Save/restore SVE state for nVHE
X-Git-Tag: v5.15~1218^2~38^2~10^2~3
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8c8010d69c1322734a272eb95dbbf42b5190e565;p=platform%2Fkernel%2Flinux-starfive.git

KVM: arm64: Save/restore SVE state for nVHE

Implement the SVE save/restore for nVHE, following a logic similar to
that of the VHE implementation:

- the SVE state is switched on trap from EL1 to EL2

- no further changes to ZCR_EL2 occur as long as the guest isn't
  preempted and doesn't exit to userspace

- ZCR_EL2 is reset to its default value on the first SVE access from
  the host EL1, and ZCR_EL1 restored to the default guest value in
  vcpu_put()

Acked-by: Will Deacon
Signed-off-by: Marc Zyngier
---
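Note on the ZCR programming in the hunks below: ZCR_ELx.LEN encodes the
vector length in 128-bit quadwords minus one, which is why the patch passes
vcpu_sve_max_vq(vcpu) - 1 to sve_cond_update_zcr_vq(). A minimal standalone
sketch of that arithmetic follows; the helper name and the example VL are
local to this note and not taken from the kernel tree, only SVE_VQ_BYTES
mirrors the kernel's definition.

#include <stdio.h>

#define SVE_VQ_BYTES	16	/* one SVE quadword: 128 bits, as in the kernel headers */

/* vl (bytes) -> vq, mirroring what the kernel's sve_vq_from_vl() computes */
static unsigned int vq_from_vl(unsigned int vl_bytes)
{
	return vl_bytes / SVE_VQ_BYTES;
}

int main(void)
{
	unsigned int sve_max_vl = 64;	/* hypothetical guest maximum VL: 512 bits */
	unsigned int vq = vq_from_vl(sve_max_vl);

	/* ZCR_ELx.LEN holds VQ - 1, hence the "- 1" in the fpsimd.c hunk below */
	printf("VL=%u bytes -> VQ=%u -> ZCR_ELx.LEN=%u\n",
	       sve_max_vl, vq, vq - 1);
	return 0;
}
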
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 14ea05c..5621020 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -121,11 +121,17 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 	local_irq_save(flags);

 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
-		if (guest_has_sve)
+		if (guest_has_sve) {
 			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);

+			/* Restore the VL that was saved when bound to the CPU */
+			if (!has_vhe())
+				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
+						       SYS_ZCR_EL1);
+		}
+
 		fpsimd_save_and_flush_cpu_state();
-	} else if (host_has_sve) {
+	} else if (has_vhe() && host_has_sve) {
 		/*
 		 * The FPSIMD/SVE state in the CPU has not been touched, and we
 		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 8071e1c..8a5c57e 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -217,25 +217,19 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 /* Check for an FPSIMD/SVE trap and handle as appropriate */
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
-	bool vhe, sve_guest, sve_host;
+	bool sve_guest, sve_host;
 	u8 esr_ec;
+	u64 reg;

 	if (!system_supports_fpsimd())
 		return false;

-	/*
-	 * Currently system_supports_sve() currently implies has_vhe(),
-	 * so the check is redundant. However, has_vhe() can be determined
-	 * statically and helps the compiler remove dead code.
-	 */
-	if (has_vhe() && system_supports_sve()) {
+	if (system_supports_sve()) {
 		sve_guest = vcpu_has_sve(vcpu);
 		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
-		vhe = true;
 	} else {
 		sve_guest = false;
 		sve_host = false;
-		vhe = has_vhe();
 	}

 	esr_ec = kvm_vcpu_trap_get_class(vcpu);
@@ -244,31 +238,26 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		return false;

 	/* Don't handle SVE traps for non-SVE vcpus here: */
-	if (!sve_guest)
-		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
-			return false;
+	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+		return false;

 	/* Valid trap. Switch the context: */
-
-	if (vhe) {
-		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;
-
+	if (has_vhe()) {
+		reg = CPACR_EL1_FPEN;
 		if (sve_guest)
 			reg |= CPACR_EL1_ZEN;

-		write_sysreg(reg, cpacr_el1);
+		sysreg_clear_set(cpacr_el1, 0, reg);
 	} else {
-		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
-			     cptr_el2);
-	}
+		reg = CPTR_EL2_TFP;
+		if (sve_guest)
+			reg |= CPTR_EL2_TZ;
+		sysreg_clear_set(cptr_el2, reg, 0);
+	}

 	isb();

 	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
-		/*
-		 * In the SVE case, VHE is assumed: it is enforced by
-		 * Kconfig and kvm_arch_init().
-		 */
 		if (sve_host)
 			__hyp_sve_save_host(vcpu);
 		else
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 60adc7f..b3fc016 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -41,9 +41,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	__activate_traps_common(vcpu);

 	val = CPTR_EL2_DEFAULT;
-	val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
 	if (!update_fp_enabled(vcpu)) {
-		val |= CPTR_EL2_TFP;
+		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
 		__activate_traps_fpsimd32(vcpu);
 	}