From: Sean Christopherson
Date: Mon, 2 Mar 2020 23:56:36 +0000 (-0800)
Subject: KVM: x86: Handle PKU CPUID adjustment in VMX code
X-Git-Tag: v5.10.7~2921^2~109
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d64d83d1e026f9fea9c8f18bf97b9529f7e4189c;p=platform%2Fkernel%2Flinux-rpi.git

KVM: x86: Handle PKU CPUID adjustment in VMX code

Move the setting of the PKU CPUID bit into VMX to eliminate an instance
of the undesirable "unsigned f_* = *_supported ? F(*) : 0" pattern in
the common CPUID handling code.  Drop ->pku_supported(); CPUID
adjustment was its only user.

Note, some AMD CPUs now support PKU, but SVM doesn't yet support
exposing it to a guest.

No functional change intended.

Reviewed-by: Vitaly Kuznetsov
Signed-off-by: Sean Christopherson
Signed-off-by: Paolo Bonzini
---

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 52470cc..5b7848e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1180,7 +1180,6 @@ struct kvm_x86_ops {
 	bool (*xsaves_supported)(void);
 	bool (*umip_emulated)(void);
 	bool (*pt_supported)(void);
-	bool (*pku_supported)(void);
 
 	int (*check_nested_events)(struct kvm_vcpu *vcpu);
 	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index d7b3db0..3413fed 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -333,7 +333,6 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry)
 {
 	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
 	unsigned f_la57;
-	unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0;
 
 	/* cpuid 7.0.ebx */
 	const u32 kvm_cpuid_7_0_ebx_x86_features =
@@ -373,10 +372,6 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry)
 	cpuid_entry_mask(entry, CPUID_7_ECX);
 	/* Set LA57 based on hardware capability. */
 	entry->ecx |= f_la57;
-	entry->ecx |= f_pku;
-	/* PKU is not yet implemented for shadow paging. */
-	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
-		cpuid_entry_clear(entry, X86_FEATURE_PKU);
 
 	entry->edx &= kvm_cpuid_7_0_edx_x86_features;
 	cpuid_entry_mask(entry, CPUID_7_EDX);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9ef4ebf..8984ae1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6093,11 +6093,6 @@ static bool svm_has_wbinvd_exit(void)
 	return true;
 }
 
-static bool svm_pku_supported(void)
-{
-	return false;
-}
-
 #define PRE_EX(exit)  { .exit_code = (exit), \
 			.stage = X86_ICPT_PRE_EXCEPT, }
 #define POST_EX(exit) { .exit_code = (exit), \
@@ -7458,7 +7453,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.xsaves_supported = svm_xsaves_supported,
 	.umip_emulated = svm_umip_emulated,
 	.pt_supported = svm_pt_supported,
-	.pku_supported = svm_pku_supported,
 
 	.set_supported_cpuid = svm_set_supported_cpuid,
 
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index c00e265..8903475 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -146,11 +146,6 @@ static inline bool vmx_umip_emulated(void)
 		SECONDARY_EXEC_DESC;
 }
 
-static inline bool vmx_pku_supported(void)
-{
-	return boot_cpu_has(X86_FEATURE_PKU);
-}
-
 static inline bool cpu_has_vmx_rdtscp(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2f0897f..60a6ef3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7137,6 +7137,11 @@ static void vmx_set_supported_cpuid(struct kvm_cpuid_entry2 *entry)
 			cpuid_entry_set(entry, X86_FEATURE_INVPCID);
 		if (vmx_umip_emulated())
 			cpuid_entry_set(entry, X86_FEATURE_UMIP);
+
+		/* PKU is not yet implemented for shadow paging. */
+		if (enable_ept && boot_cpu_has(X86_FEATURE_PKU) &&
+		    boot_cpu_has(X86_FEATURE_OSPKE))
+			cpuid_entry_set(entry, X86_FEATURE_PKU);
 		break;
 	default:
 		break;
@@ -7938,7 +7943,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.xsaves_supported = vmx_xsaves_supported,
 	.umip_emulated = vmx_umip_emulated,
 	.pt_supported = vmx_pt_supported,
-	.pku_supported = vmx_pku_supported,
 
 	.request_immediate_exit = vmx_request_immediate_exit,
 
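
For reference, here is a minimal standalone sketch (not part of the patch) of the two styles the commit message contrasts: the common-code "unsigned f_* = *_supported ? F(*) : 0" flag being removed, versus the vendor module setting the bit itself.  The names below (vendor_pku_supported(), cpu_has_ospke(), F_PKU, tdp_enabled) are simplified stand-ins for the real KVM internals, not actual kernel APIs.

/* Illustrative only: simplified stand-ins for KVM's CPUID.7.0:ECX adjustment. */
#include <stdbool.h>
#include <stdio.h>

#define F_PKU (1u << 3)	/* stand-in for F(PKU), the PKU bit in CPUID.7.0:ECX */

static bool tdp_enabled = true;	/* stand-in for tdp_enabled / enable_ept */
static bool vendor_pku_supported(void) { return true; }	/* stand-in for ->pku_supported() */
static bool cpu_has_ospke(void) { return true; }	/* stand-in for boot_cpu_has(X86_FEATURE_OSPKE) */

/* Old style: common code builds an f_pku mask from a per-vendor callback,
 * then clears the bit again when shadow paging is in use. */
static unsigned int old_style_ecx(unsigned int ecx)
{
	unsigned int f_pku = vendor_pku_supported() ? F_PKU : 0;

	ecx |= f_pku;
	/* PKU is not yet implemented for shadow paging. */
	if (!tdp_enabled || !cpu_has_ospke())
		ecx &= ~F_PKU;
	return ecx;
}

/* New style: the vendor module sets the bit itself; no common-code flag. */
static unsigned int new_style_ecx(unsigned int ecx)
{
	if (tdp_enabled && vendor_pku_supported() && cpu_has_ospke())
		ecx |= F_PKU;
	return ecx;
}

int main(void)
{
	printf("old: %#x  new: %#x\n", old_style_ecx(0), new_style_ecx(0));
	return 0;
}

The vendor module already knows whether TDP/EPT is in use and whether it supports PKU, so it can set the bit directly and the common CPUID code no longer needs a per-feature f_pku flag or the ->pku_supported() callback.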