KVM: x86: Introduce cpuid_entry_{get,has}() accessors
author     Sean Christopherson <sean.j.christopherson@intel.com>
           Mon, 2 Mar 2020 23:56:30 +0000 (15:56 -0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 16 Mar 2020 16:58:14 +0000 (17:58 +0100)
Introduce accessors to retrieve feature bits from CPUID entries and use
the new accessors where applicable.  Using the accessors eliminates the
need to manually specify the register to be queried at no extra cost
(binary output is identical) and will allow adding runtime consistency
checks on the function and index in a future patch.

No functional change intended.

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
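
For reference, a minimal sketch (not part of the patch) of how the new accessor changes a caller, based on the CPUID.0xD.1 XSAVES check touched in the diff below. The wrapper function name here is hypothetical; kvm_find_cpuid_entry(), F(), cpuid_entry_has() and X86_FEATURE_XSAVES are the existing KVM symbols shown in this commit.

/* Hypothetical caller, for illustration only. */
static bool example_guest_has_xsaves(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (!best)
		return false;

	/* Old style: the caller open-codes the register (EAX) and the mask. */
	/* return best->eax & F(XSAVES); */

	/*
	 * New style: cpuid_entry_has() derives the register and bit from the
	 * X86_FEATURE_XSAVES definition via x86_feature_cpuid().
	 */
	return cpuid_entry_has(best, X86_FEATURE_XSAVES);
}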
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index ffcf647..81bf655 100644
@@ -68,7 +68,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
                best->edx |= F(APIC);
 
        if (apic) {
-               if (best->ecx & F(TSC_DEADLINE_TIMER))
+               if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;
@@ -96,7 +96,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
        }
 
        best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
-       if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
+       if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
+                    cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
        /*
@@ -155,7 +156,7 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
                        break;
                }
        }
-       if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
+       if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
                entry->edx &= ~F(NX);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
@@ -387,7 +388,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry)
                entry->ebx |= F(TSC_ADJUST);
 
                entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
-               f_la57 = entry->ecx & F(LA57);
+               f_la57 = cpuid_entry_get(entry, X86_FEATURE_LA57);
                cpuid_mask(&entry->ecx, CPUID_7_ECX);
                /* Set LA57 based on hardware capability. */
                entry->ecx |= f_la57;
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 46b4b61..bf95428 100644
@@ -95,17 +95,10 @@ static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_featu
        return reverse_cpuid[x86_leaf];
 }
 
-static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
-                                                    unsigned int x86_feature)
+static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+                                                 const struct cpuid_reg *cpuid)
 {
-       struct kvm_cpuid_entry2 *entry;
-       const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
-
-       entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
-       if (!entry)
-               return NULL;
-
-       switch (cpuid.reg) {
+       switch (cpuid->reg) {
        case CPUID_EAX:
                return &entry->eax;
        case CPUID_EBX:
@@ -120,6 +113,41 @@ static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
        }
 }
 
+static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+                                               unsigned int x86_feature)
+{
+       const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+
+       return __cpuid_entry_get_reg(entry, &cpuid);
+}
+
+static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
+                                          unsigned int x86_feature)
+{
+       u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+       return *reg & __feature_bit(x86_feature);
+}
+
+static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
+                                           unsigned int x86_feature)
+{
+       return cpuid_entry_get(entry, x86_feature);
+}
+
+static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
+                                                    unsigned int x86_feature)
+{
+       const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+       struct kvm_cpuid_entry2 *entry;
+
+       entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+       if (!entry)
+               return NULL;
+
+       return __cpuid_entry_get_reg(entry, &cpuid);
+}
+
 static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
                                            unsigned int x86_feature)
 {