KVM: selftests: x86: Use "this_cpu" prefix for cpu vendor queries
author Vishal Annapurve <vannapurve@google.com>
Wed, 11 Jan 2023 00:44:43 +0000 (00:44 +0000)
committer Sean Christopherson <seanjc@google.com>
Tue, 24 Jan 2023 18:06:31 +0000 (10:06 -0800)
Replace the is_intel/amd_cpu helpers with this_cpu_* helpers to better
convey the intent of querying the vendor of the current CPU.
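
For reference, the check works because CPUID leaf 0 returns the 12-byte
vendor string split across EBX, EDX and ECX, in that order, so a literal
such as "GenuineIntel" can be compared as three 32-bit chunks. Below is a
minimal standalone sketch of the same check (not part of the patch); it
uses the compiler-provided <cpuid.h> rather than the selftest cpuid()
wrapper, and vendor_string_is() is an illustrative name, not a selftest
helper.

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool vendor_string_is(const char *vendor)
{
	unsigned int eax, ebx, ecx, edx, chunk[3];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return false;

	/*
	 * The selftest helper casts the string to u32 chunks directly;
	 * copy here only to keep the standalone sketch aliasing-clean.
	 */
	memcpy(chunk, vendor, sizeof(chunk));

	/* CPUID.0 reports the vendor string in EBX, EDX, ECX, in that order. */
	return ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2];
}

int main(void)
{
	printf("GenuineIntel: %d\n", vendor_string_is("GenuineIntel"));
	printf("AuthenticAMD: %d\n", vendor_string_is("AuthenticAMD"));
	return 0;
}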

Suggested-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Vishal Annapurve <vannapurve@google.com>
Link: https://lore.kernel.org/r/20230111004445.416840-2-vannapurve@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c

tools/testing/selftests/kvm/include/x86_64/processor.h
index bbe47e6..b5ea72f 100644
@@ -555,6 +555,28 @@ static inline uint32_t this_cpu_model(void)
        return x86_model(this_cpu_fms());
 }
 
+static inline bool this_cpu_vendor_string_is(const char *vendor)
+{
+       const uint32_t *chunk = (const uint32_t *)vendor;
+       uint32_t eax, ebx, ecx, edx;
+
+       cpuid(0, &eax, &ebx, &ecx, &edx);
+       return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
+}
+
+static inline bool this_cpu_is_intel(void)
+{
+       return this_cpu_vendor_string_is("GenuineIntel");
+}
+
+/*
+ * Exclude early K5 samples with a vendor string of "AMDisbetter!"
+ */
+static inline bool this_cpu_is_amd(void)
+{
+       return this_cpu_vendor_string_is("AuthenticAMD");
+}
+
 static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
                                      uint8_t reg, uint8_t lo, uint8_t hi)
 {
@@ -691,9 +713,6 @@ static inline void cpu_relax(void)
                "hlt\n" \
                )
 
-bool is_intel_cpu(void);
-bool is_amd_cpu(void);
-
 struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
 void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
 void kvm_x86_state_cleanup(struct kvm_x86_state *state);
tools/testing/selftests/kvm/lib/x86_64/processor.c
index acfa1d0..7d17685 100644
@@ -113,7 +113,7 @@ static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
 
 bool kvm_is_tdp_enabled(void)
 {
-       if (is_intel_cpu())
+       if (this_cpu_is_intel())
                return get_kvm_intel_param_bool("ept");
        else
                return get_kvm_amd_param_bool("npt");
@@ -1006,28 +1006,6 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state)
        free(state);
 }
 
-static bool cpu_vendor_string_is(const char *vendor)
-{
-       const uint32_t *chunk = (const uint32_t *)vendor;
-       uint32_t eax, ebx, ecx, edx;
-
-       cpuid(0, &eax, &ebx, &ecx, &edx);
-       return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
-}
-
-bool is_intel_cpu(void)
-{
-       return cpu_vendor_string_is("GenuineIntel");
-}
-
-/*
- * Exclude early K5 samples with a vendor string of "AMDisbetter!"
- */
-bool is_amd_cpu(void)
-{
-       return cpu_vendor_string_is("AuthenticAMD");
-}
-
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
 {
        if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
@@ -1236,7 +1214,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
        max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
 
        /* Avoid reserved HyperTransport region on AMD processors.  */
-       if (!is_amd_cpu())
+       if (!this_cpu_is_amd())
                return max_gfn;
 
        /* On parts with <40 physical address bits, the area is fully hidden */
tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
index 32f7e09..5489c98 100644
@@ -48,10 +48,10 @@ static void guest_main(void)
        const uint8_t *other_hypercall_insn;
        uint64_t ret;
 
-       if (is_intel_cpu()) {
+       if (this_cpu_is_intel()) {
                native_hypercall_insn = vmx_vmcall;
                other_hypercall_insn  = svm_vmmcall;
-       } else if (is_amd_cpu()) {
+       } else if (this_cpu_is_amd()) {
                native_hypercall_insn = svm_vmmcall;
                other_hypercall_insn  = vmx_vmcall;
        } else {
tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
index fb02581..b0a2a0b 100644
@@ -93,7 +93,7 @@ int main(void)
 {
        int warnings_before, warnings_after;
 
-       TEST_REQUIRE(is_intel_cpu());
+       TEST_REQUIRE(this_cpu_is_intel());
 
        TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
 
tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 2de98fc..c728822 100644
@@ -363,7 +363,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
  */
 static bool use_intel_pmu(void)
 {
-       return is_intel_cpu() &&
+       return this_cpu_is_intel() &&
               kvm_cpu_property(X86_PROPERTY_PMU_VERSION) &&
               kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) &&
               kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
@@ -397,7 +397,7 @@ static bool use_amd_pmu(void)
        uint32_t family = kvm_cpu_family();
        uint32_t model = kvm_cpu_model();
 
-       return is_amd_cpu() &&
+       return this_cpu_is_amd() &&
                (is_zen1(family, model) ||
                 is_zen2(family, model) ||
                 is_zen3(family, model));
tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
index 2641b28..53e1ef2 100644
@@ -111,7 +111,7 @@ int main(int argc, char *argv[])
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
 
-       TEST_REQUIRE(is_intel_cpu());
+       TEST_REQUIRE(this_cpu_is_intel());
        TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
 
        vm = vm_create_with_one_vcpu(&vcpu, guest_code);