KVM: selftests: do not substitute SVM/VMX check with KVM_CAP_NESTED_STATE check
authorVitaly Kuznetsov <vkuznets@redhat.com>
Wed, 10 Jun 2020 13:58:46 +0000 (15:58 +0200)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 11 Jun 2020 16:35:17 +0000 (12:35 -0400)
state_test/smm_test use KVM_CAP_NESTED_STATE check as an indicator for
nested VMX/SVM presence and this is incorrect. Check for the required
features directly.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200610135847.754289-2-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/include/x86_64/svm_util.h
tools/testing/selftests/kvm/include/x86_64/vmx.h
tools/testing/selftests/kvm/lib/x86_64/svm.c
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/x86_64/smm_test.c
tools/testing/selftests/kvm/x86_64/state_test.c

index 674151d..b7531c8 100644 (file)
@@ -33,6 +33,7 @@ struct svm_test_data {
 struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+bool nested_svm_supported(void);
 void nested_svm_check_supported(void);
 
 static inline bool cpu_has_svm(void)
index 766af99..16fa21e 100644 (file)
@@ -603,6 +603,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
 
+bool nested_vmx_supported(void);
 void nested_vmx_check_supported(void);
 
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
index c424010..3a5c72e 100644 (file)
@@ -148,14 +148,18 @@ void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
                : "r15", "memory");
 }
 
-void nested_svm_check_supported(void)
+bool nested_svm_supported(void)
 {
        struct kvm_cpuid_entry2 *entry =
                kvm_get_supported_cpuid_entry(0x80000001);
 
-       if (!(entry->ecx & CPUID_SVM)) {
+       return entry->ecx & CPUID_SVM;
+}
+
+void nested_svm_check_supported(void)
+{
+       if (!nested_svm_supported()) {
                print_skip("nested SVM not enabled");
                exit(KSFT_SKIP);
        }
 }
-
index 4ae104f..f1e00d4 100644 (file)
@@ -379,11 +379,16 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
        init_vmcs_guest_state(guest_rip, guest_rsp);
 }
 
-void nested_vmx_check_supported(void)
+bool nested_vmx_supported(void)
 {
        struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!(entry->ecx & CPUID_VMX)) {
+       return entry->ecx & CPUID_VMX;
+}
+
+void nested_vmx_check_supported(void)
+{
+       if (!nested_vmx_supported()) {
                print_skip("nested VMX not enabled");
                exit(KSFT_SKIP);
        }
index 6f8f478..3631415 100644 (file)
@@ -118,16 +118,17 @@ int main(int argc, char *argv[])
        vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
 
        if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-               if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+               if (nested_svm_supported())
                        vcpu_alloc_svm(vm, &nested_gva);
-               else
+               else if (nested_vmx_supported())
                        vcpu_alloc_vmx(vm, &nested_gva);
-               vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-       } else {
-               pr_info("will skip SMM test with VMX enabled\n");
-               vcpu_args_set(vm, VCPU_ID, 1, 0);
        }
 
+       if (!nested_gva)
+               pr_info("will skip SMM test with VMX enabled\n");
+
+       vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
index d43b6f9..f6c8b90 100644 (file)
@@ -171,16 +171,17 @@ int main(int argc, char *argv[])
        vcpu_regs_get(vm, VCPU_ID, &regs1);
 
        if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-               if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+               if (nested_svm_supported())
                        vcpu_alloc_svm(vm, &nested_gva);
-               else
+               else if (nested_vmx_supported())
                        vcpu_alloc_vmx(vm, &nested_gva);
-               vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-       } else {
-               pr_info("will skip nested state checks\n");
-               vcpu_args_set(vm, VCPU_ID, 1, 0);
        }
 
+       if (!nested_gva)
+               pr_info("will skip nested state checks\n");
+
+       vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,