KVM: x86: cleanup CR3 reserved bits checks
author Paolo Bonzini <pbonzini@redhat.com>
Fri, 13 Nov 2020 13:30:38 +0000 (08:30 -0500)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 3 Feb 2021 09:30:38 +0000 (04:30 -0500)
If not in long mode, the low bits of CR3 are reserved but the processor does
not enforce that they be zero, so remove those checks.  If in long mode,
however, the MBZ bits extend down to the guest's highest physical address
bit, excluding the encryption bit.

Make the checks consistent with the above, and match them between
nested_vmcb_checks and KVM_SET_SREGS.
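
For illustration, the long-mode mask consulted here (vcpu->arch.cr3_lm_rsvd_bits)
covers bits 63 down to the guest's MAXPHYADDR, minus the SEV encryption bit.
Below is a minimal user-space sketch of that derivation, mirroring KVM's
rsvd_bits() helper; cr3_valid_long_mode() and sev_c_bit are hypothetical names
invented for this demo, not kernel APIs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors KVM's rsvd_bits(): a mask with bits s..e (inclusive) set. */
static uint64_t rsvd_bits(unsigned int s, unsigned int e)
{
	return ((2ULL << (e - s)) - 1) << s;
}

/* Hypothetical helper: would CR3 pass the long-mode MBZ check? */
static bool cr3_valid_long_mode(uint64_t cr3, unsigned int maxphyaddr,
				int sev_c_bit /* -1 if not an SEV guest */)
{
	uint64_t mbz = rsvd_bits(maxphyaddr, 63);

	/* SEV guests may set the encryption bit, so it is not MBZ. */
	if (sev_c_bit >= 0)
		mbz &= ~(1ULL << sev_c_bit);

	return !(cr3 & mbz);
}

int main(void)
{
	/* The removed fixed mask hard-coded MAXPHYADDR == 52: */
	printf("%#llx\n", (unsigned long long)rsvd_bits(52, 63));
	/* prints 0xfff0000000000000, i.e. the old MSR_CR3_LONG_MBZ_MASK */

	/* A 48-bit guest must also keep bits 48..51 clear: */
	printf("%d\n", cr3_valid_long_mode(1ULL << 50, 48, -1)); /* 0 */
	printf("%d\n", cr3_valid_long_mode(1ULL << 47, 48, -1)); /* 1 */
	return 0;
}

The old MSR_CR3_LONG_MBZ_MASK was exactly rsvd_bits(52, 63), i.e. it
hard-coded MAXPHYADDR == 52; the per-vCPU cr3_lm_rsvd_bits instead reflects
the guest's actual MAXPHYADDR and excludes the SEV encryption bit.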

Cc: stable@vger.kernel.org
Fixes: 761e41693465 ("KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests")
Fixes: a780a3ea6282 ("KVM: X86: Fix reserved bits check for MOV to CR3")
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 7a605ad..db30670 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -231,6 +231,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 
 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
+       struct kvm_vcpu *vcpu = &svm->vcpu;
        bool vmcb12_lma;
 
        if ((vmcb12->save.efer & EFER_SVME) == 0)
@@ -244,18 +245,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 
        vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
 
-       if (!vmcb12_lma) {
-               if (vmcb12->save.cr4 & X86_CR4_PAE) {
-                       if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
-                               return false;
-               } else {
-                       if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
-                               return false;
-               }
-       } else {
+       if (vmcb12_lma) {
                if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
                    !(vmcb12->save.cr0 & X86_CR0_PE) ||
-                   (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+                   (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
                        return false;
        }
        if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 0fe874a..6e7d070 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -403,9 +403,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK           0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK       0x7U
-#define MSR_CR3_LONG_MBZ_MASK                  0xfff0000000000000U
 #define MSR_INVALID                            0xffffffffU
 
 extern int sev;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42b28d0..c1650e2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9624,6 +9624,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
                 */
                if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
                        return false;
+               if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
+                       return false;
        } else {
                /*
                 * Not in 64-bit mode: EFER.LMA is clear and the code