KVM: nSVM: use svm->nested.save to load vmcb12 registers and avoid TOC/TOU races
author	Emanuele Giuseppe Esposito <eesposit@redhat.com>
Wed, 3 Nov 2021 14:05:25 +0000 (10:05 -0400)
committer	Paolo Bonzini <pbonzini@redhat.com>
Wed, 8 Dec 2021 09:24:40 +0000 (04:24 -0500)
Use the already-checked svm->nested.save cached fields
(EFER, CR0, CR4, ...) instead of vmcb12's fields in
nested_vmcb02_prepare_save().

This prevents TOC/TOU (time-of-check/time-of-use) races, since
the guest could otherwise modify the vmcb12 fields between the
consistency checks and their use.

This also removes the need to force-set EFER_SVME in
nested_vmcb02_prepare_save().
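
For reference, a minimal sketch of the cache-then-check-then-use
pattern this series establishes (illustrative only; the field list
is assumed from the vmcb_save_area_cached structure introduced
earlier in the series):

	/* Snapshot the guest-writable save area once, at VMRUN. */
	static void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
						   struct vmcb_save_area *save)
	{
		svm->nested.save.efer = save->efer;
		svm->nested.save.cr0  = save->cr0;
		svm->nested.save.cr3  = save->cr3;
		svm->nested.save.cr4  = save->cr4;
		svm->nested.save.dr6  = save->dr6;
		svm->nested.save.dr7  = save->dr7;
	}

After this snapshot, both the consistency checks and the vmcb02
setup read only the cached copy, so a concurrent guest write to
vmcb12 can no longer change state after it has been validated.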

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20211103140527.752797-6-eesposit@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/nested.c

index 545d0ad..aad09d5 100644
@@ -248,13 +248,6 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
 static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
                                     struct vmcb_save_area_cached *save)
 {
-       /*
-        * FIXME: these should be done after copying the fields,
-        * to avoid TOC/TOU races.  For these save area checks
-        * the possible damage is limited since kvm_set_cr0 and
-        * kvm_set_cr4 handle failure; EFER_SVME is an exception
-        * so it is force-set later in nested_prepare_vmcb_save.
-        */
        if (CC(!(save->efer & EFER_SVME)))
                return false;
 
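
For context, the cache is filled before these checks run; in this
series, nested_svm_vmrun() does roughly the following (abridged,
illustrative sketch):

	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

	if (!nested_vmcb_check_save(vcpu) ||
	    !nested_vmcb_check_controls(vcpu)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		goto out;
	}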
@@ -511,15 +504,10 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 
        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
 
-       /*
-        * Force-set EFER_SVME even though it is checked earlier on the
-        * VMCB12, because the guest can flip the bit between the check
-        * and now.  Clearing EFER_SVME would call svm_free_nested.
-        */
-       svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+       svm_set_efer(&svm->vcpu, svm->nested.save.efer);
 
-       svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
-       svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
+       svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
+       svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);
 
        svm->vcpu.arch.cr2 = vmcb12->save.cr2;
 
@@ -534,8 +522,8 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 
        /* These bits will be set properly on the first execution when new_vmcb12 is true */
        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
-               svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
-               svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
+               svm->vmcb->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
+               svm->vcpu.arch.dr6  = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
                vmcb_mark_dirty(svm->vmcb, VMCB_DR);
        }
 }
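
The cached dr6/dr7 consumed above have already been validated by
__nested_vmcb_check_save(); abridged, the relevant checks are:

	if (CC(!kvm_dr6_valid(save->dr6)) ||
	    CC(!kvm_dr7_valid(save->dr7)))
		return false;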
@@ -649,7 +637,7 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
        nested_vmcb02_prepare_control(svm);
        nested_vmcb02_prepare_save(svm, vmcb12);
 
-       ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
+       ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
                                  nested_npt_enabled(svm), from_vmrun);
        if (ret)
                return ret;
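
Similarly, the cached cr3 handed to nested_svm_load_cr3() passed the
long-mode consistency check in __nested_vmcb_check_save(); abridged:

	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}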