KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
author     Maxim Levitsky <mlevitsk@redhat.com>
           Wed, 14 Jul 2021 22:56:24 +0000 (01:56 +0300)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 18 Aug 2021 06:59:18 +0000 (08:59 +0200)
commit 0f923e07124df069ba68d8bb12324398f4b6b709 upstream.

* Invert the mask of bits that we pick from L2 in
  nested_vmcb02_prepare_control (nested_prepare_vmcb_control in this
  backport)

* Invert and explicitly use the VIRQ-related bits bitmask in
  svm_clear_vintr

This fixes a security issue that allowed a malicious L1 to run L2 with
AVIC enabled; L2 could then exploit the uninitialized but enabled AVIC
to read and write host physical memory at certain offsets.
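
For illustration only (not part of the patch): a minimal user-space C
sketch of the old denylist merge versus the new allowlist, showing how a
vmcb12 bit outside the host-owned set, such as AVIC_ENABLE (bit 31),
used to survive into the merged int_ctl. Bit positions mirror
arch/x86/include/asm/svm.h; merge_old/merge_new are hypothetical
stand-ins for the kernel code.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* int_ctl bit positions, mirroring arch/x86/include/asm/svm.h */
    #define V_TPR_MASK            0x0fu
    #define V_IRQ_MASK            (1u << 8)
    #define V_GIF_MASK            (1u << 9)
    #define V_INTR_PRIO_MASK      (0x0fu << 16)
    #define V_IGN_TPR_MASK        (1u << 20)
    #define V_INTR_MASKING_MASK   (1u << 24)
    #define V_GIF_ENABLE_MASK     (1u << 25)
    #define AVIC_ENABLE_MASK      (1u << 31)
    #define V_IRQ_INJECTION_BITS_MASK \
            (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

    /* Old (vulnerable) merge: a denylist; every bit not listed is taken
     * from L2's vmcb12, including bits KVM never meant to support. */
    static uint32_t merge_old(uint32_t l1, uint32_t l2)
    {
            const uint32_t mask =
                    V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
            return (l2 & ~mask) | (l1 & mask);
    }

    /* New merge: an explicit allowlist of bits L2 is permitted to set. */
    static uint32_t merge_new(uint32_t l1, uint32_t l2)
    {
            const uint32_t vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
            const uint32_t vmcb01_bits =
                    V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
            return (l2 & vmcb12_bits) | (l1 & vmcb01_bits);
    }

    int main(void)
    {
            /* Malicious L1 sets AVIC_ENABLE in vmcb12 without ever
             * initializing AVIC state. */
            uint32_t l2 = V_IRQ_MASK | AVIC_ENABLE_MASK;
            uint32_t l1 = V_INTR_MASKING_MASK;

            assert(merge_old(l1, l2) & AVIC_ENABLE_MASK);    /* bug: bit 31 leaks */
            assert(!(merge_new(l1, l2) & AVIC_ENABLE_MASK)); /* fix: bit 31 dropped */
            printf("old %#010x, new %#010x\n",
                   (unsigned)merge_old(l1, l2), (unsigned)merge_new(l1, l2));
            return 0;
    }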

Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/svm.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 71d630b..f8fad50 100644
@@ -166,6 +166,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define V_IGN_TPR_SHIFT 20
 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
 
+#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
+
 #define V_INTR_MASKING_SHIFT 24
 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
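
As a sanity check, a tiny sketch of the value the new macro expands to,
assuming V_IRQ_SHIFT is 8 and V_INTR_PRIO_SHIFT is 16 (both are defined
earlier in this header but are not visible in the hunk above):

    #include <stdio.h>

    #define V_IRQ_MASK       (1u << 8)      /* bit 8: V_IRQ (assumed shift) */
    #define V_INTR_PRIO_MASK (0x0fu << 16)  /* bits 16-19: V_INTR_PRIO (assumed shift) */
    #define V_IGN_TPR_MASK   (1u << 20)     /* bit 20: V_IGN_TPR (shift shown above) */
    #define V_IRQ_INJECTION_BITS_MASK \
            (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

    int main(void)
    {
            printf("%#010x\n", V_IRQ_INJECTION_BITS_MASK);  /* prints 0x001f0100 */
            return 0;
    }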
 
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index da6ce73..8a229db 100644
@@ -429,7 +429,10 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 
 static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 {
-       const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+       const u32 int_ctl_vmcb01_bits =
+               V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
+
+       const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
 
        if (nested_npt_enabled(svm))
                nested_svm_init_mmu_context(&svm->vcpu);
@@ -437,9 +440,9 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
                svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
 
-       svm->vmcb->control.int_ctl             =
-               (svm->nested.ctl.int_ctl & ~mask) |
-               (svm->nested.hsave->control.int_ctl & mask);
+       svm->vmcb->control.int_ctl =
+               (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+               (svm->nested.hsave->control.int_ctl & int_ctl_vmcb01_bits);
 
        svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
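
A small sketch (same hypothetical bit positions as above) that
enumerates exactly which int_ctl bits change hands with this hunk:
every bit that the old denylist took from vmcb12 but the new allowlist
forces to zero in vmcb02, AVIC_ENABLE (bit 31) among them:

    #include <stdint.h>
    #include <stdio.h>

    #define V_TPR_MASK          0x0fu
    #define V_IRQ_MASK          (1u << 8)
    #define V_GIF_MASK          (1u << 9)
    #define V_INTR_PRIO_MASK    (0x0fu << 16)
    #define V_IGN_TPR_MASK      (1u << 20)
    #define V_INTR_MASKING_MASK (1u << 24)
    #define V_GIF_ENABLE_MASK   (1u << 25)
    #define V_IRQ_INJECTION_BITS_MASK \
            (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

    int main(void)
    {
            const uint32_t old_l2_bits =
                    ~(V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK);
            const uint32_t new_l2_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

            /* bits formerly taken from vmcb12, now forced to zero in vmcb02 */
            uint32_t dropped = old_l2_bits & ~new_l2_bits;
            for (int bit = 0; bit < 32; bit++)
                    if (dropped & (1u << bit))
                            printf("bit %2d no longer taken from L2\n", bit);
            return 0;
    }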
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1c9226c..1c23aee 100644
@@ -1486,17 +1486,17 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-       const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
        svm_clr_intercept(svm, INTERCEPT_VINTR);
 
        /* Drop int_ctl fields related to VINTR injection.  */
-       svm->vmcb->control.int_ctl &= mask;
+       svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
        if (is_guest_mode(&svm->vcpu)) {
-               svm->nested.hsave->control.int_ctl &= mask;
+               svm->nested.hsave->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
 
                WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
                        (svm->nested.ctl.int_ctl & V_TPR_MASK));
-               svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
+               svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
+                       V_IRQ_INJECTION_BITS_MASK;
        }
 
        vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
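
A sketch (same hypothetical bit positions) of why the inversion in
svm_clear_vintr matters: the old keep-list zeroed every bit it did not
explicitly list, so the masking arithmetic would also wipe a
legitimately set AVIC_ENABLE, whereas the new drop-list clears only the
three VIRQ injection bits:

    #include <stdint.h>
    #include <stdio.h>

    #define V_TPR_MASK          0x0fu
    #define V_IRQ_MASK          (1u << 8)
    #define V_GIF_MASK          (1u << 9)
    #define V_INTR_PRIO_MASK    (0x0fu << 16)
    #define V_IGN_TPR_MASK      (1u << 20)
    #define V_INTR_MASKING_MASK (1u << 24)
    #define V_GIF_ENABLE_MASK   (1u << 25)
    #define AVIC_ENABLE_MASK    (1u << 31)
    #define V_IRQ_INJECTION_BITS_MASK \
            (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

    int main(void)
    {
            /* vmcb with AVIC enabled and a pending virtual IRQ */
            uint32_t int_ctl =
                    AVIC_ENABLE_MASK | V_IRQ_MASK | V_IGN_TPR_MASK | V_TPR_MASK;

            /* Old keep-list: everything outside the four listed bits is
             * zeroed, so AVIC_ENABLE is lost as a side effect. */
            uint32_t old_ctl = int_ctl &
                    (V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK |
                     V_INTR_MASKING_MASK);

            /* New drop-list: only the VIRQ injection bits are cleared. */
            uint32_t new_ctl = int_ctl & ~V_IRQ_INJECTION_BITS_MASK;

            printf("old clear: %#010x (AVIC kept: %s)\n", (unsigned)old_ctl,
                   (old_ctl & AVIC_ENABLE_MASK) ? "yes" : "no");
            printf("new clear: %#010x (AVIC kept: %s)\n", (unsigned)new_ctl,
                   (new_ctl & AVIC_ENABLE_MASK) ? "yes" : "no");
            return 0;
    }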