KVM: nVMX: Clean up x2APIC MSR handling for L2
authorSean Christopherson <seanjc@google.com>
Tue, 9 Nov 2021 01:30:47 +0000 (01:30 +0000)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 11 Nov 2021 15:56:23 +0000 (10:56 -0500)
Clean up the x2APIC MSR bitmap interception code for L2, which is the last
holdout of open coded bitmap manipulations.  Freshen up the SDM/PRM
comment, rename the function to make it abundantly clear the funky
behavior is x2APIC specific, and explain _why_ vmcs01's bitmap is ignored
(the previous comment was flat out wrong for x2APIC behavior).

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211109013047.2041518-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/nested.c

index c569a13..341c508 100644 (file)
@@ -525,44 +525,19 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
 }
 
 /*
- * If a msr is allowed by L0, we should check whether it is allowed by L1.
- * The corresponding bit will be cleared unless both of L0 and L1 allow it.
+ * For x2APIC MSRs, ignore the vmcs01 bitmap.  L1 can enable x2APIC without L1
+ * itself utilizing x2APIC.  All MSRs were previously set to be intercepted,
+ * only the "disable intercept" case needs to be handled.
  */
-static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
-                                              unsigned long *msr_bitmap_nested,
-                                              u32 msr, int type)
+static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
+                                                       unsigned long *msr_bitmap_l0,
+                                                       u32 msr, int type)
 {
-       int f = sizeof(unsigned long);
+       if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
+               vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);
 
-       /*
-        * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
-        * have the write-low and read-high bitmap offsets the wrong way round.
-        * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
-        */
-       if (msr <= 0x1fff) {
-               if (type & MSR_TYPE_R &&
-                  !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
-                       /* read-low */
-                       __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
-
-               if (type & MSR_TYPE_W &&
-                  !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
-                       /* write-low */
-                       __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
-
-       } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
-               msr &= 0x1fff;
-               if (type & MSR_TYPE_R &&
-                  !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
-                       /* read-high */
-                       __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
-
-               if (type & MSR_TYPE_W &&
-                  !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
-                       /* write-high */
-                       __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
-
-       }
+       if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
+               vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
 }
 
 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
@@ -631,7 +606,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
        /*
         * To keep the control flow simple, pay eight 8-byte writes (sixteen
         * 4-byte writes on 32-bit systems) up front to enable intercepts for
-        * the x2APIC MSR range and selectively disable them below.
+        * the x2APIC MSR range and selectively toggle those relevant to L2.
         */
        enable_x2apic_msr_intercepts(msr_bitmap_l0);
 
@@ -650,17 +625,17 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                        }
                }
 
-               nested_vmx_disable_intercept_for_msr(
+               nested_vmx_disable_intercept_for_x2apic_msr(
                        msr_bitmap_l1, msr_bitmap_l0,
                        X2APIC_MSR(APIC_TASKPRI),
                        MSR_TYPE_R | MSR_TYPE_W);
 
                if (nested_cpu_has_vid(vmcs12)) {
-                       nested_vmx_disable_intercept_for_msr(
+                       nested_vmx_disable_intercept_for_x2apic_msr(
                                msr_bitmap_l1, msr_bitmap_l0,
                                X2APIC_MSR(APIC_EOI),
                                MSR_TYPE_W);
-                       nested_vmx_disable_intercept_for_msr(
+                       nested_vmx_disable_intercept_for_x2apic_msr(
                                msr_bitmap_l1, msr_bitmap_l0,
                                X2APIC_MSR(APIC_SELF_IPI),
                                MSR_TYPE_W);