KVM: x86: hyper-v: Prepare to meet unallocated Hyper-V context
Author:     Vitaly Kuznetsov <vkuznets@redhat.com>
AuthorDate: Tue, 26 Jan 2021 13:48:12 +0000 (14:48 +0100)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Tue, 9 Feb 2021 13:17:14 +0000 (08:17 -0500)
Currently, Hyper-V context is part of 'struct kvm_vcpu_arch' and is always
available. In preparation for allocating it dynamically, check that it is
not NULL at call sites which can normally proceed without it, i.e. where the
behavior is identical to the situation when Hyper-V emulation is not being
used by the guest.
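
All such call sites follow the same pattern (see e.g. the
kvm_hv_process_stimers() hunk below): fetch the context once and bail out
early when it is absent:

	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/* No Hyper-V context allocated for this vCPU: nothing to do. */
	if (!hv_vcpu)
		return;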

When the Hyper-V context for a particular vCPU is not allocated, we may still
need to get 'vp_index' from it. E.g. in a hypothetical situation where
Hyper-V emulation was enabled on one vCPU but not on another, a Hyper-V
style send-IPI hypercall may still be used. Luckily, vp_index is always
initialized to kvm_vcpu_get_idx() and can only change while the Hyper-V
context is present. Introduce a kvm_hv_get_vpindex() helper to simplify
such call sites.
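
The helper (added to arch/x86/kvm/hyperv.h below) simply falls back to the
vCPU index when no context has been allocated:

	static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
	{
		struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

		return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
	}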

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-12-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/lapic.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 1482cad..10e7ed2 100644
@@ -142,10 +142,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
                return NULL;
 
        vcpu = kvm_get_vcpu(kvm, vpidx);
-       if (vcpu && to_hv_vcpu(vcpu)->vp_index == vpidx)
+       if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
-               if (to_hv_vcpu(vcpu)->vp_index == vpidx)
+               if (kvm_hv_get_vpindex(vcpu) == vpidx)
                        return vcpu;
        return NULL;
 }
@@ -377,9 +377,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
                break;
        }
 
-       trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id,
-                                   to_hv_vcpu(vcpu)->vp_index, msr,
-                                   *pdata);
+       trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
 
        return 0;
 }
@@ -806,6 +804,9 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
        u64 time_now, exp_time;
        int i;
 
+       if (!hv_vcpu)
+               return;
+
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
@@ -842,6 +843,9 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
+       if (!hv_vcpu)
+               return false;
+
        if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
                return false;
        return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
@@ -1504,8 +1508,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
 
        bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (test_bit(to_hv_vcpu(vcpu)->vp_index,
-                            (unsigned long *)vp_bitmap))
+               if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
                        __set_bit(i, vcpu_bitmap);
        }
        return vcpu_bitmap;
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index be1e3f5..57e53a8 100644
@@ -83,6 +83,13 @@ static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
        return &vcpu->kvm->arch.hyperv.hv_syndbg;
 }
 
+static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
+{
+       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+       return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
+}
+
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
 
@@ -121,6 +128,9 @@ static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
+       if (!hv_vcpu)
+               return false;
+
        return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
                             HV_SYNIC_STIMER_COUNT);
 }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 847fe11..45d40bf 100644
@@ -1245,7 +1245,8 @@ static int apic_set_eoi(struct kvm_lapic *apic)
        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);
 
-       if (test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
+       if (to_hv_vcpu(apic->vcpu) &&
+           test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);
 
        kvm_ioapic_send_eoi(apic, vector);
@@ -2512,7 +2513,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
         */
 
        apic_clear_irr(vector, apic);
-       if (test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
+       if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
                /*
                 * For auto-EOI interrupts, there might be another pending
                 * interrupt above PPR, so check whether to raise another
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f83199f..e0a3a9b 100644
@@ -6810,12 +6810,10 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        /* All fields are clean at this point */
        if (static_branch_unlikely(&enable_evmcs)) {
-               struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
-
                current_evmcs->hv_clean_fields |=
                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
 
-               current_evmcs->hv_vp_id = hv_vcpu->vp_index;
+               current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
        }
 
        /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 40bd2e3..7caf95d 100644
@@ -8803,8 +8803,11 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
        if (!kvm_apic_hw_enabled(vcpu->arch.apic))
                return;
 
-       bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
-                 to_hv_synic(vcpu)->vec_bitmap, 256);
+       if (to_hv_vcpu(vcpu))
+               bitmap_or((ulong *)eoi_exit_bitmap,
+                         vcpu->arch.ioapic_handled_vectors,
+                         to_hv_synic(vcpu)->vec_bitmap, 256);
+
        static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
 }