KVM: x86: hyperv: keep track of mismatched VP indexes
author Vitaly Kuznetsov <vkuznets@redhat.com>
Wed, 26 Sep 2018 17:02:56 +0000 (19:02 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 16 Sep 2019 06:21:50 +0000 (08:21 +0200)
[ Upstream commit 87ee613d076351950b74383215437f841ebbeb75 ]

In most cases the VP index of a vcpu matches its vcpu index. Userspace
is, however, free to set any mapping it wishes, and we need to account for
that when searching for a vCPU with a particular VP index. To keep the
search optimal in both cases, introduce a 'num_mismatched_vp_indexes'
counter showing how many vCPUs have a VP index that differs from their
vcpu index. When the counter is zero we can assume vp_index == vcpu_idx.
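
For illustration, a minimal sketch (not part of this patch) of the kind of
VP-index lookup the counter keeps cheap. The helper name get_vcpu_by_vpidx
and its exact shape are assumptions made for this example; only the
num_mismatched_vp_indexes fast path is the point.

/*
 * Sketch: resolve a VP index to a vCPU.  While no vCPU has had its VP
 * index overridden by userspace, vp_index == vcpu_idx for every vCPU,
 * so the lookup is a direct index into the vCPU array; otherwise fall
 * back to a linear scan over all vCPUs.
 */
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (vpidx >= KVM_MAX_VCPUS)
		return NULL;

	/* Fast path: the default 1:1 VP index <-> vCPU index mapping holds. */
	if (!atomic_read(&kvm->arch.hyperv.num_mismatched_vp_indexes)) {
		vcpu = kvm_get_vcpu(kvm, vpidx);
		if (vcpu && vcpu->arch.hyperv.vp_index == vpidx)
			return vcpu;
	}

	/* Slow path: at least one VP index was changed by userspace. */
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->arch.hyperv.vp_index == vpidx)
			return vcpu;

	return NULL;
}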

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/hyperv.c

arch/x86/include/asm/kvm_host.h
index 3245b95..b641745 100644
@@ -784,6 +784,9 @@ struct kvm_hv {
        u64 hv_reenlightenment_control;
        u64 hv_tsc_emulation_control;
        u64 hv_tsc_emulation_status;
+
+       /* How many vCPUs have VP index != vCPU index */
+       atomic_t num_mismatched_vp_indexes;
 };
 
 enum kvm_irqchip_mode {
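
The comment in the hyperv.c hunk below refers to kvm_hv_vcpu_postcreate().
For context, a sketch of that existing initialization (paraphrased here, so
the exact body in hyperv.c may differ): every vCPU starts out with vp_index
equal to its vcpu index, and struct kvm_hv is part of the zero-allocated VM
state, so the new counter starts at zero and only moves when userspace
overrides a VP index via HV_X64_MSR_VP_INDEX.

/*
 * Sketch of the default initialization the new comment refers to:
 * the VP index starts as the vCPU's own index, so the 1:1 mapping
 * holds until userspace writes HV_X64_MSR_VP_INDEX.
 */
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}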
arch/x86/kvm/hyperv.c
index 3f2775a..2bb554b 100644
@@ -1045,11 +1045,31 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
        switch (msr) {
-       case HV_X64_MSR_VP_INDEX:
-               if (!host || (u32)data >= KVM_MAX_VCPUS)
+       case HV_X64_MSR_VP_INDEX: {
+               struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+               int vcpu_idx = kvm_vcpu_get_idx(vcpu);
+               u32 new_vp_index = (u32)data;
+
+               if (!host || new_vp_index >= KVM_MAX_VCPUS)
                        return 1;
-               hv_vcpu->vp_index = (u32)data;
+
+               if (new_vp_index == hv_vcpu->vp_index)
+                       return 0;
+
+               /*
+                * The VP index is initialized to vcpu_idx by
+                * kvm_hv_vcpu_postcreate so they initially match.  Now the
+                * VP index is changing, adjust num_mismatched_vp_indexes if
+                * it now matches or no longer matches vcpu_idx.
+                */
+               if (hv_vcpu->vp_index == vcpu_idx)
+                       atomic_inc(&hv->num_mismatched_vp_indexes);
+               else if (new_vp_index == vcpu_idx)
+                       atomic_dec(&hv->num_mismatched_vp_indexes);
+
+               hv_vcpu->vp_index = new_vp_index;
                break;
+       }
        case HV_X64_MSR_VP_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;