KVM: x86: hyperv: consistently use 'hv_vcpu' for 'struct kvm_vcpu_hv' variables
author: Vitaly Kuznetsov <vkuznets@redhat.com>
Wed, 26 Sep 2018 17:02:55 +0000 (19:02 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 16 Sep 2019 06:21:50 +0000 (08:21 +0200)
[ Upstream commit 1779a39f786397760ae7a7cc03cf37697d8ae58d ]

Rename 'hv' to 'hv_vcpu' in kvm_hv_set_msr/kvm_hv_get_msr(); 'hv' is
'reserved' for 'struct kvm_hv' variables across the file.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/kvm/hyperv.c

index 73fa074..3f2775a 100644 (file)
@@ -1042,20 +1042,20 @@ static u64 current_task_runtime_100ns(void)
 
 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
-       struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+       struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                if (!host || (u32)data >= KVM_MAX_VCPUS)
                        return 1;
-               hv->vp_index = (u32)data;
+               hv_vcpu->vp_index = (u32)data;
                break;
        case HV_X64_MSR_VP_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;
 
                if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
-                       hv->hv_vapic = data;
+                       hv_vcpu->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
@@ -1066,7 +1066,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
-               hv->hv_vapic = data;
+               hv_vcpu->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
@@ -1082,7 +1082,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
-               hv->runtime_offset = data - current_task_runtime_100ns();
+               hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
@@ -1174,11 +1174,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
                          bool host)
 {
        u64 data = 0;
-       struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+       struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
-               data = hv->vp_index;
+               data = hv_vcpu->vp_index;
                break;
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
@@ -1187,10 +1187,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_VP_ASSIST_PAGE:
-               data = hv->hv_vapic;
+               data = hv_vcpu->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
-               data = current_task_runtime_100ns() + hv->runtime_offset;
+               data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION: