KVM: x86: Remove unused "vcpu" parameter of kvm_scale_tsc()
authorJinrong Liang <cloudliang@tencent.com>
Tue, 25 Jan 2022 09:59:07 +0000 (17:59 +0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 10 Feb 2022 18:47:14 +0000 (13:47 -0500)
The "struct kvm_vcpu *vcpu" parameter of kvm_scale_tsc() is not used,
so remove it. No functional change intended.

Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
Message-Id: <20220125095909.38122-18-cloudliang@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

index 6dcccb3..6232dac 100644 (file)
@@ -1878,7 +1878,7 @@ static inline bool kvm_is_supported_user_return_msr(u32 msr)
        return kvm_find_user_return_msr(msr) >= 0;
 }
 
-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio);
+u64 kvm_scale_tsc(u64 tsc, u64 ratio);
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
index 04d3f8a..4a8e123 100644 (file)
@@ -2399,7 +2399,7 @@ static inline u64 __scale_tsc(u64 ratio, u64 tsc)
        return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
 }
 
-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio)
+u64 kvm_scale_tsc(u64 tsc, u64 ratio)
 {
        u64 _tsc = tsc;
 
@@ -2414,7 +2414,7 @@ static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
        u64 tsc;
 
-       tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
+       tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
 
        return target_tsc - tsc;
 }
@@ -2422,7 +2422,7 @@ static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
        return vcpu->arch.l1_tsc_offset +
-               kvm_scale_tsc(vcpu, host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
+               kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
 }
 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
@@ -2625,7 +2625,7 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
 {
        if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
                WARN_ON(adjustment < 0);
-       adjustment = kvm_scale_tsc(vcpu, (u64) adjustment,
+       adjustment = kvm_scale_tsc((u64) adjustment,
                                   vcpu->arch.l1_tsc_scaling_ratio);
        adjust_tsc_offset_guest(vcpu, adjustment);
 }
@@ -3045,7 +3045,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        /* With all the info we got, fill in the values */
 
        if (kvm_has_tsc_control)
-               tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz,
+               tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
                                            v->arch.l1_tsc_scaling_ratio);
 
        if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
@@ -3857,7 +3857,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        ratio = vcpu->arch.tsc_scaling_ratio;
                }
 
-               msr_info->data = kvm_scale_tsc(vcpu, rdtsc(), ratio) + offset;
+               msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
                break;
        }
        case MSR_MTRRcap:
@@ -5132,7 +5132,7 @@ static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
                           kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
                           kvm->arch.last_tsc_offset == offset);
 
-               tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
+               tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
                ns = get_kvmclock_base_ns();
 
                __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);