KVM: x86: Refactor tsc synchronization code
author Oliver Upton <oupton@google.com>
Thu, 16 Sep 2021 18:15:37 +0000 (18:15 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 18 Oct 2021 18:43:45 +0000 (14:43 -0400)
Refactor kvm_synchronize_tsc to make a new function that allows callers
to specify TSC parameters (offset, value, nanoseconds, etc.) explicitly
for the sake of participating in TSC synchronization.

Signed-off-by: Oliver Upton <oupton@google.com>
Message-Id: <20210916181538.968978-7-oupton@google.com>
[Make sure kvm->arch.cur_tsc_generation and vcpu->arch.this_tsc_generation are
 equal at the end of __kvm_synchronize_tsc, if matched is false. Reported by
 Maxim Levitsky. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
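
[Editor's note: for context, below is a sketch of the kind of caller this
refactor enables; it is NOT part of this patch. The wrapper name
kvm_vcpu_set_tsc_offset() and its matching policy are hypothetical, while
kvm_scale_tsc(), get_kvmclock_base_ns(), rdtsc() and the tsc_write_lock
contract are assumed to be as found in x86.c around this commit.]

/* Hypothetical caller: synchronize a vCPU to an explicit L1 TSC offset. */
static void kvm_vcpu_set_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long flags;
	bool matched;
	u64 tsc, ns;

	/* __kvm_synchronize_tsc() asserts this lock is held. */
	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);

	/* Guest TSC value implied by the requested offset, sampled now. */
	tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
	ns = get_kvmclock_base_ns();

	/*
	 * Example policy only: extend the current generation when the
	 * vCPU's frequency still matches the last synchronizing write.
	 */
	matched = (vcpu->arch.virtual_tsc_khz &&
		   kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz);

	__kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);

	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
}

All TSC parameters are computed by the caller and handed to
__kvm_synchronize_tsc() directly, which is the point of the refactor.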
arch/x86/kvm/x86.c

index d7588f6..c74a44f 100644
@@ -2435,13 +2435,63 @@ static inline bool kvm_check_tsc_unstable(void)
        return check_tsc_unstable();
 }
 
+/*
+ * Infers attempts to synchronize the guest's tsc from host writes. Sets the
+ * offset for the vcpu and tracks the TSC matching generation that the vcpu
+ * participates in.
+ */
+static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
+                                 u64 ns, bool matched)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       lockdep_assert_held(&kvm->arch.tsc_write_lock);
+
+       /*
+        * We also track the most recent recorded KHZ, write and time to
+        * allow the matching interval to be extended at each write.
+        */
+       kvm->arch.last_tsc_nsec = ns;
+       kvm->arch.last_tsc_write = tsc;
+       kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
+
+       vcpu->arch.last_guest_tsc = tsc;
+
+       kvm_vcpu_write_tsc_offset(vcpu, offset);
+
+       if (!matched) {
+               /*
+                * We split periods of matched TSC writes into generations.
+                * For each generation, we track the original measured
+                * nanosecond time, offset, and write, so if TSCs are in
+                * sync, we can match exact offset, and if not, we can match
+                * exact software computation in compute_guest_tsc()
+                *
+                * These values are tracked in kvm->arch.cur_xxx variables.
+                */
+               kvm->arch.cur_tsc_generation++;
+               kvm->arch.cur_tsc_nsec = ns;
+               kvm->arch.cur_tsc_write = tsc;
+               kvm->arch.cur_tsc_offset = offset;
+               kvm->arch.nr_vcpus_matched_tsc = 0;
+       } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
+               kvm->arch.nr_vcpus_matched_tsc++;
+       }
+
+       /* Keep track of which generation this VCPU has synchronized to */
+       vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
+       vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
+       vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+
+       kvm_track_tsc_matching(vcpu);
+}
+
 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 {
        struct kvm *kvm = vcpu->kvm;
        u64 offset, ns, elapsed;
        unsigned long flags;
-       bool matched;
-       bool already_matched;
+       bool matched = false;
        bool synchronizing = false;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
@@ -2487,48 +2537,9 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
                        offset = kvm_compute_l1_tsc_offset(vcpu, data);
                }
                matched = true;
-               already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
-       } else {
-               /*
-                * We split periods of matched TSC writes into generations.
-                * For each generation, we track the original measured
-                * nanosecond time, offset, and write, so if TSCs are in
-                * sync, we can match exact offset, and if not, we can match
-                * exact software computation in compute_guest_tsc()
-                *
-                * These values are tracked in kvm->arch.cur_xxx variables.
-                */
-               kvm->arch.cur_tsc_generation++;
-               kvm->arch.cur_tsc_nsec = ns;
-               kvm->arch.cur_tsc_write = data;
-               kvm->arch.cur_tsc_offset = offset;
-               matched = false;
-       }
-
-       /*
-        * We also track th most recent recorded KHZ, write and time to
-        * allow the matching interval to be extended at each write.
-        */
-       kvm->arch.last_tsc_nsec = ns;
-       kvm->arch.last_tsc_write = data;
-       kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
-
-       vcpu->arch.last_guest_tsc = data;
-
-       /* Keep track of which generation this VCPU has synchronized to */
-       vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
-       vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
-       vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
-
-       kvm_vcpu_write_tsc_offset(vcpu, offset);
-
-       if (!matched) {
-               kvm->arch.nr_vcpus_matched_tsc = 0;
-       } else if (!already_matched) {
-               kvm->arch.nr_vcpus_matched_tsc++;
        }
 
-       kvm_track_tsc_matching(vcpu);
+       __kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 }
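
[Editor's note: to see why the fixup noted above matters, here is a minimal
userspace model of the generation/match bookkeeping; it is illustrative
only, all names are invented, and none of it is kernel code. Updating
this_tsc_generation even on the !matched path means the vCPU that opens a
generation is never double-counted by its own later matched writes, so
nr_vcpus_matched_tsc counts only the other vCPUs that joined.]

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kvm->arch and vcpu->arch fields involved. */
struct kvm_model { unsigned int cur_tsc_generation, nr_vcpus_matched_tsc; };
struct vcpu_model { unsigned int this_tsc_generation; };

static void sync_model(struct kvm_model *kvm, struct vcpu_model *vcpu,
		       bool matched)
{
	if (!matched) {
		kvm->cur_tsc_generation++;	/* open a new generation */
		kvm->nr_vcpus_matched_tsc = 0;	/* initiator is not counted */
	} else if (vcpu->this_tsc_generation != kvm->cur_tsc_generation) {
		kvm->nr_vcpus_matched_tsc++;	/* first match by this vCPU */
	}
	/* Unconditional, per the fixup: the initiator joins its generation. */
	vcpu->this_tsc_generation = kvm->cur_tsc_generation;
}

int main(void)
{
	struct kvm_model kvm = { 0, 0 };
	struct vcpu_model v0 = { 0 }, v1 = { 0 };

	sync_model(&kvm, &v0, false);	/* v0 opens gen 1: nr = 0 */
	sync_model(&kvm, &v0, true);	/* v0 again: no self-count, nr = 0 */
	sync_model(&kvm, &v1, true);	/* v1 joins gen 1: nr = 1 */
	sync_model(&kvm, &v1, true);	/* repeat match: still nr = 1 */

	printf("generation=%u matched_vcpus=%u\n",
	       kvm.cur_tsc_generation, kvm.nr_vcpus_matched_tsc);
	return 0;	/* prints: generation=1 matched_vcpus=1 */
}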