/*
 * check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSCs synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs: the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
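/*
 * Rough shape of the rendezvous, as implemented by the start/stop
 * counters below (a sketch of the code in this file, not a normative
 * spec):
 *
 *	target CPU				source CPU
 *	----------				----------
 *	atomic_inc(&start_count)		wait for start_count == 1
 *	wait for start_count == 2		atomic_inc(&start_count)
 *	check_tsc_warp()			check_tsc_warp()
 *	atomic_inc(&stop_count)			wait for stop_count == 1
 *	wait for stop_count == 2		evaluate warps,
 *						atomic_inc(&stop_count)
 */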
#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
struct tsc_adjust {
	s64		bootval;
	s64		adjusted;
	unsigned long	nextcheck;
	bool		warned;
};

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
void tsc_verify_tsc_adjust(bool resume)
{
	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
	s64 curval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return;

	/* Rate limit the MSR check */
	if (!resume && time_before(jiffies, adj->nextcheck))
		return;

	adj->nextcheck = jiffies + HZ;

	rdmsrl(MSR_IA32_TSC_ADJUST, curval);
	if (adj->adjusted == curval)
		return;

	/* Restore the original value */
	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);

	if (!adj->warned || resume) {
		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
			smp_processor_id(), adj->adjusted, curval);
		adj->warned = true;
	}
}
static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
				   unsigned int cpu, bool bootcpu)
{
	/*
	 * First online CPU in a package stores the boot value in the
	 * adjustment value. This value might change later via the sync
	 * mechanism. If that fails we still can yell about boot values not
	 * being consistent.
	 *
	 * On the boot CPU we just force set the ADJUST value to 0 if it's
	 * non zero. We don't do that on non-boot CPUs because physical
	 * hotplug should have set the ADJUST register to a value > 0 so
	 * the TSC is in sync with the already running CPUs.
	 *
	 * But we always force non-negative ADJUST values. Otherwise the
	 * TSC deadline timer creates an interrupt storm. Sigh!
	 */
	if ((bootcpu && bootval != 0) || (!bootcpu && bootval < 0)) {
		pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", cpu,
			bootval);
		wrmsrl(MSR_IA32_TSC_ADJUST, 0);
		bootval = 0;
	}
	cur->adjusted = bootval;
}
#ifndef CONFIG_SMP
bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
	return false;
}
#else /* !CONFIG_SMP */
/*
 * Store and check the TSC ADJUST MSR if available
 */
bool tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int refcpu, cpu = smp_processor_id();
	struct cpumask *mask;
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	cur->warned = false;

	/*
	 * Check whether this CPU is the first in a package to come up. In
	 * this case do not check the boot value against another package
	 * because the new package might have been physically hotplugged,
	 * where TSC_ADJUST is expected to be different. When called on the
	 * boot CPU topology_core_cpumask() might not be available yet.
	 */
	mask = topology_core_cpumask(cpu);
	refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;

	if (refcpu >= nr_cpu_ids) {
		tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(),
				       bootcpu);
		return false;
	}

	ref = per_cpu_ptr(&tsc_adjust, refcpu);
	/*
	 * Compare the boot value and complain if it differs in the
	 * package.
	 */
	if (bootval != ref->bootval) {
		pr_warn(FW_BUG "TSC ADJUST differs: Reference CPU%u: %lld CPU%u: %lld\n",
			refcpu, ref->bootval, cpu, bootval);
	}
	/*
	 * The TSC_ADJUST values in a package must be the same. If the boot
	 * value on this newly upcoming CPU differs from the adjustment
	 * value of the already online CPU in this package, set it to that
	 * adjusted value.
	 */
	if (bootval != ref->adjusted) {
		pr_warn("TSC ADJUST synchronize: Reference CPU%u: %lld CPU%u: %lld\n",
			refcpu, ref->adjusted, cpu, bootval);
		cur->adjusted = ref->adjusted;
		wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
	}
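	/*
	 * Example with illustrative numbers: if the reference CPU in
	 * this package runs with an adjustment of 0 and firmware left
	 * this CPU's TSC_ADJUST at 0x1000, both mismatches above fire
	 * and the MSR is rewritten to 0, bringing this CPU's TSC back
	 * in line with its package siblings.
	 */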
	/*
	 * We have the TSCs forced to be in sync on this package. Skip sync
	 * test:
	 */
	return true;
}
/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;
static atomic_t skip_test;
static atomic_t test_runs;
/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;
static int random_warps;
/*
 * TSC-warp measurement loop running on both CPUs. This is not called
 * if there is no TSC.
 */
static cycles_t check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end, cur_max_warp = 0;
	int i, cur_warps = 0;

	start = rdtsc_ordered();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;
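	/*
	 * tsc_khz is in cycles per millisecond, so multiplying by the
	 * msec timeout directly yields the cycle budget. Illustrative
	 * numbers: at 2.6 GHz (tsc_khz == 2600000) a 20 msec run spans
	 * 52,000,000 TSC cycles.
	 */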
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = rdtsc_ordered();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			cur_max_warp = max_warp;
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
	     "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
	     now-start, end-start);
	return cur_max_warp;
}
/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync test.
 * But as the TSC is per-logical CPU and can potentially be modified wrongly
 * by the BIOS, a TSC sync test of shorter duration should still be able
 * to catch such errors. This will also catch the condition where all the
 * cores in the socket don't get reset at the same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}
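/*
 * E.g. the first CPU of a package to come online has a core mask
 * weight of 1 and measures for the full 20 msec, while a sibling
 * coming up later (weight > 1) only gets the short 2 msec run.
 */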
/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void check_tsc_sync_source(int cpu)
{
	int cpus = 2;

	/*
	 * No need to check if we already know that the TSC is not
	 * synchronized or if we have no TSC.
	 */
	if (unsynchronized_tsc())
		return;

	if (tsc_clocksource_reliable) {
		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
			pr_info("Skipped synchronization checks as TSC is reliable.\n");
		return;
	}

	/*
	 * Set the maximum number of test runs to
	 *  1 if the CPU does not provide the TSC_ADJUST MSR
	 *  3 if the MSR is available, so the target can try to adjust
	 */
	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		atomic_set(&test_runs, 1);
	else
		atomic_set(&test_runs, 3);
retry:
	/*
	 * Wait for the target to start or to skip the test:
	 */
	while (atomic_read(&start_count) != cpus - 1) {
		if (atomic_read(&skip_test) > 0) {
			atomic_set(&skip_test, 0);
			return;
		}
		cpu_relax();
	}

	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp(loop_timeout(cpu));

	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();
	/*
	 * If the test was successful set the number of runs to zero and
	 * stop. If not, decrement the number of runs and check if we can
	 * retry. In case of random warps no retry is attempted.
	 */
	if (!nr_warps) {
		atomic_set(&test_runs, 0);

		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
			smp_processor_id(), cpu);

	} else if (atomic_dec_and_test(&test_runs) || random_warps) {
		/* Force it to 0 if random warps brought us here */
		atomic_set(&test_runs, 0);

		pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
			smp_processor_id(), cpu);
		pr_warn("Measured %Ld cycles TSC warp between CPUs, turning off TSC clock.\n",
			max_warp);
		if (random_warps)
			pr_warn("TSC warped randomly between CPUs\n");
		mark_tsc_unstable("check_tsc_sync_source failed");
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	random_warps = 0;
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);

	/*
	 * Retry, if there is a chance to do so.
	 */
	if (atomic_read(&test_runs) > 0)
		goto retry;
}
/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int cpu = smp_processor_id();
	cycles_t cur_max_warp, gbl_max_warp;
	int cpus = 2;

	/* Also aborts if there is no TSC. */
	if (unsynchronized_tsc() || tsc_clocksource_reliable)
		return;

	/*
	 * Store, verify and sanitize the TSC adjust register. If
	 * successful skip the test.
	 */
	if (tsc_store_and_check_tsc_adjust(false)) {
		atomic_inc(&skip_test);
		return;
	}
retry:
	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

	cur_max_warp = check_tsc_warp(loop_timeout(cpu));

	/*
	 * Store the maximum observed warp value for a potential retry:
	 */
	gbl_max_warp = max_warp;

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();

	/*
	 * Reset it for the next sync test:
	 */
	atomic_set(&stop_count, 0);
	/*
	 * Check the number of remaining test runs. If not zero, the test
	 * failed and a retry with adjusted TSC is possible. If zero the
	 * test was either successful or failed terminally.
	 */
	if (!atomic_read(&test_runs))
		return;

	/*
	 * If the warp value of this CPU is 0, then the other CPU
	 * observed time going backwards so this TSC was ahead and
	 * needs to move backwards.
	 */
	if (!cur_max_warp)
		cur_max_warp = -gbl_max_warp;

	/*
	 * Add the result to the previous adjustment value.
	 *
	 * The adjustment value is slightly off by the overhead of the
	 * sync mechanism (observed values are ~200 TSC cycles), but this
	 * really depends on CPU, node distance and frequency. So
	 * compensating for this is hard to get right. Experiments show
	 * that the warp is no longer detectable when the observed warp
	 * value is used. In the worst case the adjustment needs to go
	 * through a 3rd run for fine tuning.
	 *
	 * But we must make sure that the value doesn't become negative,
	 * otherwise the TSC deadline timer will create an interrupt storm.
	 */
	cur->adjusted += cur_max_warp;
	if (cur->adjusted < 0)
		cur->adjusted = 0;

	pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
		cpu, cur_max_warp, cur->adjusted);

	wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);

	goto retry;
}
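/*
 * Worked example (illustrative numbers): assume the target's TSC runs
 * 300 cycles ahead of the source. Then only the source observes
 * prev > now, so cur_max_warp stays 0 here while gbl_max_warp becomes
 * roughly 300. The target picks up -300 above, lowers its TSC_ADJUST
 * by 300 (clamped at 0) and the retry run verifies the warp is gone.
 */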
#endif /* CONFIG_SMP */