1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4 #include <linux/kernel.h>
5 #include <linux/sched.h>
6 #include <linux/sched/clock.h>
7 #include <linux/init.h>
8 #include <linux/export.h>
9 #include <linux/timer.h>
10 #include <linux/acpi_pmtmr.h>
11 #include <linux/cpufreq.h>
12 #include <linux/delay.h>
13 #include <linux/clocksource.h>
14 #include <linux/percpu.h>
15 #include <linux/timex.h>
16 #include <linux/static_key.h>
19 #include <asm/timer.h>
20 #include <asm/vgtod.h>
22 #include <asm/delay.h>
23 #include <asm/hypervisor.h>
25 #include <asm/x86_init.h>
26 #include <asm/geode.h>
28 #include <asm/intel-family.h>
29 #include <asm/i8259.h>
30 #include <asm/uv/uv.h>
32 unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
33 EXPORT_SYMBOL(cpu_khz);
35 unsigned int __read_mostly tsc_khz;
36 EXPORT_SYMBOL(tsc_khz);
41 * TSC can be unstable due to cpufreq or due to unsynced TSCs
43 static int __read_mostly tsc_unstable;
45 static DEFINE_STATIC_KEY_FALSE(__use_tsc);
47 int tsc_clocksource_reliable;
49 static u32 art_to_tsc_numerator;
50 static u32 art_to_tsc_denominator;
51 static u64 art_to_tsc_offset;
52 struct clocksource *art_related_clocksource;
55 struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */
56 seqcount_t seq; /* 32 + 4 = 36 */
58 }; /* fits one cacheline */
60 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
62 __always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
66 preempt_disable_notrace();
69 seq = this_cpu_read(cyc2ns.seq.sequence);
72 data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
73 data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
74 data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
76 } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
79 __always_inline void cyc2ns_read_end(void)
81 preempt_enable_notrace();
85 * Accelerators for sched_clock()
86 * convert from cycles(64bits) => nanoseconds (64bits)
88 * ns = cycles / (freq / ns_per_sec)
89 * ns = cycles * (ns_per_sec / freq)
90 * ns = cycles * (10^9 / (cpu_khz * 10^3))
91 * ns = cycles * (10^6 / cpu_khz)
93 * Then we use scaling math (suggested by george@mvista.com) to get:
94 * ns = cycles * (10^6 * SC / cpu_khz) / SC
95 * ns = cycles * cyc2ns_scale / SC
97 * And since SC is a constant power of two, we can convert the div
98 * into a shift. The larger SC is, the more accurate the conversion, but
99 * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
100 * (64-bit result) can be used.
102 * We can use khz divisor instead of mhz to keep a better precision.
103 * (mathieu.desnoyers@polymtl.ca)
105 * -johnstul@us.ibm.com "math is hard, let's go shopping!"
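/*
 * Worked example (illustrative numbers only, not from real hardware):
 * for a 2 GHz TSC (cpu_khz = 2000000) and SC = 2^10,
 * cyc2ns_scale = 10^6 * 1024 / 2000000 = 512, so
 * ns = (cycles * 512) >> 10 = cycles / 2, i.e. 0.5 ns per cycle as expected
 * (in terms of the code below: mul_u64_u32_shr(cycles, 512, 10)).
 */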
108 static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
110 struct cyc2ns_data data;
111 unsigned long long ns;
113 cyc2ns_read_begin(&data);
115 ns = data.cyc2ns_offset;
116 ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
123 static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
125 unsigned long long ns_now;
126 struct cyc2ns_data data;
129 ns_now = cycles_2_ns(tsc_now);
132 * Compute a new multiplier as per the above comment and ensure our
133 * time function is continuous; see the comment near struct cyc2ns_data.
136 clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
140 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
141 * not expected to be greater than 31 due to the original published
142 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
143 * value) - refer perf_event_mmap_page documentation in perf_event.h.
145 if (data.cyc2ns_shift == 32) {
146 data.cyc2ns_shift = 31;
147 data.cyc2ns_mul >>= 1;
150 data.cyc2ns_offset = ns_now -
151 mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
153 c2n = per_cpu_ptr(&cyc2ns, cpu);
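/*
 * Publish the new conversion factors through the seqcount latch: each
 * raw_write_seqcount_latch() below bumps the sequence count, so readers
 * in cyc2ns_read_begin() pick the data[seq & 1] slot that is not being
 * updated and retry if the sequence changes under them.
 */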
155 raw_write_seqcount_latch(&c2n->seq);
157 raw_write_seqcount_latch(&c2n->seq);
161 static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
165 local_irq_save(flags);
166 sched_clock_idle_sleep_event();
169 __set_cyc2ns_scale(khz, cpu, tsc_now);
171 sched_clock_idle_wakeup_event();
172 local_irq_restore(flags);
176 * Initialize cyc2ns for boot cpu
178 static void __init cyc2ns_init_boot_cpu(void)
180 struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
182 seqcount_init(&c2n->seq);
183 __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
187 * Secondary CPUs do not run through tsc_init(), so set up
188 * all the scale factors for all CPUs, assuming the same
189 * speed as the bootup CPU.
191 static void __init cyc2ns_init_secondary_cpus(void)
193 unsigned int cpu, this_cpu = smp_processor_id();
194 struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
195 struct cyc2ns_data *data = c2n->data;
197 for_each_possible_cpu(cpu) {
198 if (cpu != this_cpu) {
199 seqcount_init(&c2n->seq);
200 c2n = per_cpu_ptr(&cyc2ns, cpu);
201 c2n->data[0] = data[0];
202 c2n->data[1] = data[1];
208 * Scheduler clock - returns current time in nanosec units.
210 u64 native_sched_clock(void)
212 if (static_branch_likely(&__use_tsc)) {
213 u64 tsc_now = rdtsc();
215 /* return the value in ns */
216 return cycles_2_ns(tsc_now);
220 * Fall back to jiffies if there's no TSC available:
221 * ( But note that we still use it if the TSC is marked
222 * unstable. We do this because unlike Time Of Day,
223 * the scheduler clock tolerates small errors and it's
224 * very important for it to be as fast as the platform allows. )
228 /* No locking but a rare wrong value is not a big deal: */
229 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
233 * Generate a sched_clock if you already have a TSC value.
235 u64 native_sched_clock_from_tsc(u64 tsc)
237 return cycles_2_ns(tsc);
240 /* We need to define a real function for sched_clock, to override the
241 weak default version */
242 #ifdef CONFIG_PARAVIRT
243 unsigned long long sched_clock(void)
245 return paravirt_sched_clock();
248 bool using_native_sched_clock(void)
250 return pv_ops.time.sched_clock == native_sched_clock;
254 sched_clock(void) __attribute__((alias("native_sched_clock")));
256 bool using_native_sched_clock(void) { return true; }
259 int check_tsc_unstable(void)
263 EXPORT_SYMBOL_GPL(check_tsc_unstable);
265 #ifdef CONFIG_X86_TSC
266 int __init notsc_setup(char *str)
268 mark_tsc_unstable("boot parameter notsc");
273 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
276 int __init notsc_setup(char *str)
278 setup_clear_cpu_cap(X86_FEATURE_TSC);
283 __setup("notsc", notsc_setup);
285 static int no_sched_irq_time;
286 static int no_tsc_watchdog;
288 static int __init tsc_setup(char *str)
290 if (!strcmp(str, "reliable"))
291 tsc_clocksource_reliable = 1;
292 if (!strncmp(str, "noirqtime", 9))
293 no_sched_irq_time = 1;
294 if (!strcmp(str, "unstable"))
295 mark_tsc_unstable("boot parameter");
296 if (!strcmp(str, "nowatchdog"))
301 __setup("tsc=", tsc_setup);
303 #define MAX_RETRIES 5
304 #define TSC_DEFAULT_THRESHOLD 0x20000
307 * Read TSC and the reference counters. Take care of any disturbances
309 static u64 tsc_read_refs(u64 *p, int hpet)
312 u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
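/*
 * When tsc_khz is already known, thresh = tsc_khz >> 5 is tsc_khz / 32,
 * i.e. roughly the number of TSC cycles in 31 us; a reference read that
 * takes longer than that is treated as disturbed (e.g. by an SMI) and
 * retried by the loop below.
 */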
315 for (i = 0; i < MAX_RETRIES; i++) {
318 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
320 *p = acpi_pm_read_early();
322 if ((t2 - t1) < thresh)
329 * Calculate the TSC frequency from HPET reference
331 static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
336 hpet2 += 0x100000000ULL;
338 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
339 do_div(tmp, 1000000);
340 deltatsc = div64_u64(deltatsc, tmp);
342 return (unsigned long) deltatsc;
346 * Calculate the TSC frequency from PMTimer reference
348 static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
356 pm2 += (u64)ACPI_PM_OVRRUN;
358 tmp = pm2 * 1000000000LL;
359 do_div(tmp, PMTMR_TICKS_PER_SEC);
360 do_div(deltatsc, tmp);
362 return (unsigned long) deltatsc;
366 #define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS))
367 #define CAL_PIT_LOOPS 1000
370 #define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS))
371 #define CAL2_PIT_LOOPS 5000
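/*
 * For example, a 10 ms calibration window (assumed here for illustration)
 * gives a latch value of 1193182 / (1000 / 10) = 11931 PIT ticks.
 */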
375 * Try to calibrate the TSC against the Programmable
376 * Interrupt Timer and return the frequency of the TSC
379 * Return ULONG_MAX on failure to calibrate.
381 static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
383 u64 tsc, t1, t2, delta;
384 unsigned long tscmin, tscmax;
387 if (!has_legacy_pic()) {
389 * Relies on tsc_early_delay_calibrate() to have given us semi
390 * usable udelay(), wait for the same 50ms we would have with
391 * the PIT loop below.
393 udelay(10 * USEC_PER_MSEC);
394 udelay(10 * USEC_PER_MSEC);
395 udelay(10 * USEC_PER_MSEC);
396 udelay(10 * USEC_PER_MSEC);
397 udelay(10 * USEC_PER_MSEC);
401 /* Set the Gate high, disable speaker */
402 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
405 * Set up CTC channel 2 for mode 0 (interrupt on terminal
406 * count mode), binary count. Set the latch register to 50ms
407 * (LSB then MSB) to begin countdown.
410 outb(latch & 0xff, 0x42);
411 outb(latch >> 8, 0x42);
413 tsc = t1 = t2 = get_cycles();
418 while ((inb(0x61) & 0x20) == 0) {
422 if ((unsigned long) delta < tscmin)
423 tscmin = (unsigned int) delta;
424 if ((unsigned long) delta > tscmax)
425 tscmax = (unsigned int) delta;
432 * If we were not able to read the PIT more than loopmin
433 * times, then we have been hit by a massive SMI
435 * If the maximum is 10 times larger than the minimum,
436 * then we got hit by an SMI as well.
438 if (pitcnt < loopmin || tscmax > 10 * tscmin)
441 /* Calculate the PIT value */
448 * This reads the current MSB of the PIT counter, and
449 * checks if we are running on sufficiently fast and
450 * non-virtualized hardware.
452 * Our expectations are:
454 * - the PIT is running at roughly 1.19MHz
456 * - each IO is going to take about 1us on real hardware,
457 * but we allow it to be much faster (by a factor of 10) or
458 * _slightly_ slower (ie we allow up to a 2us read+counter
459 * update - anything else implies an unacceptably slow CPU
460 * or PIT for the fast calibration to work.
462 * - with 256 PIT ticks to read the value, we have 214us to
463 * see the same MSB (and overhead like doing a single TSC
464 * read per MSB value etc).
466 * - We're doing 2 reads per loop (LSB, MSB), and we expect
467 * them each to take about a microsecond on real hardware.
468 * So we expect a count value of around 100. But we'll be
469 * generous, and accept anything over 50.
471 * - if the PIT is stuck, and we see *many* more reads, we
472 * return early (and the next caller of pit_expect_msb()
473 * will then consider it a failure when it doesn't see the
474 * next expected value).
476 * These expectations mean that we know that we have seen the
477 * transition from one expected value to another with a fairly
478 * high accuracy, and we didn't miss any events. We can thus
479 * use the TSC value at the transitions to calculate a pretty
480 * good value for the TSC frequency.
482 static inline int pit_verify_msb(unsigned char val)
486 return inb(0x42) == val;
489 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
492 u64 tsc = 0, prev_tsc = 0;
494 for (count = 0; count < 50000; count++) {
495 if (!pit_verify_msb(val))
500 *deltap = get_cycles() - prev_tsc;
504 * We require _some_ success, but the quality control
505 * will be based on the error terms on the TSC values.
511 * How many MSB values do we want to see? We aim for
512 * a maximum error rate of 500ppm (in practice the
513 * real error is much smaller), but refuse to spend
514 * more than 50ms on it.
516 #define MAX_QUICK_PIT_MS 50
517 #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
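/*
 * With PIT_TICK_RATE = 1193182 Hz this works out to
 * 50 * 1193182 / 1000 / 256 = 233 iterations, i.e. at most ~233 MSB
 * transitions observed within the 50 ms budget (illustrative arithmetic,
 * integer division as in the macro above).
 */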
519 static unsigned long quick_pit_calibrate(void)
523 unsigned long d1, d2;
525 if (!has_legacy_pic())
528 /* Set the Gate high, disable speaker */
529 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
532 * Counter 2, mode 0 (one-shot), binary count
534 * NOTE! Mode 2 decrements by two (and then the
535 * output is flipped each time, giving the same
536 * final output frequency as a decrement-by-one),
537 * so mode 0 is much better when looking at the individual counts.
542 /* Start at 0xffff */
547 * The PIT starts counting at the next edge, so we
548 * need to delay for a microsecond. The easiest way
549 * to do that is to just read back the 16-bit counter once.
554 if (pit_expect_msb(0xff, &tsc, &d1)) {
555 for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
556 if (!pit_expect_msb(0xff-i, &delta, &d2))
562 * Extrapolate the error and fail fast if the error will
563 * never be below 500 ppm.
566 d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
570 * Iterate until the error is less than 500 ppm
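 * ("delta >> 11" is delta / 2048, so the read overhead d1 + d2 must stay
 * below roughly 500 ppm of the TSC cycles accumulated so far)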
572 if (d1+d2 >= delta >> 11)
576 * Check the PIT one more time to verify that
577 * all TSC reads were stable wrt the PIT.
579 * This also guarantees serialization of the
580 * last cycle read ('d2') in pit_expect_msb.
582 if (!pit_verify_msb(0xfe - i))
587 pr_info("Fast TSC calibration failed\n");
592 * Ok, if we get here, then we've seen the
593 * MSB of the PIT decrement 'i' times, and the
594 * error has shrunk to less than 500 ppm.
596 * As a result, we can depend on there not being
597 * any odd delays anywhere, and the TSC reads are
598 * reliable (within the error).
600 * kHz = ticks / time-in-seconds / 1000;
601 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
602 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
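 *
 * Worked example (illustrative numbers only): with i = 233 MSB steps and
 * delta = 150,000,000 TSC cycles, this gives
 * 150000000 * 1193182 / (233 * 256 * 1000) ~= 3,000,559 kHz, i.e. a ~3 GHz TSC.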
604 delta *= PIT_TICK_RATE;
605 do_div(delta, i*256*1000);
606 pr_info("Fast TSC calibration using PIT\n");
611 * native_calibrate_tsc
612 * Determine TSC frequency via CPUID, else return 0.
614 unsigned long native_calibrate_tsc(void)
616 unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
617 unsigned int crystal_khz;
619 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
622 if (boot_cpu_data.cpuid_level < 0x15)
625 eax_denominator = ebx_numerator = ecx_hz = edx = 0;
627 /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
628 cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
630 if (ebx_numerator == 0 || eax_denominator == 0)
633 crystal_khz = ecx_hz / 1000;
636 * Denverton SoCs don't report crystal clock, and also don't support
637 * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal clock.
640 if (crystal_khz == 0 &&
641 boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D)
645 * TSC frequency reported directly by CPUID is a "hardware reported"
646 * frequency and is the most accurate one so far we have. This
647 * is considered a known frequency.
649 if (crystal_khz != 0)
650 setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
653 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
654 * clock, but we can easily calculate it to a high degree of accuracy
655 * by considering the crystal ratio and the CPU speed.
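 *
 * For instance (hypothetical values, not taken from a real part): with
 * CPUID.15H reporting a denominator:numerator ratio of 2:300 and CPUID.16H
 * reporting a 3600 MHz base frequency, this gives
 * crystal_khz = 3600 * 1000 * 2 / 300 = 24000, i.e. a 24 MHz crystal.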
657 if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
658 unsigned int eax_base_mhz, ebx, ecx, edx;
660 cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
661 crystal_khz = eax_base_mhz * 1000 *
662 eax_denominator / ebx_numerator;
665 if (crystal_khz == 0)
669 * For Atom SoCs TSC is the only reliable clocksource.
670 * Mark TSC reliable so no watchdog on it.
672 if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
673 setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
675 #ifdef CONFIG_X86_LOCAL_APIC
677 * The local APIC appears to be fed by the core crystal clock
678 * (which sounds entirely sensible). We can set the global
679 * lapic_timer_period here to avoid having to calibrate the APIC timer later.
682 lapic_timer_period = crystal_khz * 1000 / HZ;
685 return crystal_khz * ebx_numerator / eax_denominator;
688 static unsigned long cpu_khz_from_cpuid(void)
690 unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
692 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
695 if (boot_cpu_data.cpuid_level < 0x16)
698 eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
700 cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
702 return eax_base_mhz * 1000;
706 * calibrate cpu using pit, hpet, and ptimer methods. They are available
707 * later in boot after acpi is initialized.
709 static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
711 u64 tsc1, tsc2, delta, ref1, ref2;
712 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
713 unsigned long flags, latch, ms;
714 int hpet = is_hpet_enabled(), i, loopmin;
717 * Run 5 calibration loops to get the lowest frequency value
718 * (the best estimate). We use two different calibration modes
721 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
722 * load a timeout of 50ms. We read the time right after we
723 * started the timer and wait until the PIT count down reaches
724 * zero. In each wait loop iteration we read the TSC and check
725 * the delta to the previous read. We keep track of the min
726 * and max values of that delta. The delta is mostly defined
727 * by the IO time of the PIT access, so we can detect when
728 * any disturbance happened between the two reads. If the
729 * maximum time is significantly larger than the minimum time,
730 * then we discard the result and have another try.
732 * 2) Reference counter. If available we use the HPET or the
733 * PMTIMER as a reference to check the sanity of that value.
734 * We use separate TSC readouts and check inside of the
735 * reference read for any possible disturbance. We discard
736 * disturbed values here as well. We do that around the PIT
737 * calibration delay loop as we have to wait for a certain
738 * amount of time anyway.
741 /* Preset PIT loop values */
744 loopmin = CAL_PIT_LOOPS;
746 for (i = 0; i < 3; i++) {
747 unsigned long tsc_pit_khz;
750 * Read the start value and the reference count of
751 * hpet/pmtimer when available. Then do the PIT
752 * calibration, which will take at least 50ms, and
753 * read the end value.
755 local_irq_save(flags);
756 tsc1 = tsc_read_refs(&ref1, hpet);
757 tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
758 tsc2 = tsc_read_refs(&ref2, hpet);
759 local_irq_restore(flags);
761 /* Pick the lowest PIT TSC calibration so far */
762 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
764 /* hpet or pmtimer available ? */
768 /* Check, whether the sampling was disturbed */
769 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
772 tsc2 = (tsc2 - tsc1) * 1000000LL;
774 tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
776 tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
778 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
780 /* Check the reference deviation */
781 delta = ((u64) tsc_pit_min) * 100;
782 do_div(delta, tsc_ref_min);
785 * If both calibration results are inside a 10% window
786 * then we can be sure that the calibration
787 * succeeded. We break out of the loop right away. We
788 * use the reference value, as it is more precise.
790 if (delta >= 90 && delta <= 110) {
791 pr_info("PIT calibration matches %s. %d loops\n",
792 hpet ? "HPET" : "PMTIMER", i + 1);
797 * Check whether PIT failed more than once. This
798 * happens in virtualized environments. We need to
799 * give the virtual PC a slightly longer timeframe for
800 * the HPET/PMTIMER to make the result precise.
802 if (i == 1 && tsc_pit_min == ULONG_MAX) {
805 loopmin = CAL2_PIT_LOOPS;
810 * Now check the results.
812 if (tsc_pit_min == ULONG_MAX) {
813 /* PIT gave no useful value */
814 pr_warn("Unable to calibrate against PIT\n");
816 /* We don't have an alternative source, disable TSC */
817 if (!hpet && !ref1 && !ref2) {
818 pr_notice("No reference (HPET/PMTIMER) available\n");
822 /* The alternative source failed as well, disable TSC */
823 if (tsc_ref_min == ULONG_MAX) {
824 pr_warn("HPET/PMTIMER calibration failed\n");
828 /* Use the alternative source */
829 pr_info("using %s reference calibration\n",
830 hpet ? "HPET" : "PMTIMER");
835 /* We don't have an alternative source, use the PIT calibration value */
836 if (!hpet && !ref1 && !ref2) {
837 pr_info("Using PIT calibration value\n");
841 /* The alternative source failed, use the PIT calibration value */
842 if (tsc_ref_min == ULONG_MAX) {
843 pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
848 * The calibration values differ too much. When in doubt, we use
849 * the PIT value as we know that there are PMTIMERs around
850 * running at double speed. At least we let the user know:
852 pr_warn("PIT calibration deviates from %s: %lu %lu\n",
853 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
854 pr_info("Using PIT calibration value\n");
859 * native_calibrate_cpu_early - can calibrate the cpu early in boot
861 unsigned long native_calibrate_cpu_early(void)
863 unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
866 fast_calibrate = cpu_khz_from_msr();
867 if (!fast_calibrate) {
868 local_irq_save(flags);
869 fast_calibrate = quick_pit_calibrate();
870 local_irq_restore(flags);
872 return fast_calibrate;
877 * native_calibrate_cpu - calibrate the cpu
879 static unsigned long native_calibrate_cpu(void)
881 unsigned long tsc_freq = native_calibrate_cpu_early();
884 tsc_freq = pit_hpet_ptimer_calibrate_cpu();
889 void recalibrate_cpu_khz(void)
892 unsigned long cpu_khz_old = cpu_khz;
894 if (!boot_cpu_has(X86_FEATURE_TSC))
897 cpu_khz = x86_platform.calibrate_cpu();
898 tsc_khz = x86_platform.calibrate_tsc();
901 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
903 cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
904 cpu_khz_old, cpu_khz);
908 EXPORT_SYMBOL(recalibrate_cpu_khz);
911 static unsigned long long cyc2ns_suspend;
913 void tsc_save_sched_clock_state(void)
915 if (!sched_clock_stable())
918 cyc2ns_suspend = sched_clock();
922 * Even on processors with invariant TSC, the TSC gets reset in some of the
923 * ACPI system sleep states. And in some systems the BIOS seems to reinit the TSC to an
924 * arbitrary value (still synced across CPUs) during resume from such sleep
925 * states. To cope with this, recompute the cyc2ns_offset for each CPU so
926 * that sched_clock() continues from the point where it was left off during
929 void tsc_restore_sched_clock_state(void)
931 unsigned long long offset;
935 if (!sched_clock_stable())
938 local_irq_save(flags);
941 * We're coming out of suspend, there's no concurrency yet; don't
942 * bother being nice about the RCU stuff, just write to both cyc2ns slots.
946 this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
947 this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
949 offset = cyc2ns_suspend - sched_clock();
951 for_each_possible_cpu(cpu) {
952 per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
953 per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
956 local_irq_restore(flags);
959 #ifdef CONFIG_CPU_FREQ
961 * Frequency scaling support. Adjust the TSC-based timer when the CPU frequency changes.
964 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
965 * as unstable and give up in those cases.
967 * Should fix up last_tsc too. Currently gettimeofday in the
968 * first tick after the change will be slightly wrong.
971 static unsigned int ref_freq;
972 static unsigned long loops_per_jiffy_ref;
973 static unsigned long tsc_khz_ref;
975 static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
978 struct cpufreq_freqs *freq = data;
980 if (num_online_cpus() > 1) {
981 mark_tsc_unstable("cpufreq changes on SMP");
986 ref_freq = freq->old;
987 loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
988 tsc_khz_ref = tsc_khz;
991 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
992 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
993 boot_cpu_data.loops_per_jiffy =
994 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
996 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
997 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
998 mark_tsc_unstable("cpufreq changes");
1000 set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
1006 static struct notifier_block time_cpufreq_notifier_block = {
1007 .notifier_call = time_cpufreq_notifier
1010 static int __init cpufreq_register_tsc_scaling(void)
1012 if (!boot_cpu_has(X86_FEATURE_TSC))
1014 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1016 cpufreq_register_notifier(&time_cpufreq_notifier_block,
1017 CPUFREQ_TRANSITION_NOTIFIER);
1021 core_initcall(cpufreq_register_tsc_scaling);
1023 #endif /* CONFIG_CPU_FREQ */
1025 #define ART_CPUID_LEAF (0x15)
1026 #define ART_MIN_DENOMINATOR (1)
1030 * If ART is present detect the numerator:denominator to convert to TSC
1032 static void __init detect_art(void)
1034 unsigned int unused[2];
1036 if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
1040 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required,
1041 * and the TSC counter resets must not occur asynchronously.
1043 if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
1044 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
1045 !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
1049 cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
1050 &art_to_tsc_numerator, unused, unused+1);
1052 if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
1055 rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);
1057 /* Make this sticky over multiple CPU init calls */
1058 setup_force_cpu_cap(X86_FEATURE_ART);
1062 /* clocksource code */
1064 static void tsc_resume(struct clocksource *cs)
1066 tsc_verify_tsc_adjust(true);
1070 * We used to compare the TSC to the cycle_last value in the clocksource
1071 * structure to avoid a nasty time-warp. This can be observed in a
1072 * very small window right after one CPU updated cycle_last under
1073 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
1074 * is smaller than the cycle_last reference value due to a TSC which
1075 * is slightly behind. This delta is nowhere else observable, but in
1076 * that case it results in a forward time jump in the range of hours
1077 * due to the unsigned delta calculation of the time keeping core
1078 * code, which is necessary to support wrapping clocksources like the pm timer.
1081 * This sanity check is now done in the core timekeeping code,
1082 * checking the result of read_tsc() - cycle_last for being negative.
1083 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
1085 static u64 read_tsc(struct clocksource *cs)
1087 return (u64)rdtsc_ordered();
1090 static void tsc_cs_mark_unstable(struct clocksource *cs)
1096 if (using_native_sched_clock())
1097 clear_sched_clock_stable();
1098 disable_sched_clock_irqtime();
1099 pr_info("Marking TSC unstable due to clocksource watchdog\n");
1102 static void tsc_cs_tick_stable(struct clocksource *cs)
1107 if (using_native_sched_clock())
1108 sched_clock_tick_stable();
1112 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
1114 static struct clocksource clocksource_tsc_early = {
1115 .name = "tsc-early",
1118 .mask = CLOCKSOURCE_MASK(64),
1119 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
1120 CLOCK_SOURCE_MUST_VERIFY,
1121 .archdata = { .vclock_mode = VCLOCK_TSC },
1122 .resume = tsc_resume,
1123 .mark_unstable = tsc_cs_mark_unstable,
1124 .tick_stable = tsc_cs_tick_stable,
1125 .list = LIST_HEAD_INIT(clocksource_tsc_early.list),
1129 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
1130 * this one will immediately take over. We will only register if the TSC has been found to be good.
1133 static struct clocksource clocksource_tsc = {
1137 .mask = CLOCKSOURCE_MASK(64),
1138 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
1139 CLOCK_SOURCE_VALID_FOR_HRES |
1140 CLOCK_SOURCE_MUST_VERIFY,
1141 .archdata = { .vclock_mode = VCLOCK_TSC },
1142 .resume = tsc_resume,
1143 .mark_unstable = tsc_cs_mark_unstable,
1144 .tick_stable = tsc_cs_tick_stable,
1145 .list = LIST_HEAD_INIT(clocksource_tsc.list),
1148 void mark_tsc_unstable(char *reason)
1154 if (using_native_sched_clock())
1155 clear_sched_clock_stable();
1156 disable_sched_clock_irqtime();
1157 pr_info("Marking TSC unstable due to %s\n", reason);
1159 clocksource_mark_unstable(&clocksource_tsc_early);
1160 clocksource_mark_unstable(&clocksource_tsc);
1163 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
1165 static void __init check_system_tsc_reliable(void)
1167 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
1168 if (is_geode_lx()) {
1169 /* RTSC counts during suspend */
1170 #define RTSC_SUSP 0x100
1171 unsigned long res_low, res_high;
1173 rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
1174 /* Geode_LX - the OLPC CPU has a very reliable TSC */
1175 if (res_low & RTSC_SUSP)
1176 tsc_clocksource_reliable = 1;
1179 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
1180 tsc_clocksource_reliable = 1;
1184 * Make an educated guess if the TSC is trustworthy and synchronized over all processors.
1187 int unsynchronized_tsc(void)
1189 if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
1193 if (apic_is_clustered_box())
1197 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1200 if (tsc_clocksource_reliable)
1203 * Intel systems are normally all synchronized.
1204 * Exceptions must mark TSC as unstable:
1206 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1207 /* assume multi socket systems are not synchronized: */
1208 if (num_possible_cpus() > 1)
1216 * Convert ART to TSC given numerator/denominator found in detect_art()
1218 struct system_counterval_t convert_art_to_tsc(u64 art)
1222 rem = do_div(art, art_to_tsc_denominator);
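/*
 * Convert in two steps - the whole quotient first, then the remainder of the
 * division above - so the numerator is never applied to the full ART value
 * at once, which could overflow 64 bits.
 */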
1224 res = art * art_to_tsc_numerator;
1225 tmp = rem * art_to_tsc_numerator;
1227 do_div(tmp, art_to_tsc_denominator);
1228 res += tmp + art_to_tsc_offset;
1230 return (struct system_counterval_t) {.cs = art_related_clocksource,
1233 EXPORT_SYMBOL(convert_art_to_tsc);
1236 * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
1237 * @art_ns: ART (Always Running Timer) in unit of nanoseconds
1239 * PTM requires all timestamps to be in units of nanoseconds. When user
1240 * software requests a cross-timestamp, this function converts system timestamp to TSC.
1243 * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set
1244 * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check
1245 * that this flag is set before conversion to TSC is attempted.
1248 * struct system_counterval_t - system counter value with the pointer to the
1249 * corresponding clocksource
1250 * @cycles: System counter value
1251 * @cs: Clocksource corresponding to system counter value. Used
1252 * by timekeeping code to verify comparability of two cycle values.
1256 struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
1260 rem = do_div(art_ns, USEC_PER_SEC);
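/*
 * tsc_khz is cycles per millisecond, so cycles = art_ns * tsc_khz / 10^6;
 * as in convert_art_to_tsc(), the multiply is split across the quotient and
 * remainder of the division by USEC_PER_SEC to avoid 64-bit overflow.
 */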
1262 res = art_ns * tsc_khz;
1263 tmp = rem * tsc_khz;
1265 do_div(tmp, USEC_PER_SEC);
1268 return (struct system_counterval_t) { .cs = art_related_clocksource,
1271 EXPORT_SYMBOL(convert_art_ns_to_tsc);
1274 static void tsc_refine_calibration_work(struct work_struct *work);
1275 static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
1277 * tsc_refine_calibration_work - Further refine tsc freq calibration
1280 * This function uses delayed work over a period of a
1281 * second to further refine the TSC freq value. Since this is
1282 * timer based, instead of loop based, we don't block the boot
1283 * process while this longer calibration is done.
1285 * If there are any calibration anomalies (too many SMIs, etc),
1286 * or the refined calibration is off by 1% of the fast early
1287 * calibration, we throw out the new calibration and use the
1288 * early calibration.
1290 static void tsc_refine_calibration_work(struct work_struct *work)
1292 static u64 tsc_start = ULLONG_MAX, ref_start;
1294 u64 tsc_stop, ref_stop, delta;
1298 /* Don't bother refining TSC on unstable systems */
1303 * Since the work is started early in boot, we may be
1304 * delayed the first time we expire. So set the workqueue
1305 * again once we know timers are working.
1307 if (tsc_start == ULLONG_MAX) {
1310 * Only set hpet once, to avoid mixing hardware
1311 * if the hpet becomes enabled later.
1313 hpet = is_hpet_enabled();
1314 tsc_start = tsc_read_refs(&ref_start, hpet);
1315 schedule_delayed_work(&tsc_irqwork, HZ);
1319 tsc_stop = tsc_read_refs(&ref_stop, hpet);
1321 /* hpet or pmtimer available ? */
1322 if (ref_start == ref_stop)
1325 /* Check, whether the sampling was disturbed */
1326 if (tsc_stop == ULLONG_MAX)
1329 delta = tsc_stop - tsc_start;
1332 freq = calc_hpet_ref(delta, ref_start, ref_stop);
1334 freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
1336 /* Make sure we're within 1% */
1337 if (abs(tsc_khz - freq) > tsc_khz/100)
1341 pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
1342 (unsigned long)tsc_khz / 1000,
1343 (unsigned long)tsc_khz % 1000);
1345 /* Inform the TSC deadline clockevent devices about the recalibration */
1346 lapic_update_tsc_freq();
1348 /* Update the sched_clock() rate to match the clocksource one */
1349 for_each_possible_cpu(cpu)
1350 set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
1356 if (boot_cpu_has(X86_FEATURE_ART))
1357 art_related_clocksource = &clocksource_tsc;
1358 clocksource_register_khz(&clocksource_tsc, tsc_khz);
1360 clocksource_unregister(&clocksource_tsc_early);
1364 static int __init init_tsc_clocksource(void)
1366 if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
1372 if (tsc_clocksource_reliable || no_tsc_watchdog)
1373 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1375 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
1376 clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
1379 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
1380 * the refined calibration and directly register it as a clocksource.
1382 if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1383 if (boot_cpu_has(X86_FEATURE_ART))
1384 art_related_clocksource = &clocksource_tsc;
1385 clocksource_register_khz(&clocksource_tsc, tsc_khz);
1387 clocksource_unregister(&clocksource_tsc_early);
1391 schedule_delayed_work(&tsc_irqwork, 0);
1395 * We use device_initcall here, to ensure we run after the hpet
1396 * is fully initialized, which may occur at fs_initcall time.
1398 device_initcall(init_tsc_clocksource);
1400 static bool __init determine_cpu_tsc_frequencies(bool early)
1402 /* Make sure that cpu and tsc are not already calibrated */
1403 WARN_ON(cpu_khz || tsc_khz);
1406 cpu_khz = x86_platform.calibrate_cpu();
1407 tsc_khz = x86_platform.calibrate_tsc();
1409 /* We should not be here with non-native cpu calibration */
1410 WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
1411 cpu_khz = pit_hpet_ptimer_calibrate_cpu();
1415 * Trust non-zero tsc_khz as authoritative,
1416 * and use it to sanity check cpu_khz,
1417 * which will be off if system timer is off.
1421 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
1427 pr_info("Detected %lu.%03lu MHz processor\n",
1428 (unsigned long)cpu_khz / KHZ,
1429 (unsigned long)cpu_khz % KHZ);
1431 if (cpu_khz != tsc_khz) {
1432 pr_info("Detected %lu.%03lu MHz TSC",
1433 (unsigned long)tsc_khz / KHZ,
1434 (unsigned long)tsc_khz % KHZ);
1439 static unsigned long __init get_loops_per_jiffy(void)
1441 u64 lpj = (u64)tsc_khz * KHZ;
1447 static void __init tsc_enable_sched_clock(void)
1449 /* Sanitize TSC ADJUST before cyc2ns gets initialized */
1450 tsc_store_and_check_tsc_adjust(true);
1451 cyc2ns_init_boot_cpu();
1452 static_branch_enable(&__use_tsc);
1455 void __init tsc_early_init(void)
1457 if (!boot_cpu_has(X86_FEATURE_TSC))
1459 /* Don't change UV TSC multi-chassis synchronization */
1460 if (is_early_uv_system())
1462 if (!determine_cpu_tsc_frequencies(true))
1464 loops_per_jiffy = get_loops_per_jiffy();
1466 tsc_enable_sched_clock();
1469 void __init tsc_init(void)
1472 * native_calibrate_cpu_early can only calibrate using methods that are
1473 * available early in boot.
1475 if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
1476 x86_platform.calibrate_cpu = native_calibrate_cpu;
1478 if (!boot_cpu_has(X86_FEATURE_TSC)) {
1479 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1484 /* We failed to determine frequencies earlier, try again */
1485 if (!determine_cpu_tsc_frequencies(false)) {
1486 mark_tsc_unstable("could not calculate TSC khz");
1487 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1490 tsc_enable_sched_clock();
1493 cyc2ns_init_secondary_cpus();
1495 if (!no_sched_irq_time)
1496 enable_sched_clock_irqtime();
1498 lpj_fine = get_loops_per_jiffy();
1501 check_system_tsc_reliable();
1503 if (unsynchronized_tsc()) {
1504 mark_tsc_unstable("TSCs unsynchronized");
1508 if (tsc_clocksource_reliable || no_tsc_watchdog)
1509 clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1511 clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
1517 * If we have a constant TSC and are using the TSC for the delay loop,
1518 * we can skip clock calibration if another cpu in the same socket has already
1519 * been calibrated. This assumes that CONSTANT_TSC applies to all
1520 * cpus in the socket - this should be a safe assumption.
1522 unsigned long calibrate_delay_is_known(void)
1524 int sibling, cpu = smp_processor_id();
1525 int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
1526 const struct cpumask *mask = topology_core_cpumask(cpu);
1528 if (!constant_tsc || !mask)
1531 sibling = cpumask_any_but(mask, cpu);
1532 if (sibling < nr_cpu_ids)
1533 return cpu_data(sibling).loops_per_jiffy;