// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
#include <linux/prandom.h>
#include <linux/cpu.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"
/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
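/*
 * Usage sketch (illustrative, not part of this file): a driver with a
 * hypothetical 19.2 MHz counter that wants cycle-to-nanosecond factors
 * guaranteed not to overflow for 600 seconds would do:
 *
 *	u32 mult, shift;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600);
 *	// later, in the hot path: ns = (cycles * mult) >> shift
 */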
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;
/*
 * Threshold: 0.0312s, when doubled: 0.0625s.
 * Also a default for cs->uncertainty_margin when registering clocks.
 */
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)

/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as
 * a lower bound for cs->uncertainty_margin values when registering clocks.
 */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#define MAX_SKEW_USEC	CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#else
#define MAX_SKEW_USEC	100
#endif

#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec.
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered clocksource_watchdog_kthread() will
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}
/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
ulong max_cswd_read_retries = 2;
module_param(max_cswd_read_retries, ulong, 0644);
EXPORT_SYMBOL_GPL(max_cswd_read_retries);
static int verify_n_cpus = 8;
module_param(verify_n_cpus, int, 0644);
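/*
 * Both parameters are runtime-writable (mode 0644). Following the usual
 * module_param() conventions for built-in code, they are expected to
 * appear under /sys/module/clocksource/parameters/, e.g. (illustrative):
 *
 *	echo 4 > /sys/module/clocksource/parameters/max_cswd_read_retries
 *	echo 0 > /sys/module/clocksource/parameters/verify_n_cpus
 */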
enum wd_read_status {
	WD_READ_SUCCESS,
	WD_READ_UNSTABLE,
	WD_READ_SKIP
};
static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	unsigned int nretries;
	u64 wd_end, wd_end2, wd_delta;
	int64_t wd_delay, wd_seq_delay;

	for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
		local_irq_disable();
		*wdnow = watchdog->read(watchdog);
		*csnow = cs->read(cs);
		wd_end = watchdog->read(watchdog);
		wd_end2 = watchdog->read(watchdog);
		local_irq_enable();

		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
					      watchdog->shift);
		if (wd_delay <= WATCHDOG_MAX_SKEW) {
			if (nretries > 1 || nretries >= max_cswd_read_retries) {
				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
					smp_processor_id(), watchdog->name, nretries);
			}
			return WD_READ_SUCCESS;
		}

		/*
		 * Now compute the delay between the two consecutive watchdog
		 * read-backs to see if there is too much external interference
		 * causing significant delay in reading both the clocksource
		 * and the watchdog.
		 *
		 * If the consecutive WD read-back delay > WATCHDOG_MAX_SKEW/2,
		 * report the system as busy, reinit the watchdog and skip the
		 * current watchdog test.
		 */
		wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
		wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
		if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
			goto skip_test;
	}

	pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
		smp_processor_id(), watchdog->name, wd_delay, nretries);
	return WD_READ_UNSTABLE;

skip_test:
	pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n",
		smp_processor_id(), watchdog->name, wd_seq_delay);
	pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
		cs->name, wd_delay);
	return WD_READ_SKIP;
}
static u64 csnow_mid;
static cpumask_t cpus_ahead;
static cpumask_t cpus_behind;
static cpumask_t cpus_chosen;
static void clocksource_verify_choose_cpus(void)
{
	int cpu, i, n = verify_n_cpus;

	if (n < 0) {
		/* Check all of the CPUs. */
		cpumask_copy(&cpus_chosen, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
		return;
	}

	/* If no checking desired, or no other CPU to check, leave. */
	cpumask_clear(&cpus_chosen);
	if (n == 0 || num_online_cpus() <= 1)
		return;

	/* Make sure to select at least one CPU other than the current CPU. */
	cpu = cpumask_first(cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next(cpu, cpu_online_mask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	cpumask_set_cpu(cpu, &cpus_chosen);

	/* Force a sane value for the boot parameter. */
	if (n > nr_cpu_ids)
		n = nr_cpu_ids;

	/*
	 * Randomly select the specified number of CPUs. If the same
	 * CPU is selected multiple times, that CPU is checked only once,
	 * and no replacement CPU is selected. This gracefully handles
	 * situations where verify_n_cpus is greater than the number of
	 * CPUs that are currently online.
	 */
	for (i = 1; i < n; i++) {
		cpu = prandom_u32_max(nr_cpu_ids);
		cpu = cpumask_next(cpu - 1, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
		if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
			cpumask_set_cpu(cpu, &cpus_chosen);
	}

	/* Don't verify ourselves. */
	cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
}
static void clocksource_verify_one_cpu(void *csin)
{
	struct clocksource *cs = (struct clocksource *)csin;

	csnow_mid = cs->read(cs);
}
void clocksource_verify_percpu(struct clocksource *cs)
{
	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
	u64 csnow_begin, csnow_end;
	int cpu, testcpu;
	s64 delta;

	if (verify_n_cpus == 0)
		return;
	cpumask_clear(&cpus_ahead);
	cpumask_clear(&cpus_behind);
	cpus_read_lock();
	preempt_disable();
	clocksource_verify_choose_cpus();
	if (cpumask_empty(&cpus_chosen)) {
		preempt_enable();
		cpus_read_unlock();
		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
		return;
	}
	testcpu = smp_processor_id();
	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
	for_each_cpu(cpu, &cpus_chosen) {
		if (cpu == testcpu)
			continue;
		csnow_begin = cs->read(cs);
		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
		csnow_end = cs->read(cs);
		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_behind);
		delta = (csnow_end - csnow_mid) & cs->mask;
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_ahead);
		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		if (cs_nsec > cs_nsec_max)
			cs_nsec_max = cs_nsec;
		if (cs_nsec < cs_nsec_min)
			cs_nsec_min = cs_nsec;
	}
	preempt_enable();
	cpus_read_unlock();
	if (!cpumask_empty(&cpus_ahead))
		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
	if (!cpumask_empty(&cpus_behind))
		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
}
EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
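/*
 * Illustrative opt-in (sketch, not from this file): a clocksource that
 * may drift between CPUs, such as the x86 TSC, requests the per-CPU
 * check by setting CLOCK_SOURCE_VERIFY_PERCPU in its flags:
 *
 *	static struct clocksource clocksource_tsc = {
 *		...
 *		.flags = CLOCK_SOURCE_IS_CONTINUOUS |
 *			 CLOCK_SOURCE_MUST_VERIFY |
 *			 CLOCK_SOURCE_VERIFY_PERCPU,
 *	};
 *
 * The watchdog kthread then runs clocksource_verify_percpu() when such
 * a clocksource is marked unstable.
 */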
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}
static void clocksource_watchdog(struct timer_list *unused)
{
	u64 csnow, wdnow, cslast, wdlast, delta;
	int next_cpu, reset_pending;
	int64_t wd_nsec, cs_nsec;
	struct clocksource *cs;
	enum wd_read_status read_ret;
	unsigned long extra_wait = 0;
	u32 md;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		read_ret = cs_watchdog_read(cs, &csnow, &wdnow);

		if (read_ret == WD_READ_UNSTABLE) {
			/* Clock readout unreliable, so give it up. */
			__clocksource_unstable(cs);
			continue;
		}

		/*
		 * When WD_READ_SKIP is returned, it means the system is likely
		 * under very heavy load, where the latency of reading the
		 * watchdog/clocksource is very large and affects the accuracy
		 * of the watchdog check. So give the system some space and
		 * suspend the watchdog check for 5 minutes.
		 */
		if (read_ret == WD_READ_SKIP) {
			/*
			 * As the watchdog timer will be suspended, and
			 * cs->last could keep unchanged for 5 minutes, reset
			 * the counters.
			 */
			clocksource_reset_watchdog();
			extra_wait = HZ * 300;
			break;
		}

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
		if (abs(cs_nsec - wd_nsec) > md) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, cs_nsec, csnow, cslast, cs->mask);
			if (curr_clocksource == cs)
				pr_warn("                      '%s' is current clocksource.\n", cs->name);
			else if (curr_clocksource)
				pr_warn("                      '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
			else
				pr_warn("                      No current clocksource.\n");
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/*
	 * Arm timer if not already pending: could race with concurrent
	 * pair clocksource_stop_watchdog() clocksource_start_watchdog().
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL + extra_wait;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}
static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}
static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}
static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}
#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}
/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback:	whether to select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}
/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function will save the start cycle value of the suspend timer to
 * calculate the suspend time when resuming the system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means that processes are frozen, and non-boot CPUs and interrupts are
 * disabled now. It is therefore possible to start the suspend timer without
 * taking the clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}
/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function will calculate the suspend time from the suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as current cycle to
	 * avoid same reading from suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
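/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * counter that keeps running across suspend becomes eligible for
 * suspend timing simply by carrying the right flag at registration:
 *
 *	static struct clocksource my_always_on_clk = {
 *		.name  = "my_always_on_clk",
 *		.read  = my_clk_read,
 *		.mask  = CLOCKSOURCE_MASK(56),
 *		.flags = CLOCK_SOURCE_IS_CONTINUOUS |
 *			 CLOCK_SOURCE_SUSPEND_NONSTOP,
 *	};
 *
 * __clocksource_suspend_select() then considers it, keeping the
 * highest-rated such source in suspend_clocksource.
 */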
/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}
/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:		Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc:	maximum cycle value before potential overflow (does not include
 *		any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult+maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
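/*
 * Rough worked example (illustrative numbers, not from this file): a
 * 1 GHz counter with shift = 24 has mult = 1 << 24 and maxadj of about
 * 0.11 * mult, so ULLONG_MAX / (mult + maxadj) is roughly 9.9e11 cycles
 * (~990 seconds at 1 GHz). Converting with (mult - maxadj) and then
 * halving leaves a max_idle_ns on the order of 440 seconds.
 */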
/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:		Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}
static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}
static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz).
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else {
			/* Override clocksource can be used. */
			best = cs;
		}

		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}
/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by user space override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}
/**
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);
/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}
/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and freq >= 4GHz. That maps
		 * to ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}

	/*
	 * If the uncertainty margin is not specified, calculate it.
	 * If both scale and freq are non-zero, calculate the clock
	 * period, but bound below at 2*WATCHDOG_MAX_SKEW. However,
	 * if either of scale or freq is zero, be very conservative and
	 * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the
	 * uncertainty margin. Allow stupidly small uncertainty margins
	 * to be specified by the caller for testing purposes, but warn
	 * to discourage production use of this capability.
	 */
	if (scale && freq && !cs->uncertainty_margin) {
		cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
		if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
			cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
	} else if (!cs->uncertainty_margin) {
		cs->uncertainty_margin = WATCHDOG_THRESHOLD;
	}
	WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	clocksource_arch_init(cs);

	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
		cs->id = CSID_GENERIC;
	if (cs->vdso_clock_mode < 0 ||
	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
			cs->name, cs->vdso_clock_mode);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
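/*
 * Registration sketch (hypothetical driver, not from this file): most
 * drivers let the core derive mult/shift, maxadj and the uncertainty
 * margin by registering with their input frequency:
 *
 *	static u64 my_clk_read(struct clocksource *cs)
 *	{
 *		return readl_relaxed(my_clk_base);	// hypothetical MMIO counter
 *	}
 *
 *	static struct clocksource my_clk = {
 *		.name   = "my_clk",
 *		.rating = 300,
 *		.read   = my_clk_read,
 *		.mask   = CLOCKSOURCE_MASK(32),
 *		.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&my_clk, 24000000);	// 24 MHz input
 */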
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource, we will just let the
		 * clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}
/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);
#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}
ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off the trailing \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}
/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);
/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	unbind clocksource name
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);
/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);
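/*
 * Example interaction with the attributes above, via the device
 * registered by init_clocksource_sysfs() below (output illustrative):
 *
 *	$ cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc hpet acpi_pm
 *	$ echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */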
static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);

#endif /* CONFIG_SYSFS */
/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);