/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
 * local_clock()      -- is cpu_clock() on the current cpu.
 *
 * How:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the result within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 * Notes:
 *
 * The !IRQ-safety of sched_clock() and sched_clock_cpu() comes from things
 * like cpufreq interrupts that can change the base clock (TSC) multiplier
 * and cause funny jumps in time -- although the filtering provided by
 * sched_clock_cpu() should mitigate serious artifacts, we cannot rely on it
 * in general since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
 * the architecture's sched_clock() itself.
 */
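
/*
 * Usage sketch (illustrative; do_work() stands in for any caller and is
 * not part of this file). Timestamps are only comparable when taken on
 * the same cpu, so the typical pattern measures a local duration:
 *
 *	u64 t0, t1;
 *
 *	t0 = local_clock();
 *	do_work();
 *	t1 = local_clock();
 *
 * t1 - t0 is then a valid duration, whereas cpu_clock(i) - cpu_clock(j)
 * for i != j may be negative, per the warning above.
 */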
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;
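
/*
 * Per-cpu filter state: tick_raw and tick_gtod snapshot sched_clock()
 * and GTOD at the last tick; clock is the last filtered value handed
 * out, which sched_clock_local() uses to clamp new readings.
 */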
struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}
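
/*
 * Note that all cpus start from the same GTOD timestamp above, so the
 * per-cpu clocks only drift apart afterwards, as their TSCs diverge.
 */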

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
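
/*
 * The signed difference makes these robust against u64 wrap: for
 * x = 1, y = ULLONG_MAX an unsigned compare would treat x as smaller,
 * while (s64)(x - y) == 2 > 0 correctly makes wrap_max() pick x as
 * the later value.
 */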

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */
	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
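
/*
 * The lockless cmpxchg64() update above is what makes this safe to
 * call from NMI context: a racing update simply causes a retry.
 */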

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Use the opportunity that we have both clock values
	 * in hand to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
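
/*
 * Pulling the smaller of the two clocks forward to the larger one on
 * every cross-cpu read is what keeps the drift between cpus bounded.
 */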

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);
	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);

	return clock;
}
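
/*
 * Called from the timer interrupt with IRQs disabled; re-anchors the
 * filter window, after which sched_clock_local() may advance the clock
 * by at most TICK_NSEC past the fresh GTOD timestamp.
 */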
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled @delta_ns nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();	/* a long idle can look like a lockup */
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}

/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(smp_processor_id());
	local_irq_restore(flags);

	return clock;
}

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}
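
/*
 * sched_clock_cpu() ignores its cpu argument in this configuration, so
 * passing 0 is equivalent to using the current cpu.
 */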
u64 local_clock(void)
{
	return sched_clock_cpu(0);
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);