2 * linux/kernel/time/timekeeping.c
4 * Kernel timekeeping code and accessor functions
6 * This code was moved from linux/kernel/timer.c.
7 * Please see that file for copyright and history logs.
11 #include <linux/timekeeper_internal.h>
12 #include <linux/module.h>
13 #include <linux/interrupt.h>
14 #include <linux/percpu.h>
15 #include <linux/init.h>
17 #include <linux/sched.h>
18 #include <linux/syscore_ops.h>
19 #include <linux/clocksource.h>
20 #include <linux/jiffies.h>
21 #include <linux/time.h>
22 #include <linux/tick.h>
23 #include <linux/stop_machine.h>
24 #include <linux/pvclock_gtod.h>
26 #include "tick-internal.h"
27 #include "ntp_internal.h"
28 #include "timekeeping_internal.h"
30 #define TK_CLEAR_NTP (1 << 0)
31 #define TK_MIRROR (1 << 1)
33 static struct timekeeper timekeeper;
34 static DEFINE_RAW_SPINLOCK(timekeeper_lock);
35 static seqcount_t timekeeper_seq;
36 static struct timekeeper shadow_timekeeper;
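/*
 * Locking/update scheme used throughout this file: writers take
 * timekeeper_lock and wrap their changes in write_seqcount_begin/end on
 * timekeeper_seq, while lock-free readers sample timekeeper fields inside
 * a read_seqcount_begin/read_seqcount_retry loop. The update flags above
 * select extra work in timekeeping_update(): TK_CLEAR_NTP resets NTP
 * state after a clock step, and TK_MIRROR copies the timekeeper into
 * shadow_timekeeper, the scratch copy that update_wall_time() accumulates
 * into before publishing it.
 */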
38 /* flag for if timekeeping is suspended */
39 int __read_mostly timekeeping_suspended;
41 /* Flag for if there is a persistent clock on this platform */
42 bool __read_mostly persistent_clock_exist = false;
44 static inline void tk_normalize_xtime(struct timekeeper *tk)
46 while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
47 tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
48 tk->xtime_sec++;
52 static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
54 tk->xtime_sec = ts->tv_sec;
55 tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
58 static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
60 tk->xtime_sec += ts->tv_sec;
61 tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
62 tk_normalize_xtime(tk);
65 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
70 * Verify consistency of: offset_real = -wall_to_monotonic
71 * before modifying anything
73 set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
74 -tk->wall_to_monotonic.tv_nsec);
75 WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
76 tk->wall_to_monotonic = wtm;
77 set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
78 tk->offs_real = timespec_to_ktime(tmp);
79 tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
82 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
84 /* Verify consistency before modifying */
85 WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
87 tk->total_sleep_time = t;
88 tk->offs_boot = timespec_to_ktime(t);
92 * tk_setup_internals - Set up internals to use clocksource clock.
94 * @clock: Pointer to clocksource.
96 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
97 * pair and interval request.
99 * Unless you're the timekeeping code, you should not be using this!
101 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
104 u64 tmp, ntpinterval;
105 struct clocksource *old_clock;
107 old_clock = tk->clock;
109 tk->cycle_last = clock->cycle_last = clock->read(clock);
111 /* Do the ns -> cycle conversion first, using original mult */
112 tmp = NTP_INTERVAL_LENGTH;
113 tmp <<= clock->shift;
115 tmp += clock->mult/2;
116 do_div(tmp, clock->mult);
120 interval = (cycle_t) tmp;
121 tk->cycle_interval = interval;
123 /* Go back from cycles -> shifted ns */
124 tk->xtime_interval = (u64) interval * clock->mult;
125 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
126 tk->raw_interval =
127 ((u64) interval * clock->mult) >> clock->shift;
129 /* if changing clocks, convert xtime_nsec shift units */
131 int shift_change = clock->shift - old_clock->shift;
132 if (shift_change < 0)
133 tk->xtime_nsec >>= -shift_change;
135 tk->xtime_nsec <<= shift_change;
137 tk->shift = clock->shift;
140 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
143 * The timekeeper keeps its own mult values for the currently
144 * active clocksource. These values will be adjusted via NTP
145 * to counteract clock drifting.
147 tk->mult = clock->mult;
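/*
 * Worked example with made-up numbers (no real clocksource implied): a
 * 1 MHz counter with shift = 20 has mult = 1000 << 20, i.e. one cycle is
 * 1000 ns. With an NTP interval length of 10,000,000 ns:
 *
 *	cycle_interval = (10,000,000 << 20) / mult        = 10,000 cycles
 *	xtime_interval = cycle_interval * mult            = 10,000,000 ns in shifted form
 *	raw_interval   = (cycle_interval * mult) >> shift = 10,000,000 ns
 *
 * so one tick's worth of time is kept as cycles, as shifted nanoseconds,
 * and as plain nanoseconds for the raw clock.
 */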
150 /* Timekeeper helper functions. */
152 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
153 u32 (*arch_gettimeoffset)(void);
155 u32 get_arch_timeoffset(void)
157 if (likely(arch_gettimeoffset))
158 return arch_gettimeoffset();
162 static inline u32 get_arch_timeoffset(void) { return 0; }
165 static inline s64 timekeeping_get_ns(struct timekeeper *tk)
167 cycle_t cycle_now, cycle_delta;
168 struct clocksource *clock;
171 /* read clocksource: */
173 cycle_now = clock->read(clock);
175 /* calculate the delta since the last update_wall_time: */
176 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
178 nsec = cycle_delta * tk->mult + tk->xtime_nsec;
179 nsec >>= tk->shift;
181 /* If arch requires, add in get_arch_timeoffset() */
182 return nsec + get_arch_timeoffset();
185 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
187 cycle_t cycle_now, cycle_delta;
188 struct clocksource *clock;
191 /* read clocksource: */
193 cycle_now = clock->read(clock);
195 /* calculate the delta since the last update_wall_time: */
196 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
198 /* convert delta to nanoseconds. */
199 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
201 /* If arch requires, add in get_arch_timeoffset() */
202 return nsec + get_arch_timeoffset();
205 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
207 static void update_pvclock_gtod(struct timekeeper *tk)
209 raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
213 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
215 int pvclock_gtod_register_notifier(struct notifier_block *nb)
217 struct timekeeper *tk = &timekeeper;
221 raw_spin_lock_irqsave(&timekeeper_lock, flags);
222 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
223 update_pvclock_gtod(tk);
224 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
228 EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
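/*
 * Hypothetical caller sketch (illustrative only, not code from this file):
 * a paravirt clock driver that wants to be told whenever the timekeeper
 * changes could register like this:
 *
 *	static int my_gtod_notify(struct notifier_block *nb,
 *				  unsigned long unused, void *priv)
 *	{
 *		struct timekeeper *tk = priv;
 *		...recompute guest clock data from *tk...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_gtod_nb = {
 *		.notifier_call	= my_gtod_notify,
 *	};
 *
 *	pvclock_gtod_register_notifier(&my_gtod_nb);
 *
 * priv is the timekeeper passed by update_pvclock_gtod(). The chain is
 * walked with timekeeper_lock held and interrupts disabled, so the
 * callback must not sleep.
 */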
231 * pvclock_gtod_unregister_notifier - unregister a pvclock
232 * timedata update listener
234 int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
239 raw_spin_lock_irqsave(&timekeeper_lock, flags);
240 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
241 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
245 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
247 /* must hold timekeeper_lock */
248 static void timekeeping_update(struct timekeeper *tk, unsigned int action)
250 if (action & TK_CLEAR_NTP) {
255 update_pvclock_gtod(tk);
257 if (action & TK_MIRROR)
258 memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
262 * timekeeping_forward_now - update clock to the current time
264 * Forward the current clock to update its state since the last call to
265 * update_wall_time(). This is useful before significant clock changes,
266 * as it avoids having to deal with this time offset explicitly.
268 static void timekeeping_forward_now(struct timekeeper *tk)
270 cycle_t cycle_now, cycle_delta;
271 struct clocksource *clock;
275 cycle_now = clock->read(clock);
276 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
277 tk->cycle_last = clock->cycle_last = cycle_now;
279 tk->xtime_nsec += cycle_delta * tk->mult;
281 /* If arch requires, add in get_arch_timeoffset() */
282 tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;
284 tk_normalize_xtime(tk);
286 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
287 timespec_add_ns(&tk->raw_time, nsec);
291 * __getnstimeofday - Returns the time of day in a timespec.
292 * @ts: pointer to the timespec to be set
294 * Updates the time of day in the timespec.
295 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
297 int __getnstimeofday(struct timespec *ts)
299 struct timekeeper *tk = &timekeeper;
304 seq = read_seqcount_begin(&timekeeper_seq);
306 ts->tv_sec = tk->xtime_sec;
307 nsecs = timekeeping_get_ns(tk);
309 } while (read_seqcount_retry(&timekeeper_seq, seq));
312 timespec_add_ns(ts, nsecs);
315 * Do not bail out early, in case there were callers still using
316 * the value, even in the face of the WARN_ON.
318 if (unlikely(timekeeping_suspended))
322 EXPORT_SYMBOL(__getnstimeofday);
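/*
 * All of the clock_gettime()-style readers below follow the same lockless
 * pattern seen above: snapshot the needed timekeeper fields between
 * read_seqcount_begin() and read_seqcount_retry(), and redo the snapshot
 * if a writer (holding timekeeper_lock plus the write seqcount) raced with
 * us. The sampled nanoseconds are only folded into the timespec after the
 * loop, once the snapshot is known to be consistent.
 */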
325 * getnstimeofday - Returns the time of day in a timespec.
326 * @ts: pointer to the timespec to be set
328 * Returns the time of day in a timespec (WARN if suspended).
330 void getnstimeofday(struct timespec *ts)
332 WARN_ON(__getnstimeofday(ts));
334 EXPORT_SYMBOL(getnstimeofday);
336 ktime_t ktime_get(void)
338 struct timekeeper *tk = &timekeeper;
342 WARN_ON(timekeeping_suspended);
345 seq = read_seqcount_begin(&timekeeper_seq);
346 secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
347 nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
349 } while (read_seqcount_retry(&timekeeper_seq, seq));
351 * Use ktime_set/ktime_add_ns to create a proper ktime on
352 * 32-bit architectures without CONFIG_KTIME_SCALAR.
354 return ktime_add_ns(ktime_set(secs, 0), nsecs);
356 EXPORT_SYMBOL_GPL(ktime_get);
359 * ktime_get_ts - get the monotonic clock in timespec format
360 * @ts: pointer to timespec variable
362 * The function calculates the monotonic clock from the realtime
363 * clock and the wall_to_monotonic offset and stores the result
364 * in normalized timespec format in the variable pointed to by @ts.
366 void ktime_get_ts(struct timespec *ts)
368 struct timekeeper *tk = &timekeeper;
369 struct timespec tomono;
373 WARN_ON(timekeeping_suspended);
376 seq = read_seqcount_begin(&timekeeper_seq);
377 ts->tv_sec = tk->xtime_sec;
378 nsec = timekeeping_get_ns(tk);
379 tomono = tk->wall_to_monotonic;
381 } while (read_seqcount_retry(&timekeeper_seq, seq));
383 ts->tv_sec += tomono.tv_sec;
385 timespec_add_ns(ts, nsec + tomono.tv_nsec);
387 EXPORT_SYMBOL_GPL(ktime_get_ts);
391 * timekeeping_clocktai - Returns the TAI time of day in a timespec
392 * @ts: pointer to the timespec to be set
394 * Returns the time of day in a timespec.
396 void timekeeping_clocktai(struct timespec *ts)
398 struct timekeeper *tk = &timekeeper;
402 WARN_ON(timekeeping_suspended);
405 seq = read_seqcount_begin(&timekeeper_seq);
407 ts->tv_sec = tk->xtime_sec + tk->tai_offset;
408 nsecs = timekeeping_get_ns(tk);
410 } while (read_seqcount_retry(&timekeeper_seq, seq));
413 timespec_add_ns(ts, nsecs);
416 EXPORT_SYMBOL(timekeeping_clocktai);
420 * ktime_get_clocktai - Returns the TAI time of day in a ktime
422 * Returns the time of day in a ktime.
424 ktime_t ktime_get_clocktai(void)
428 timekeeping_clocktai(&ts);
429 return timespec_to_ktime(ts);
431 EXPORT_SYMBOL(ktime_get_clocktai);
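/*
 * CLOCK_TAI is simply CLOCK_REALTIME plus tk->tai_offset seconds. For
 * example, with a TAI-UTC offset of 35 s (an illustrative value), a UTC
 * reading of 1000.5 s is reported here as 1035.5 s.
 */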
433 #ifdef CONFIG_NTP_PPS
436 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
437 * @ts_raw: pointer to the timespec to be set to raw monotonic time
438 * @ts_real: pointer to the timespec to be set to the time of day
440 * This function reads both the time of day and raw monotonic time at the
441 * same time atomically and stores the resulting timestamps in timespec
442 * format.
444 void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
446 struct timekeeper *tk = &timekeeper;
448 s64 nsecs_raw, nsecs_real;
450 WARN_ON_ONCE(timekeeping_suspended);
453 seq = read_seqcount_begin(&timekeeper_seq);
455 *ts_raw = tk->raw_time;
456 ts_real->tv_sec = tk->xtime_sec;
457 ts_real->tv_nsec = 0;
459 nsecs_raw = timekeeping_get_ns_raw(tk);
460 nsecs_real = timekeeping_get_ns(tk);
462 } while (read_seqcount_retry(&timekeeper_seq, seq));
464 timespec_add_ns(ts_raw, nsecs_raw);
465 timespec_add_ns(ts_real, nsecs_real);
467 EXPORT_SYMBOL(getnstime_raw_and_real);
469 #endif /* CONFIG_NTP_PPS */
472 * do_gettimeofday - Returns the time of day in a timeval
473 * @tv: pointer to the timeval to be set
475 * NOTE: Users should be converted to using getnstimeofday()
477 void do_gettimeofday(struct timeval *tv)
481 getnstimeofday(&now);
482 tv->tv_sec = now.tv_sec;
483 tv->tv_usec = now.tv_nsec/1000;
485 EXPORT_SYMBOL(do_gettimeofday);
488 * do_settimeofday - Sets the time of day
489 * @tv: pointer to the timespec variable containing the new time
491 * Sets the time of day to the new time and updates NTP and notifies hrtimers
493 int do_settimeofday(const struct timespec *tv)
495 struct timekeeper *tk = &timekeeper;
496 struct timespec ts_delta, xt;
499 if (!timespec_valid_strict(tv))
502 raw_spin_lock_irqsave(&timekeeper_lock, flags);
503 write_seqcount_begin(&timekeeper_seq);
505 timekeeping_forward_now(tk);
508 ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
509 ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
511 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
513 tk_set_xtime(tk, tv);
515 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR);
517 write_seqcount_end(&timekeeper_seq);
518 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
520 /* signal hrtimers about time change */
525 EXPORT_SYMBOL(do_settimeofday);
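/*
 * Illustrative effect of the update above: stepping the wall clock forward
 * by 10 s gives ts_delta = +10 s, so wall_to_monotonic shrinks by 10 s and
 * CLOCK_MONOTONIC (xtime + wall_to_monotonic) is unchanged; only
 * CLOCK_REALTIME jumps.
 */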
528 * timekeeping_inject_offset - Adds or subtracts from the current time.
529 * @ts: pointer to the timespec variable containing the offset
531 * Adds or subtracts an offset value from the current time.
533 int timekeeping_inject_offset(struct timespec *ts)
535 struct timekeeper *tk = &timekeeper;
540 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
543 raw_spin_lock_irqsave(&timekeeper_lock, flags);
544 write_seqcount_begin(&timekeeper_seq);
546 timekeeping_forward_now(tk);
548 /* Make sure the proposed value is valid */
549 tmp = timespec_add(tk_xtime(tk), *ts);
550 if (!timespec_valid_strict(&tmp)) {
555 tk_xtime_add(tk, ts);
556 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
558 error: /* even if we error out, we forwarded the time, so call update */
559 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR);
561 write_seqcount_end(&timekeeper_seq);
562 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
564 /* signal hrtimers about time change */
569 EXPORT_SYMBOL(timekeeping_inject_offset);
573 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
576 s32 timekeeping_get_tai_offset(void)
578 struct timekeeper *tk = &timekeeper;
583 seq = read_seqcount_begin(&timekeeper_seq);
584 ret = tk->tai_offset;
585 } while (read_seqcount_retry(&timekeeper_seq, seq));
591 * __timekeeping_set_tai_offset - Lock free worker function
594 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
596 tk->tai_offset = tai_offset;
597 tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
601 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
604 void timekeeping_set_tai_offset(s32 tai_offset)
606 struct timekeeper *tk = &timekeeper;
609 raw_spin_lock_irqsave(&timekeeper_lock, flags);
610 write_seqcount_begin(&timekeeper_seq);
611 __timekeeping_set_tai_offset(tk, tai_offset);
612 write_seqcount_end(&timekeeper_seq);
613 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
618 * change_clocksource - Swaps clocksources if a new one is available
620 * Accumulates current time interval and initializes new clocksource
622 static int change_clocksource(void *data)
624 struct timekeeper *tk = &timekeeper;
625 struct clocksource *new, *old;
628 new = (struct clocksource *) data;
630 raw_spin_lock_irqsave(&timekeeper_lock, flags);
631 write_seqcount_begin(&timekeeper_seq);
633 timekeeping_forward_now(tk);
635 * If the cs is in module, get a module reference. Succeeds
636 * for built-in code (owner == NULL) as well.
638 if (try_module_get(new->owner)) {
639 if (!new->enable || new->enable(new) == 0) {
641 tk_setup_internals(tk, new);
644 module_put(old->owner);
646 module_put(new->owner);
649 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR);
651 write_seqcount_end(&timekeeper_seq);
652 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
658 * timekeeping_notify - Install a new clock source
659 * @clock: pointer to the clock source
661 * This function is called from clocksource.c after a new, better clock
662 * source has been registered. The caller holds the clocksource_mutex.
664 int timekeeping_notify(struct clocksource *clock)
666 struct timekeeper *tk = &timekeeper;
668 if (tk->clock == clock)
670 stop_machine(change_clocksource, clock, NULL);
672 return tk->clock == clock ? 0 : -1;
676 * ktime_get_real - get the real (wall-) time in ktime_t format
678 * returns the time in ktime_t format
680 ktime_t ktime_get_real(void)
684 getnstimeofday(&now);
686 return timespec_to_ktime(now);
688 EXPORT_SYMBOL_GPL(ktime_get_real);
691 * getrawmonotonic - Returns the raw monotonic time in a timespec
692 * @ts: pointer to the timespec to be set
694 * Returns the raw monotonic time (completely un-modified by ntp)
696 void getrawmonotonic(struct timespec *ts)
698 struct timekeeper *tk = &timekeeper;
703 seq = read_seqcount_begin(&timekeeper_seq);
704 nsecs = timekeeping_get_ns_raw(tk);
707 } while (read_seqcount_retry(&timekeeper_seq, seq));
709 timespec_add_ns(ts, nsecs);
711 EXPORT_SYMBOL(getrawmonotonic);
714 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
716 int timekeeping_valid_for_hres(void)
718 struct timekeeper *tk = &timekeeper;
723 seq = read_seqcount_begin(&timekeeper_seq);
725 ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
727 } while (read_seqcount_retry(&timekeeper_seq, seq));
733 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
735 u64 timekeeping_max_deferment(void)
737 struct timekeeper *tk = &timekeeper;
742 seq = read_seqcount_begin(&timekeeper_seq);
744 ret = tk->clock->max_idle_ns;
746 } while (read_seqcount_retry(&timekeeper_seq, seq));
752 * read_persistent_clock - Return time from the persistent clock.
754 * Weak dummy function for arches that do not yet support it.
755 * Reads the time from the battery backed persistent clock.
756 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
758 * XXX - Do be sure to remove it once all arches implement it.
760 void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
767 * read_boot_clock - Return time of the system start.
769 * Weak dummy function for arches that do not yet support it.
770 * Function to read the exact time the system has been started.
771 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
773 * XXX - Do be sure to remove it once all arches implement it.
775 void __attribute__((weak)) read_boot_clock(struct timespec *ts)
782 * timekeeping_init - Initializes the clocksource and common timekeeping values
784 void __init timekeeping_init(void)
786 struct timekeeper *tk = &timekeeper;
787 struct clocksource *clock;
789 struct timespec now, boot, tmp;
791 read_persistent_clock(&now);
793 if (!timespec_valid_strict(&now)) {
794 pr_warn("WARNING: Persistent clock returned invalid value!\n"
795 " Check your CMOS/BIOS settings.\n");
798 } else if (now.tv_sec || now.tv_nsec)
799 persistent_clock_exist = true;
801 read_boot_clock(&boot);
802 if (!timespec_valid_strict(&boot)) {
803 pr_warn("WARNING: Boot clock returned invalid value!\n"
804 " Check your CMOS/BIOS settings.\n");
809 raw_spin_lock_irqsave(&timekeeper_lock, flags);
810 write_seqcount_begin(&timekeeper_seq);
813 clock = clocksource_default_clock();
814 if (clock->enable)
815 clock->enable(clock);
816 tk_setup_internals(tk, clock);
818 tk_set_xtime(tk, &now);
819 tk->raw_time.tv_sec = 0;
820 tk->raw_time.tv_nsec = 0;
821 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
822 boot = tk_xtime(tk);
824 set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
825 tk_set_wall_to_mono(tk, tmp);
829 tk_set_sleep_time(tk, tmp);
831 memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
833 write_seqcount_end(&timekeeper_seq);
834 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
837 /* time in seconds when suspend began */
838 static struct timespec timekeeping_suspend_time;
841 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
842 * @delta: pointer to a timespec delta value
844 * Takes a timespec offset measuring a suspend interval and properly
845 * adds the sleep offset to the timekeeping variables.
847 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
848 struct timespec *delta)
850 if (!timespec_valid_strict(delta)) {
851 printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
852 "sleep delta value!\n");
855 tk_xtime_add(tk, delta);
856 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
857 tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
858 tk_debug_account_sleep_time(delta);
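/*
 * Example of the bookkeeping above: injecting a 100 s suspend interval
 * advances xtime by 100 s, lowers wall_to_monotonic by 100 s (so
 * CLOCK_MONOTONIC still excludes the sleep) and grows total_sleep_time
 * and offs_boot by 100 s (so CLOCK_BOOTTIME includes it).
 */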
862 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
863 * @delta: pointer to a timespec delta value
865 * This hook is for architectures that cannot support read_persistent_clock
866 * because their RTC/persistent clock is only accessible when irqs are enabled.
868 * This function should only be called by rtc_resume(), and allows
869 * a suspend offset to be injected into the timekeeping values.
871 void timekeeping_inject_sleeptime(struct timespec *delta)
873 struct timekeeper *tk = &timekeeper;
877 * Make sure we don't set the clock twice, as timekeeping_resume()
878 * should have restored it already.
880 if (has_persistent_clock())
883 raw_spin_lock_irqsave(&timekeeper_lock, flags);
884 write_seqcount_begin(&timekeeper_seq);
886 timekeeping_forward_now(tk);
888 __timekeeping_inject_sleeptime(tk, delta);
890 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR);
892 write_seqcount_end(&timekeeper_seq);
893 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
895 /* signal hrtimers about time change */
900 * timekeeping_resume - Resumes the generic timekeeping subsystem.
902 * This is for the generic clocksource timekeeping.
903 * xtime/wall_to_monotonic/jiffies/etc are
904 * still managed by arch specific suspend/resume code.
906 static void timekeeping_resume(void)
908 struct timekeeper *tk = &timekeeper;
909 struct clocksource *clock = tk->clock;
911 struct timespec ts_new, ts_delta;
912 cycle_t cycle_now, cycle_delta;
913 bool suspendtime_found = false;
915 read_persistent_clock(&ts_new);
917 clockevents_resume();
918 clocksource_resume();
920 raw_spin_lock_irqsave(&timekeeper_lock, flags);
921 write_seqcount_begin(&timekeeper_seq);
924 * After system resumes, we need to calculate the suspended time and
925 * compensate it for the OS time. There are 3 sources that could be
926 * used: Nonstop clocksource during suspend, persistent clock and rtc
927 * device.
929 * One specific platform may have 1 or 2 or all of them, and the
930 * preference will be:
931 * suspend-nonstop clocksource -> persistent clock -> rtc
932 * The less preferred source will only be tried if there is no better
933 * usable source. The rtc part is handled separately in rtc core code.
935 cycle_now = clock->read(clock);
936 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
937 cycle_now > clock->cycle_last) {
938 u64 num, max = ULLONG_MAX;
939 u32 mult = clock->mult;
940 u32 shift = clock->shift;
943 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
946 * "cycle_delta * mult" may cause a 64-bit overflow if the
947 * suspended time is too long. In that case we need to do the
948 * 64-bit math carefully
950 do_div(max, mult);
951 if (cycle_delta > max) {
952 num = div64_u64(cycle_delta, max);
953 nsec = (((u64) max * mult) >> shift) * num;
954 cycle_delta -= num * max;
956 nsec += ((u64) cycle_delta * mult) >> shift;
958 ts_delta = ns_to_timespec(nsec);
959 suspendtime_found = true;
960 } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) {
961 ts_delta = timespec_sub(ts_new, timekeeping_suspend_time);
962 suspendtime_found = true;
965 if (suspendtime_found)
966 __timekeeping_inject_sleeptime(tk, &ts_delta);
968 /* Re-base the last cycle value */
969 tk->cycle_last = clock->cycle_last = cycle_now;
971 timekeeping_suspended = 0;
972 timekeeping_update(tk, TK_MIRROR);
973 write_seqcount_end(&timekeeper_seq);
974 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
976 touch_softlockup_watchdog();
978 clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
980 /* Resume hrtimers */
984 static int timekeeping_suspend(void)
986 struct timekeeper *tk = &timekeeper;
988 struct timespec delta, delta_delta;
989 static struct timespec old_delta;
991 read_persistent_clock(&timekeeping_suspend_time);
993 raw_spin_lock_irqsave(&timekeeper_lock, flags);
994 write_seqcount_begin(&timekeeper_seq);
995 timekeeping_forward_now(tk);
996 timekeeping_suspended = 1;
999 * To avoid drift caused by repeated suspend/resumes,
1000 * which each can add ~1 second drift error,
1001 * try to compensate so the difference in system time
1002 * and persistent_clock time stays close to constant.
1004 delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
1005 delta_delta = timespec_sub(delta, old_delta);
1006 if (abs(delta_delta.tv_sec) >= 2) {
1008 * if delta_delta is too large, assume time correction
1009 * has occurred and set old_delta to the current delta.
1013 /* Otherwise try to adjust timekeeping_suspend_time to compensate */
1014 timekeeping_suspend_time =
1015 timespec_add(timekeeping_suspend_time, delta_delta);
1017 write_seqcount_end(&timekeeper_seq);
1018 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1020 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
1021 clocksource_suspend();
1022 clockevents_suspend();
1027 /* sysfs resume/suspend bits for timekeeping */
1028 static struct syscore_ops timekeeping_syscore_ops = {
1029 .resume = timekeeping_resume,
1030 .suspend = timekeeping_suspend,
1033 static int __init timekeeping_init_ops(void)
1035 register_syscore_ops(&timekeeping_syscore_ops);
1039 device_initcall(timekeeping_init_ops);
1042 * If the error is already larger, we look ahead even further
1043 * to compensate for late or lost adjustments.
1045 static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
1046 s64 error, s64 *interval,
1050 u32 look_ahead, adj;
1054 * Use the current error value to determine how much to look ahead.
1055 * The larger the error the slower we adjust for it to avoid problems
1056 * with losing too many ticks, otherwise we would overadjust and
1057 * produce an even larger error. The smaller the adjustment the
1058 * faster we try to adjust for it, as lost ticks can do less harm
1059 * here. This is tuned so that an error of about 1 msec is adjusted
1060 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
1062 error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
1063 error2 = abs(error2);
1064 for (look_ahead = 0; error2 > 0; look_ahead++)
1065 error2 >>= 2;
1068 * Now calculate the error in (1 << look_ahead) ticks, but first
1069 * remove the single look ahead already included in the error.
1071 tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
1072 tick_error -= tk->xtime_interval >> 1;
1073 error = ((error - tick_error) >> look_ahead) + tick_error;
1075 /* Finally calculate the adjustment shift value. */
1080 *interval = -*interval;
1084 for (adj = 0; error > i; adj++)
1093 * Adjust the multiplier to reduce the error value,
1094 * this is optimized for the most common adjustments of -1,0,1,
1095 * for other values we can do a bit more work.
1097 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1099 s64 error, interval = tk->cycle_interval;
1103 * The point of this is to check if the error is greater than half
1104 * an interval.
1106 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
1108 * Note we subtract one in the shift, so that error is really error*2.
1109 * This "saves" dividing(shifting) interval twice, but keeps the
1110 * (error > interval) comparison as still measuring if error is
1111 * larger than half an interval.
1113 * Note: It does not "save" on aggravation when reading the code.
1115 error = tk->ntp_error >> (tk->ntp_error_shift - 1);
1116 if (error > interval) {
1118 * We now divide error by 4(via shift), which checks if
1119 * the error is greater than twice the interval.
1120 * If it is greater, we need a bigadjust; if it's smaller,
1121 * we can adjust by 1.
1125 * XXX - In update_wall_time, we round up to the next
1126 * nanosecond, and store the amount rounded up into
1127 * the error. This causes the likely below to be unlikely.
1129 * The proper fix is to avoid rounding up by using
1130 * the high precision tk->xtime_nsec instead of
1131 * xtime.tv_nsec everywhere. Fixing this will take some
1132 * time.
1134 if (likely(error <= interval))
1137 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
1139 if (error < -interval) {
1140 /* See comment above, this is just switched for the negative */
1142 if (likely(error >= -interval)) {
1144 interval = -interval;
1147 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
1154 if (unlikely(tk->clock->maxadj &&
1155 (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
1156 printk_once(KERN_WARNING
1157 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1158 tk->clock->name, (long)tk->mult + adj,
1159 (long)tk->clock->mult + tk->clock->maxadj);
1162 * So the following can be confusing.
1164 * To keep things simple, lets assume adj == 1 for now.
1166 * When adj != 1, remember that the interval and offset values
1167 * have been appropriately scaled so the math is the same.
1169 * The basic idea here is that we're increasing the multiplier
1170 * by one, this causes the xtime_interval to be incremented by
1171 * one cycle_interval. This is because:
1172 * xtime_interval = cycle_interval * mult
1173 * So if mult is being incremented by one:
1174 * xtime_interval = cycle_interval * (mult + 1)
1176 * xtime_interval = (cycle_interval * mult) + cycle_interval
1177 * Which can be shortened to:
1178 * xtime_interval += cycle_interval
1180 * So offset stores the non-accumulated cycles. Thus the current
1181 * time (in shifted nanoseconds) is:
1182 * now = (offset * adj) + xtime_nsec
1183 * Now, even though we're adjusting the clock frequency, we have
1184 * to keep time consistent. In other words, we can't jump back
1185 * in time, and we also want to avoid jumping forward in time.
1187 * So given the same offset value, we need the time to be the same
1188 * both before and after the freq adjustment.
1189 * now = (offset * adj_1) + xtime_nsec_1
1190 * now = (offset * adj_2) + xtime_nsec_2
1192 * (offset * adj_1) + xtime_nsec_1 =
1193 * (offset * adj_2) + xtime_nsec_2
1197 * (offset * adj_1) + xtime_nsec_1 =
1198 * (offset * (adj_1+1)) + xtime_nsec_2
1199 * (offset * adj_1) + xtime_nsec_1 =
1200 * (offset * adj_1) + offset + xtime_nsec_2
1201 * Canceling the sides:
1202 * xtime_nsec_1 = offset + xtime_nsec_2
1204 * xtime_nsec_2 = xtime_nsec_1 - offset
1205 * Which simplifies to:
1206 * xtime_nsec -= offset
1208 * XXX - TODO: Doc ntp_error calculation.
1211 tk->xtime_interval += interval;
1212 tk->xtime_nsec -= offset;
1213 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
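/*
 * Quick numeric check of the invariant derived above (made-up values):
 * with offset = 1000 shifted-ns and mult going from 10 to 11,
 *	before: now = 1000 * 10 + xtime_nsec
 *	after:  now = 1000 * 11 + (xtime_nsec - 1000)
 * which are equal, so changing the frequency never moves the clock.
 */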
1217 * It may be possible that when we entered this function, xtime_nsec
1218 * was very small. Further, if we're slightly speeding the clocksource
1219 * in the code above, it's possible the required corrective factor to
1220 * xtime_nsec could cause it to underflow.
1222 * Now, since we already accumulated the second, we cannot simply roll
1223 * the accumulated second back, since the NTP subsystem has been
1224 * notified via second_overflow. So instead we push xtime_nsec forward
1225 * by the amount we underflowed, and add that amount into the error.
1227 * We'll correct this error next time through this function, when
1228 * xtime_nsec is not as small.
1230 if (unlikely((s64)tk->xtime_nsec < 0)) {
1231 s64 neg = -(s64)tk->xtime_nsec;
1233 tk->ntp_error += neg << tk->ntp_error_shift;
1239 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1241 * Helper function that accumulates the nsecs greater than a second
1242 * from the xtime_nsec field to the xtime_secs field.
1243 * It also calls into the NTP code to handle leapsecond processing.
1246 static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
1248 u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
1250 while (tk->xtime_nsec >= nsecps) {
1253 tk->xtime_nsec -= nsecps;
1256 /* Figure out if its a leap sec and apply if needed */
1257 leap = second_overflow(tk->xtime_sec);
1258 if (unlikely(leap)) {
1261 tk->xtime_sec += leap;
1265 tk_set_wall_to_mono(tk,
1266 timespec_sub(tk->wall_to_monotonic, ts));
1268 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1270 clock_was_set_delayed();
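/*
 * Leap second example: for an inserted leap second, second_overflow()
 * returns leap = -1 at the day rollover, so xtime_sec is pulled back one
 * second, wall_to_monotonic moves the opposite way to keep CLOCK_MONOTONIC
 * smooth, and tai_offset grows by one since TAI-UTC increases on
 * insertion.
 */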
1276 * logarithmic_accumulation - shifted accumulation of cycles
1278 * This function accumulates a shifted interval of cycles into
1279 * a shifted interval of nanoseconds. Allows for O(log) accumulation
1282 * Returns the unconsumed cycles.
1284 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1287 cycle_t interval = tk->cycle_interval << shift;
1290 /* If the offset is smaller than a shifted interval, do nothing */
1291 if (offset < interval)
1294 /* Accumulate one shifted interval */
1296 tk->cycle_last += interval;
1298 tk->xtime_nsec += tk->xtime_interval << shift;
1299 accumulate_nsecs_to_secs(tk);
1301 /* Accumulate raw time */
1302 raw_nsecs = (u64)tk->raw_interval << shift;
1303 raw_nsecs += tk->raw_time.tv_nsec;
1304 if (raw_nsecs >= NSEC_PER_SEC) {
1305 u64 raw_secs = raw_nsecs;
1306 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
1307 tk->raw_time.tv_sec += raw_secs;
1309 tk->raw_time.tv_nsec = raw_nsecs;
1311 /* Accumulate error between NTP and clock interval */
1312 tk->ntp_error += ntp_tick_length() << shift;
1313 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
1314 (tk->ntp_error_shift + shift);
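/*
 * Example of the O(log) behaviour this enables: if roughly 1000
 * cycle_intervals are pending after a long NO_HZ idle period,
 * update_wall_time() consumes them in chunks of 512, 256, 128, ...
 * intervals (shift = 9, 8, 7, ...) rather than looping once per interval.
 */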
1319 #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
1320 static inline void old_vsyscall_fixup(struct timekeeper *tk)
1325 * Store only full nanoseconds into xtime_nsec after rounding
1326 * it up and add the remainder to the error difference.
1327 * XXX - This is necessary to avoid small 1ns inconsistencies caused
1328 * by truncating the remainder in vsyscalls. However, it causes
1329 * additional work to be done in timekeeping_adjust(). Once
1330 * the vsyscall implementations are converted to use xtime_nsec
1331 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
1332 * users are removed, this can be killed.
1334 remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
1335 tk->xtime_nsec -= remainder;
1336 tk->xtime_nsec += 1ULL << tk->shift;
1337 tk->ntp_error += remainder << tk->ntp_error_shift;
1341 #define old_vsyscall_fixup(tk)
1347 * update_wall_time - Uses the current clocksource to increment the wall time
1350 static void update_wall_time(void)
1352 struct clocksource *clock;
1353 struct timekeeper *real_tk = &timekeeper;
1354 struct timekeeper *tk = &shadow_timekeeper;
1356 int shift = 0, maxshift;
1357 unsigned long flags;
1359 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1361 /* Make sure we're fully resumed: */
1362 if (unlikely(timekeeping_suspended))
1365 clock = real_tk->clock;
1367 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
1368 offset = real_tk->cycle_interval;
1370 offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
1373 /* Check if there's really nothing to do */
1374 if (offset < real_tk->cycle_interval)
1378 * With NO_HZ we may have to accumulate many cycle_intervals
1379 * (think "ticks") worth of time at once. To do this efficiently,
1380 * we calculate the largest doubling multiple of cycle_intervals
1381 * that is smaller than the offset. We then accumulate that
1382 * chunk in one go, and then try to consume the next smaller
1383 * doubled multiple.
1385 shift = ilog2(offset) - ilog2(tk->cycle_interval);
1386 shift = max(0, shift);
1387 /* Bound shift to one less than what overflows tick_length */
1388 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
1389 shift = min(shift, maxshift);
1390 while (offset >= tk->cycle_interval) {
1391 offset = logarithmic_accumulation(tk, offset, shift);
1392 if (offset < tk->cycle_interval<<shift)
1396 /* correct the clock when NTP error is too big */
1397 timekeeping_adjust(tk, offset);
1400 * XXX This can be killed once everyone converts
1401 * to the new update_vsyscall.
1403 old_vsyscall_fixup(tk);
1406 * Finally, make sure that after the rounding
1407 * xtime_nsec isn't larger than NSEC_PER_SEC
1409 accumulate_nsecs_to_secs(tk);
1411 write_seqcount_begin(&timekeeper_seq);
1412 /* Update clock->cycle_last with the new value */
1413 clock->cycle_last = tk->cycle_last;
1415 * Update the real timekeeper.
1417 * We could avoid this memcpy by switching pointers, but that
1418 * requires changes to all other timekeeper usage sites as
1419 * well, i.e. move the timekeeper pointer getter into the
1420 * spinlocked/seqcount protected sections. And we trade this
1421 * memcpy under the timekeeper_seq against one before we start
1422 * updating.
1424 memcpy(real_tk, tk, sizeof(*tk));
1425 timekeeping_update(real_tk, 0);
1426 write_seqcount_end(&timekeeper_seq);
1428 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1432 * getboottime - Return the real time of system boot.
1433 * @ts: pointer to the timespec to be set
1435 * Returns the wall-time of boot in a timespec.
1437 * This is based on the wall_to_monotonic offset and the total suspend
1438 * time. Calls to settimeofday will affect the value returned (which
1439 * basically means that however wrong your real time clock is at boot time,
1440 * you get the right time here).
1442 void getboottime(struct timespec *ts)
1444 struct timekeeper *tk = &timekeeper;
1445 struct timespec boottime = {
1446 .tv_sec = tk->wall_to_monotonic.tv_sec +
1447 tk->total_sleep_time.tv_sec,
1448 .tv_nsec = tk->wall_to_monotonic.tv_nsec +
1449 tk->total_sleep_time.tv_nsec
1452 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
1454 EXPORT_SYMBOL_GPL(getboottime);
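/*
 * Worked example (made-up values): if the system booted when the wall
 * clock read 1,000,000 s, wall_to_monotonic started at -1,000,000 s.
 * After 50 s spent suspended it is -1,000,050 s and total_sleep_time is
 * 50 s, so the sum above is -1,000,000 s and the negated result is again
 * the boot time of 1,000,000 s.
 */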
1457 * get_monotonic_boottime - Returns monotonic time since boot
1458 * @ts: pointer to the timespec to be set
1460 * Returns the monotonic time since boot in a timespec.
1462 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
1463 * includes the time spent in suspend.
1465 void get_monotonic_boottime(struct timespec *ts)
1467 struct timekeeper *tk = &timekeeper;
1468 struct timespec tomono, sleep;
1472 WARN_ON(timekeeping_suspended);
1475 seq = read_seqcount_begin(&timekeeper_seq);
1476 ts->tv_sec = tk->xtime_sec;
1477 nsec = timekeeping_get_ns(tk);
1478 tomono = tk->wall_to_monotonic;
1479 sleep = tk->total_sleep_time;
1481 } while (read_seqcount_retry(&timekeeper_seq, seq));
1483 ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
1485 timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
1487 EXPORT_SYMBOL_GPL(get_monotonic_boottime);
1490 * ktime_get_boottime - Returns monotonic time since boot in a ktime
1492 * Returns the monotonic time since boot in a ktime
1494 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
1495 * includes the time spent in suspend.
1497 ktime_t ktime_get_boottime(void)
1501 get_monotonic_boottime(&ts);
1502 return timespec_to_ktime(ts);
1504 EXPORT_SYMBOL_GPL(ktime_get_boottime);
1507 * monotonic_to_bootbased - Convert the monotonic time to boot based.
1508 * @ts: pointer to the timespec to be converted
1510 void monotonic_to_bootbased(struct timespec *ts)
1512 struct timekeeper *tk = &timekeeper;
1514 *ts = timespec_add(*ts, tk->total_sleep_time);
1516 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
1518 unsigned long get_seconds(void)
1520 struct timekeeper *tk = &timekeeper;
1522 return tk->xtime_sec;
1524 EXPORT_SYMBOL(get_seconds);
1526 struct timespec __current_kernel_time(void)
1528 struct timekeeper *tk = &timekeeper;
1530 return tk_xtime(tk);
1533 struct timespec current_kernel_time(void)
1535 struct timekeeper *tk = &timekeeper;
1536 struct timespec now;
1540 seq = read_seqcount_begin(&timekeeper_seq);
1543 } while (read_seqcount_retry(&timekeeper_seq, seq));
1547 EXPORT_SYMBOL(current_kernel_time);
1549 struct timespec get_monotonic_coarse(void)
1551 struct timekeeper *tk = &timekeeper;
1552 struct timespec now, mono;
1556 seq = read_seqcount_begin(&timekeeper_seq);
1559 mono = tk->wall_to_monotonic;
1560 } while (read_seqcount_retry(&timekeeper_seq, seq));
1562 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
1563 now.tv_nsec + mono.tv_nsec);
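/*
 * current_kernel_time() and get_monotonic_coarse() back the _COARSE posix
 * clocks: they return the value captured at the last timekeeping update
 * instead of reading the clocksource, trading resolution for a cheaper
 * call.
 */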
1568 * Must hold jiffies_lock
1570 void do_timer(unsigned long ticks)
1572 jiffies_64 += ticks;
1574 calc_global_load(ticks);
1578 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
1579 * and sleep offsets.
1580 * @xtim: pointer to timespec to be set with xtime
1581 * @wtom: pointer to timespec to be set with wall_to_monotonic
1582 * @sleep: pointer to timespec to be set with time in suspend
1584 void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
1585 struct timespec *wtom, struct timespec *sleep)
1587 struct timekeeper *tk = &timekeeper;
1591 seq = read_seqcount_begin(&timekeeper_seq);
1592 *xtim = tk_xtime(tk);
1593 *wtom = tk->wall_to_monotonic;
1594 *sleep = tk->total_sleep_time;
1595 } while (read_seqcount_retry(&timekeeper_seq, seq));
1598 #ifdef CONFIG_HIGH_RES_TIMERS
1600 * ktime_get_update_offsets - hrtimer helper
1601 * @offs_real: pointer to storage for monotonic -> realtime offset
1602 * @offs_boot: pointer to storage for monotonic -> boottime offset
1604 * Returns current monotonic time and updates the offsets
1605 * Called from hrtimer_interrupt() or retrigger_next_event()
1607 ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
1610 struct timekeeper *tk = &timekeeper;
1616 seq = read_seqcount_begin(&timekeeper_seq);
1618 secs = tk->xtime_sec;
1619 nsecs = timekeeping_get_ns(tk);
1621 *offs_real = tk->offs_real;
1622 *offs_boot = tk->offs_boot;
1623 *offs_tai = tk->offs_tai;
1624 } while (read_seqcount_retry(&timekeeper_seq, seq));
1626 now = ktime_add_ns(ktime_set(secs, 0), nsecs);
1627 now = ktime_sub(now, *offs_real);
1633 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
1635 ktime_t ktime_get_monotonic_offset(void)
1637 struct timekeeper *tk = &timekeeper;
1639 struct timespec wtom;
1642 seq = read_seqcount_begin(&timekeeper_seq);
1643 wtom = tk->wall_to_monotonic;
1644 } while (read_seqcount_retry(&timekeeper_seq, seq));
1646 return timespec_to_ktime(wtom);
1648 EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
1651 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
1653 int do_adjtimex(struct timex *txc)
1655 struct timekeeper *tk = &timekeeper;
1656 unsigned long flags;
1661 /* Validate the data before disabling interrupts */
1662 ret = ntp_validate_timex(txc);
1666 if (txc->modes & ADJ_SETOFFSET) {
1667 struct timespec delta;
1668 delta.tv_sec = txc->time.tv_sec;
1669 delta.tv_nsec = txc->time.tv_usec;
1670 if (!(txc->modes & ADJ_NANO))
1671 delta.tv_nsec *= 1000;
1672 ret = timekeeping_inject_offset(&delta);
1677 getnstimeofday(&ts);
1679 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1680 write_seqcount_begin(&timekeeper_seq);
1682 orig_tai = tai = tk->tai_offset;
1683 ret = __do_adjtimex(txc, &ts, &tai);
1685 if (tai != orig_tai) {
1686 __timekeeping_set_tai_offset(tk, tai);
1687 clock_was_set_delayed();
1689 write_seqcount_end(&timekeeper_seq);
1690 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
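/*
 * Userspace-facing example of the ADJ_SETOFFSET path above (sketch):
 * calling adjtimex() with modes = ADJ_SETOFFSET | ADJ_NANO and
 * time = { .tv_sec = 0, .tv_usec = 500000000 } (nanoseconds because of
 * ADJ_NANO) steps the clock forward by half a second through
 * timekeeping_inject_offset().
 */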
1695 #ifdef CONFIG_NTP_PPS
1697 * hardpps() - Accessor function to NTP __hardpps function
1699 void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
1701 unsigned long flags;
1703 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1704 write_seqcount_begin(&timekeeper_seq);
1706 __hardpps(phase_ts, raw_ts);
1708 write_seqcount_end(&timekeeper_seq);
1709 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1711 EXPORT_SYMBOL(hardpps);
1715 * xtime_update() - advances the timekeeping infrastructure
1716 * @ticks: number of ticks that have elapsed since the last call.
1718 * Must be called with interrupts disabled.
1720 void xtime_update(unsigned long ticks)
1722 write_seqlock(&jiffies_lock);
1724 write_sequnlock(&jiffies_lock);