 * @shift:	cycle to nanosecond divisor (power of two)
 * @flags:	flags describing special properties
 * @vread:	vsyscall based read
+ * @resume:	resume function for the clocksource, if necessary
 * @cycle_interval:	Used internally by timekeeping core, please ignore.
 * @xtime_interval:	Used internally by timekeeping core, please ignore.
 */
	u32 shift;
	unsigned long flags;
	cycle_t (*vread)(void);
+	void (*resume)(void);
	/* timekeeping specific data, ignore */
	cycle_t cycle_interval;
extern int clocksource_register(struct clocksource*);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
+extern void clocksource_resume(void);
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
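/*
 * Illustration only (not part of this patch): a driver whose counter
 * loses state across suspend can use the new ->resume() hook to
 * re-program the hardware before timekeeping restarts. All "foo"
 * names, foo_hw_start_counter() and foo_read_cycles(), are
 * hypothetical.
 */
static void foo_clocksource_resume(void)
{
	/* re-arm the free-running counter that suspend powered down */
	foo_hw_start_counter();
}

static struct clocksource clocksource_foo = {
	.name	= "foo",
	.rating	= 200,
	.read	= foo_read_cycles,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 20,	/* mult set at init, e.g. via clocksource_hz2mult() */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume	= foo_clocksource_resume,
};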
static struct timer_list watchdog_timer;
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
+static int watchdog_resumed;
+
/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
	struct clocksource *cs, *tmp;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
+	int resumed;

	spin_lock(&watchdog_lock);
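+	/* Consume a pending resume notification under watchdog_lock */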
+	resumed = watchdog_resumed;
+	if (unlikely(resumed))
+		watchdog_resumed = 0;
+
	wdnow = watchdog->read();
	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
	watchdog_last = wdnow;

	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		csnow = cs->read();
+
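+		/*
+		 * After a resume, the clocksource and/or the watchdog
+		 * may have been stopped for an arbitrary time, so a
+		 * delta against the stale wd_last would be meaningless
+		 * and could falsely degrade the clocksource. Resync
+		 * wd_last and skip the comparison for this round.
+		 */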
+		if (unlikely(resumed)) {
+			cs->wd_last = csnow;
+			continue;
+		}
+
		/* Initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
	}
	spin_unlock(&watchdog_lock);
}
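+/*
+ * clocksource_resume_watchdog - flag the watchdog state as resumed
+ *
+ * Only sets a flag under watchdog_lock; the per-clocksource resync of
+ * wd_last happens on the next clocksource_watchdog() run, which also
+ * skips the stability comparison once, since cycle deltas spanning
+ * the suspended period are meaningless.
+ */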
+static void clocksource_resume_watchdog(void)
+{
+	spin_lock(&watchdog_lock);
+	watchdog_resumed = 1;
+	spin_unlock(&watchdog_lock);
+}
+
static void clocksource_check_watchdog(struct clocksource *cs)
{
	struct clocksource *cse;
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}
+
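+/* Watchdog not configured: nothing to resync after resume */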
+static inline void clocksource_resume_watchdog(void) { }
#endif
/**
+ * clocksource_resume - resume the clocksource(s)
+ */
+void clocksource_resume(void)
+{
+	struct clocksource *cs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clocksource_lock, flags);
+
+	list_for_each_entry(cs, &clocksource_list, list)
+		if (cs->resume)
+			cs->resume();
+
+	clocksource_resume_watchdog();
+
+	spin_unlock_irqrestore(&clocksource_lock, flags);
+}
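/*
 * Sketch of the intended call site (not shown in this excerpt): the
 * timekeeping resume path calls clocksource_resume() once, before
 * timekeeping restarts, along the lines of:
 *
 *	static int timekeeping_resume(struct sys_device *dev)
 *	{
 *		clocksource_resume();
 *		...
 *	}
 *
 * so clocksource hardware is re-initialized and the watchdog never
 * judges a cycle delta that spans the suspended period.
 */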
+
+/**
 * clocksource_get_next - Returns the selected clocksource
 *
 */