#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
+#include <linux/kallsyms.h>
#include "tick-internal.h"
#include "timekeeping_internal.h"
u64 csnow, wdnow, cslast, wdlast, delta;
int64_t wd_nsec, cs_nsec;
int next_cpu, reset_pending;
+#ifdef CONFIG_IPIPE
+ u64 wdref;
+#endif
spin_lock(&watchdog_lock);
if (!watchdog_running)
continue;
}
+#ifdef CONFIG_IPIPE
+retry:
+#endif
local_irq_disable();
+#ifdef CONFIG_IPIPE
+ wdref = watchdog->read(watchdog);
+#endif
csnow = cs->read(cs);
wdnow = watchdog->read(watchdog);
local_irq_enable();
+#ifdef CONFIG_IPIPE
+ wd_nsec = clocksource_cyc2ns((wdnow - wdref) & watchdog->mask,
+ watchdog->mult, watchdog->shift);
+ if (wd_nsec > WATCHDOG_THRESHOLD)
+ goto retry;
+#endif
+
/* Clocksource initialized ? */
if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
atomic_read(&watchdog_reset_pending)) {
}
fs_initcall(clocksource_done_booting);
+#ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE
+/*
+ * State consumed by the I-pipe fast timestamping path: the clocksource
+ * currently elected for I-pipe use, its raw read hook, frequency and
+ * counter mask, plus the measured per-read latency used to compare
+ * candidates (initialized to "worst possible" so the first acceptable
+ * clocksource always wins).
+ * NOTE(review): consumers of __ipipe_cs_last_tsc live outside this
+ * file — confirm exact monotonicity contract against the I-pipe core.
+ */
+unsigned long long __ipipe_cs_freq;
+EXPORT_SYMBOL_GPL(__ipipe_cs_freq);
+
+struct clocksource *__ipipe_cs;
+EXPORT_SYMBOL_GPL(__ipipe_cs);
+
+u64 (*__ipipe_cs_read)(struct clocksource *cs);
+u64 __ipipe_cs_last_tsc;
+u64 __ipipe_cs_mask;
+unsigned __ipipe_cs_lat = 0xffffffff;
+
+/*
+ * ipipe_check_clocksource - consider @cs as the I-pipe timestamp source.
+ *
+ * Reject clocksources that are not continuous (unless they provide a
+ * dedicated ipipe_read hook), have unsupported masks, or tick slower
+ * than 1 MHz.  Measure the average cost of one counter read with hard
+ * interrupts off, then install the fastest acceptable candidate (or
+ * the one matching override_name, which is always preferred) into the
+ * __ipipe_cs* globals.
+ */
+static void ipipe_check_clocksource(struct clocksource *cs)
+{
+	u64 (*cread)(struct clocksource *cs);
+	u64 lat, mask, saved;
+	unsigned long long freq;
+	unsigned long flags;
+	unsigned i;
+
+	if (cs->ipipe_read) {
+		/* Dedicated I-pipe hook: assumed full-width and continuous. */
+		mask = CLOCKSOURCE_MASK(64);
+		cread = cs->ipipe_read;
+	} else {
+		mask = cs->mask;
+		cread = cs->read;
+
+		if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) == 0)
+			return;
+
+		/*
+		 * We only support masks such that cs->mask + 1 is a power of 2,
+		 * 64 bits masks or masks lesser than 32 bits
+		 */
+		if (mask != CLOCKSOURCE_MASK(64)
+		    && ((mask & (mask + 1)) != 0 || mask > 0xffffffff))
+			return;
+	}
+
+	/*
+	 * We prefer a clocksource with a better resolution than 1us
+	 */
+	if (cs->shift <= 34) {
+		/* 10^9 << 34 still fits in 64 bits; compute ns->Hz directly. */
+		freq = 1000000000ULL << cs->shift;
+		do_div(freq, cs->mult);
+	} else {
+		/* Larger shifts would overflow: scale by 10^6, then x1000. */
+		freq = 1000000ULL << cs->shift;
+		do_div(freq, cs->mult);
+		freq *= 1000;
+	}
+	if (freq < 1000000)
+		return;
+
+	/* Measure the clocksource latency */
+	flags = hard_local_irq_save();
+	/*
+	 * cread() may advance __ipipe_cs_last_tsc as a side effect
+	 * (NOTE(review): presumably via the ipipe_read hook — confirm),
+	 * so save and restore it around the probe reads.
+	 */
+	saved = __ipipe_cs_last_tsc;
+	lat = cread(cs);
+	for (i = 0; i < 10; i++)
+		cread(cs);
+	lat = cread(cs) - lat;
+	__ipipe_cs_last_tsc = saved;
+	hard_local_irq_restore(flags);
+	/* Convert cycles to ns (cf. clocksource mult/shift convention). */
+	lat = (lat * cs->mult) >> cs->shift;
+	/* 12 reads -> 11 back-to-back intervals; i == 10 here. */
+	do_div(lat, i + 1);
+
+	/* The clocksource named by the boot-time override always wins. */
+	if (!strcmp(cs->name, override_name))
+		goto skip_tests;
+
+	if (lat > __ipipe_cs_lat)
+		return;
+
+	/* Never displace a previously installed user-overridden choice. */
+	if (__ipipe_cs && !strcmp(__ipipe_cs->name, override_name))
+		return;
+
+ skip_tests:
+	flags = hard_local_irq_save();
+	/*
+	 * Only switch while no timestamp has been handed out yet —
+	 * presumably to keep the I-pipe tsc monotonic. TODO confirm.
+	 */
+	if (__ipipe_cs_last_tsc == 0) {
+		__ipipe_cs_lat = lat;
+		__ipipe_cs_freq = freq;
+		__ipipe_cs = cs;
+		__ipipe_cs_read = cread;
+		__ipipe_cs_mask = mask;
+	}
+	hard_local_irq_restore(flags);
+}
+#else /* !CONFIG_IPIPE_WANT_CLOCKSOURCE */
+/* No-op stub when I-pipe clocksource election is compiled out. */
+#define ipipe_check_clocksource(cs) do { }while (0)
+#endif /* !CONFIG_IPIPE_WANT_CLOCKSOURCE */
+
/*
* Enqueue the clocksource sorted by rating
*/
entry = &tmp->list;
}
list_add(&cs->list, entry);
+
+ ipipe_check_clocksource(cs);
}
/**
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
+#include <linux/ipipe.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
raw_spin_unlock_irq(&base->lock);
}
+#ifdef CONFIG_IPIPE
+
+/*
+ * update_root_process_times - per-tick housekeeping for the root domain.
+ *
+ * When the tick really belongs to the root domain (__ipipe_root_tick_p()),
+ * run the regular full tick path via update_process_times().  Otherwise
+ * run only the minimal subset — local timers, RCU callbacks and posix
+ * CPU timers — deliberately skipping the rest of the per-tick work.
+ * NOTE(review): the semantics of __ipipe_root_tick_p() are not visible
+ * in this file — confirm against the I-pipe core.
+ */
+void update_root_process_times(struct pt_regs *regs)
+{
+	int user_tick = user_mode(regs);
+
+	if (__ipipe_root_tick_p(regs)) {
+		update_process_times(user_tick);
+		return;
+	}
+
+	run_local_timers();
+	rcu_check_callbacks(user_tick);
+	run_posix_cpu_timers(current);
+}
+
+#endif
+
/*
* This function runs timers and the timer-tq in bottom half context.
*/