clocksource, clockevents: ipipe: enable pipelined ticks and timekeeping
author Philippe Gerum <rpm@xenomai.org>
Sun, 3 Dec 2017 10:55:42 +0000 (11:55 +0100)
committer Marek Szyprowski <m.szyprowski@samsung.com>
Fri, 27 Apr 2018 09:21:34 +0000 (11:21 +0200)
include/linux/clockchips.h
include/linux/clocksource.h
include/linux/timekeeper_internal.h
include/linux/timekeeping.h
kernel/time/clockevents.c
kernel/time/clocksource.c
kernel/time/tick-common.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/time/timer.c

diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 8ae9a95ebf5b5053e576a5995fa4d58b57f20bb5..ab59237a85d76c686df2b1d0c55e87821a7327dc 100644
@@ -129,6 +129,15 @@ struct clock_event_device {
        const struct cpumask    *cpumask;
        struct list_head        list;
        struct module           *owner;
+
+#ifdef CONFIG_IPIPE
+       struct ipipe_timer      *ipipe_timer;
+       unsigned                ipipe_stolen;
+
+#define clockevent_ipipe_stolen(evt) ((evt)->ipipe_stolen)
+#else
+#define clockevent_ipipe_stolen(evt) (0)
+#endif /* !CONFIG_IPIPE */
 } ____cacheline_aligned;
 
 /* Helpers to verify state of a clockevent device */
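A minimal usage sketch (hypothetical caller, not part of this patch): code that programs a clock event device can test the new flag so that it leaves the hardware alone once the I-pipe head domain has taken ("stolen") the device over.

        static void example_arm_timer(struct clock_event_device *dev,
                                      unsigned long delta_cycles)
        {
                /* assumption: a stolen device is reprogrammed by the head domain */
                if (clockevent_ipipe_stolen(dev))
                        return;

                dev->set_next_event(delta_cycles, dev);
        }
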
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 7dff1963c185c9d2c85da84f5e89f96cc5a2b3b5..ffbdea2b2bea0de8f4c38cd2d7c8a01d2d13e5e5 100644
@@ -107,6 +107,9 @@ struct clocksource {
        u64 wd_last;
 #endif
        struct module *owner;
+#ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE
+       u64 (*ipipe_read)(struct clocksource *cs);
+#endif /* CONFIG_IPIPE_WANT_CLOCKSOURCE */
 };
 
 /*
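A driver-side sketch (hypothetical device and MMIO base, not part of this patch): ipipe_check_clocksource() in kernel/time/clocksource.c below treats a clocksource that provides ->ipipe_read as a full 64-bit counter, so the hook is assumed to return a value that does not wrap.

        static void __iomem *example_counter_base;   /* made-up mapping */

        static u64 example_cs_read(struct clocksource *cs)
        {
                /* 64-bit free-running counter; readq_relaxed() assumes a 64-bit arch */
                return readq_relaxed(example_counter_base);
        }

        static struct clocksource example_cs = {
                .name       = "example",
                .rating     = 300,
                .read       = example_cs_read,
                .mask       = CLOCKSOURCE_MASK(64),
                .flags      = CLOCK_SOURCE_IS_CONTINUOUS,
        #ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE
                .ipipe_read = example_cs_read,
        #endif
        };
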
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 97154c61e5d2b3dc9e59f37abb187652f10bb8f7..ff8d7c7f62e12180d0161cd2a4836b81fe295faf 100644
@@ -135,7 +135,7 @@ extern void update_vsyscall_tz(void);
 #elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
 
 extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
-                               struct clocksource *c, u32 mult,
+                               struct clocksource *c, u32 mult, u32 shift,
                                u64 cycle_last);
 extern void update_vsyscall_tz(void);
 
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 0021575fe871133da5d67e2a5f09c85d0196fccd..33f2839069f71be0c34ea443b783b39c156546c6 100644
@@ -347,5 +347,13 @@ extern void read_boot_clock64(struct timespec64 *ts);
 extern int update_persistent_clock(struct timespec now);
 extern int update_persistent_clock64(struct timespec64 now);
 
+#ifdef CONFIG_IPIPE
+void update_root_process_times(struct pt_regs *regs);
+#else  /* !CONFIG_IPIPE */
+static inline void update_root_process_times(struct pt_regs *regs)
+{
+       update_process_times(user_mode(regs));
+}
+#endif /* CONFIG_IPIPE */
 
 #endif
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 4237e0744e26bd276de92ca083a44676c9ede240..fd6894afff6be6e883e46e11c70631576542b5cc 100644
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/smp.h>
 #include <linux/device.h>
+#include <linux/ipipe_tickdev.h>
 
 #include "tick-internal.h"
 
@@ -453,6 +454,8 @@ void clockevents_register_device(struct clock_event_device *dev)
        /* Initialize state to DETACHED */
        clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
 
+       ipipe_host_timer_register(dev);
+
        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 03918a19cf2da854bcefa9f8188daa53a7db82f4..7cdccd7d8426c32b4885d8f700d6ab5d6cd0c47b 100644
@@ -32,6 +32,7 @@
 #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
 #include <linux/tick.h>
 #include <linux/kthread.h>
+#include <linux/kallsyms.h>
 
 #include "tick-internal.h"
 #include "timekeeping_internal.h"
@@ -177,6 +178,9 @@ static void clocksource_watchdog(unsigned long data)
        u64 csnow, wdnow, cslast, wdlast, delta;
        int64_t wd_nsec, cs_nsec;
        int next_cpu, reset_pending;
+#ifdef CONFIG_IPIPE
+       u64 wdref;
+#endif
 
        spin_lock(&watchdog_lock);
        if (!watchdog_running)
@@ -193,11 +197,24 @@ static void clocksource_watchdog(unsigned long data)
                        continue;
                }
 
+#ifdef CONFIG_IPIPE
+retry:
+#endif
                local_irq_disable();
+#ifdef CONFIG_IPIPE
+               wdref = watchdog->read(watchdog);
+#endif
                csnow = cs->read(cs);
                wdnow = watchdog->read(watchdog);
                local_irq_enable();
 
+#ifdef CONFIG_IPIPE
+               wd_nsec = clocksource_cyc2ns((wdnow - wdref) & watchdog->mask,
+                                            watchdog->mult, watchdog->shift);
+               if (wd_nsec > WATCHDOG_THRESHOLD)
+                       goto retry;
+#endif
+
                /* Clocksource initialized ? */
                if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
                    atomic_read(&watchdog_reset_pending)) {
@@ -678,6 +695,95 @@ static int __init clocksource_done_booting(void)
 }
 fs_initcall(clocksource_done_booting);
 
+#ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE
+unsigned long long __ipipe_cs_freq;
+EXPORT_SYMBOL_GPL(__ipipe_cs_freq);
+
+struct clocksource *__ipipe_cs;
+EXPORT_SYMBOL_GPL(__ipipe_cs);
+
+u64 (*__ipipe_cs_read)(struct clocksource *cs);
+u64 __ipipe_cs_last_tsc;
+u64 __ipipe_cs_mask;
+unsigned __ipipe_cs_lat = 0xffffffff;
+
+static void ipipe_check_clocksource(struct clocksource *cs)
+{
+       u64 (*cread)(struct clocksource *cs);
+       u64 lat, mask, saved;
+       unsigned long long freq;
+       unsigned long flags;
+       unsigned i;
+
+       if (cs->ipipe_read) {
+               mask = CLOCKSOURCE_MASK(64);
+               cread = cs->ipipe_read;
+       } else {
+               mask = cs->mask;
+               cread = cs->read;
+
+               if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) == 0)
+                       return;
+
+               /*
+                * We only support masks such that cs->mask + 1 is a power of 2:
+                * either the full 64-bit mask, or masks no wider than 32 bits.
+                */
+               if (mask != CLOCKSOURCE_MASK(64)
+                   && ((mask & (mask + 1)) != 0 || mask > 0xffffffff))
+                       return;
+       }
+
+       /*
+        * We prefer a clocksource with a better resolution than 1us
+        */
+       if (cs->shift <= 34) {
+               freq = 1000000000ULL << cs->shift;
+               do_div(freq, cs->mult);
+       } else {
+               freq = 1000000ULL << cs->shift;
+               do_div(freq, cs->mult);
+               freq *= 1000;
+       }
+       if (freq < 1000000)
+               return;
+
+       /* Measure the clocksource latency */
+       flags = hard_local_irq_save();
+       saved = __ipipe_cs_last_tsc;
+       lat = cread(cs);
+       for (i = 0; i < 10; i++)
+               cread(cs);
+       lat = cread(cs) - lat;
+       __ipipe_cs_last_tsc = saved;
+       hard_local_irq_restore(flags);
+       lat = (lat * cs->mult) >> cs->shift;
+       do_div(lat, i + 1);
+
+       if (!strcmp(cs->name, override_name))
+               goto skip_tests;
+
+       if (lat > __ipipe_cs_lat)
+               return;
+
+       if (__ipipe_cs && !strcmp(__ipipe_cs->name, override_name))
+               return;
+
+  skip_tests:
+       flags = hard_local_irq_save();
+       if (__ipipe_cs_last_tsc == 0) {
+               __ipipe_cs_lat = lat;
+               __ipipe_cs_freq = freq;
+               __ipipe_cs = cs;
+               __ipipe_cs_read = cread;
+               __ipipe_cs_mask = mask;
+       }
+       hard_local_irq_restore(flags);
+}
+#else /* !CONFIG_IPIPE_WANT_CLOCKSOURCE */
+#define ipipe_check_clocksource(cs)    do { } while (0)
+#endif /* !CONFIG_IPIPE_WANT_CLOCKSOURCE */
+
 /*
  * Enqueue the clocksource sorted by rating
  */
@@ -693,6 +799,8 @@ static void clocksource_enqueue(struct clocksource *cs)
                entry = &tmp->list;
        }
        list_add(&cs->list, entry);
+
+       ipipe_check_clocksource(cs);
 }
 
 /**
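The frequency check in ipipe_check_clocksource() inverts the usual clocksource relation ns = (cycles * mult) >> shift, giving freq = (10^9 << shift) / mult; since 10^9 needs about 30 bits, shifts above 34 take the microsecond path to keep the intermediate value within 64 bits. A standalone illustration with made-up mult/shift values:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* hypothetical pair, roughly a 24 MHz counter with shift = 24 */
                uint32_t mult = 699050667U, shift = 24;
                unsigned long long freq;

                if (shift <= 34) {
                        freq = 1000000000ULL << shift;
                        freq /= mult;
                } else {
                        freq = 1000000ULL << shift;  /* scale by 10^6 first ... */
                        freq /= mult;
                        freq *= 1000;                /* ... then back to Hz */
                }
                printf("%llu Hz\n", freq);           /* prints 23999999, i.e. ~24 MHz */
                return 0;
        }
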
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 49edc1c4f3e645894f839c40489ab81594a02398..9ab5c0e94f2cddf3da46c7a5ea382d1a1e2eb244 100644
@@ -89,7 +89,7 @@ static void tick_periodic(int cpu)
                update_wall_time();
        }
 
-       update_process_times(user_mode(get_irq_regs()));
+       update_root_process_times(get_irq_regs());
        profile_tick(CPU_PROFILING);
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index dfa4a117fee34b6fcaf4fd1b7440b87d126c0878..849479493e5976cde09480ee9e44eb6296350b05 100644
@@ -158,7 +158,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
                ts->next_tick = 0;
        }
 #endif
-       update_process_times(user_mode(regs));
+       update_root_process_times(regs);
        profile_tick(CPU_PROFILING);
 }
 #endif
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2cafb49aa65e13b5b1d29223e8943d53386a795d..002c93975327e3eca8078fec9d5846ee4179ccef 100644
@@ -525,7 +525,7 @@ static inline void update_vsyscall(struct timekeeper *tk)
        xt = timespec64_to_timespec(tk_xtime(tk));
        wm = timespec64_to_timespec(tk->wall_to_monotonic);
        update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
-                           tk->tkr_mono.cycle_last);
+                           tk->tkr_mono.shift, tk->tkr_mono.cycle_last);
 }
 
 static inline void old_vsyscall_fixup(struct timekeeper *tk)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index db5e6daadd94ed883c681a4c88517ef9b0706f0a..5edb5cb7d3dfc46f891cdef1405da0e87ce695c3 100644
@@ -22,6 +22,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
+#include <linux/ipipe.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -1621,6 +1622,24 @@ static inline void __run_timers(struct timer_base *base)
        raw_spin_unlock_irq(&base->lock);
 }
 
+#ifdef CONFIG_IPIPE
+
+void update_root_process_times(struct pt_regs *regs)
+{
+       int user_tick = user_mode(regs);
+
+       if (__ipipe_root_tick_p(regs)) {
+               update_process_times(user_tick);
+               return;
+       }
+
+       run_local_timers();
+       rcu_check_callbacks(user_tick);
+       run_posix_cpu_timers(current);
+}
+
+#endif
+
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */