time: Add timekeeper::tkr_raw
author	Peter Zijlstra <peterz@infradead.org>
Thu, 19 Mar 2015 08:28:44 +0000 (09:28 +0100)
committer	Ingo Molnar <mingo@kernel.org>
Fri, 27 Mar 2015 08:45:07 +0000 (09:45 +0100)
Introduce tkr_raw and make use of it.

  base_raw -> tkr_raw.base
  clock->{mult,shift} -> tkr_raw.{mult,shift}

Kill timekeeping_get_ns_raw() in favour of
timekeeping_get_ns(&tkr_raw); this removes all mono_raw special
casing.

Duplicate the updates to tkr_mono.cycle_last into tkr_raw.cycle_last,
since both need the same value.
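
For illustration only (not part of the patch): a minimal C sketch of the
pattern this change enables. The names below (sketch_read_base,
sketch_get_ns) are simplified stand-ins, not the kernel's tk_read_base or
timekeeping_get_ns(); the point is that once the raw clock has its own
readout base, a single generic cycles-to-nanoseconds helper serves both
the mono and the raw base.

  #include <stdint.h>

  struct sketch_read_base {
          uint64_t (*read)(void);   /* clocksource read hook */
          uint64_t mask;            /* counter wrap-around mask */
          uint64_t cycle_last;      /* cycles accumulated so far */
          uint32_t mult;            /* cycles -> shifted-ns multiplier */
          uint32_t shift;           /* down-shift after multiplication */
  };

  static uint64_t sketch_get_ns(struct sketch_read_base *tkr)
  {
          uint64_t delta = (tkr->read() - tkr->cycle_last) & tkr->mask;

          /* Same conversion path for the mono and the raw base. */
          return (delta * tkr->mult) >> tkr->shift;
  }

  /* Callers pass either readout base, e.g. &tk->tkr_mono or &tk->tkr_raw. */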

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150319093400.422589590@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/timekeeper_internal.h
kernel/time/timekeeping.c

diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 73df17f..fb86963 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -41,6 +41,7 @@ struct tk_read_base {
 /**
  * struct timekeeper - Structure holding internal timekeeping values.
  * @tkr_mono:          The readout base structure for CLOCK_MONOTONIC
+ * @tkr_raw:           The readout base structure for CLOCK_MONOTONIC_RAW
  * @xtime_sec:         Current CLOCK_REALTIME time in seconds
  * @ktime_sec:         Current CLOCK_MONOTONIC time in seconds
  * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
@@ -48,7 +49,6 @@ struct tk_read_base {
  * @offs_boot:         Offset clock monotonic -> clock boottime
  * @offs_tai:          Offset clock monotonic -> clock tai
  * @tai_offset:                The current UTC to TAI offset in seconds
- * @base_raw:          Monotonic raw base time in ktime_t format
  * @raw_time:          Monotonic raw base time in timespec64 format
  * @cycle_interval:    Number of clock cycles in one NTP interval
  * @xtime_interval:    Number of clock shifted nano seconds in one NTP
@@ -77,6 +77,7 @@ struct tk_read_base {
  */
 struct timekeeper {
        struct tk_read_base     tkr_mono;
+       struct tk_read_base     tkr_raw;
        u64                     xtime_sec;
        unsigned long           ktime_sec;
        struct timespec64       wall_to_monotonic;
@@ -84,7 +85,6 @@ struct timekeeper {
        ktime_t                 offs_boot;
        ktime_t                 offs_tai;
        s32                     tai_offset;
-       ktime_t                 base_raw;
        struct timespec64       raw_time;
 
        /* The following members are for timekeeping internal use */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 1405091..cbb612e 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -252,6 +252,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
        tk->tkr_mono.mask = clock->mask;
        tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
 
+       tk->tkr_raw.clock = clock;
+       tk->tkr_raw.read = clock->read;
+       tk->tkr_raw.mask = clock->mask;
+       tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
+
        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
@@ -278,7 +283,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
                else
                        tk->tkr_mono.xtime_nsec <<= shift_change;
        }
+       tk->tkr_raw.xtime_nsec = 0;
+
        tk->tkr_mono.shift = clock->shift;
+       tk->tkr_raw.shift = clock->shift;
 
        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
@@ -290,6 +298,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         * to counteract clock drifting.
         */
        tk->tkr_mono.mult = clock->mult;
+       tk->tkr_raw.mult = clock->mult;
        tk->ntp_err_mult = 0;
 }
 
@@ -316,21 +325,6 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
        return nsec + arch_gettimeoffset();
 }
 
-static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
-{
-       struct clocksource *clock = tk->tkr_mono.clock;
-       cycle_t delta;
-       s64 nsec;
-
-       delta = timekeeping_get_delta(&tk->tkr_mono);
-
-       /* convert delta to nanoseconds. */
-       nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
-
-       /* If arch requires, add in get_arch_timeoffset() */
-       return nsec + arch_gettimeoffset();
-}
-
 /**
  * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
  * @tkr: Timekeeping readout base from which we take the update
@@ -562,7 +556,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
        tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 
        /* Update the monotonic raw base */
-       tk->base_raw = timespec64_to_ktime(tk->raw_time);
+       tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
 
        /*
         * The sum of the nanoseconds portions of xtime and
@@ -611,6 +605,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
        cycle_now = tk->tkr_mono.read(clock);
        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
        tk->tkr_mono.cycle_last = cycle_now;
+       tk->tkr_raw.cycle_last  = cycle_now;
 
        tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
 
@@ -619,7 +614,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 
        tk_normalize_xtime(tk);
 
-       nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
+       nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
        timespec64_add_ns(&tk->raw_time, nsec);
 }
 
@@ -748,8 +743,8 @@ ktime_t ktime_get_raw(void)
 
        do {
                seq = read_seqcount_begin(&tk_core.seq);
-               base = tk->base_raw;
-               nsecs = timekeeping_get_ns_raw(tk);
+               base = tk->tkr_raw.base;
+               nsecs = timekeeping_get_ns(&tk->tkr_raw);
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -862,7 +857,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
                ts_real->tv_sec = tk->xtime_sec;
                ts_real->tv_nsec = 0;
 
-               nsecs_raw = timekeeping_get_ns_raw(tk);
+               nsecs_raw  = timekeeping_get_ns(&tk->tkr_raw);
                nsecs_real = timekeeping_get_ns(&tk->tkr_mono);
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -1096,7 +1091,7 @@ void getrawmonotonic64(struct timespec64 *ts)
 
        do {
                seq = read_seqcount_begin(&tk_core.seq);
-               nsecs = timekeeping_get_ns_raw(tk);
+               nsecs = timekeeping_get_ns(&tk->tkr_raw);
                ts64 = tk->raw_time;
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -1217,7 +1212,6 @@ void __init timekeeping_init(void)
        tk_set_xtime(tk, &now);
        tk->raw_time.tv_sec = 0;
        tk->raw_time.tv_nsec = 0;
-       tk->base_raw.tv64 = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                boot = tk_xtime(tk);
 
@@ -1367,6 +1361,8 @@ void timekeeping_resume(void)
 
        /* Re-base the last cycle value */
        tk->tkr_mono.cycle_last = cycle_now;
+       tk->tkr_raw.cycle_last  = cycle_now;
+
        tk->ntp_error = 0;
        timekeeping_suspended = 0;
        timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1681,6 +1677,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
        /* Accumulate one shifted interval */
        offset -= interval;
        tk->tkr_mono.cycle_last += interval;
+       tk->tkr_raw.cycle_last  += interval;
 
        tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
        *clock_set |= accumulate_nsecs_to_secs(tk);