* @clock_was_set_seq: Sequence counter of clock was set events
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
+ * @next_timer: Pointer to the first expiring timer
* @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
* @hang_detected: The last hrtimer interrupt detected a hang
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
* @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ * Do not dereference the pointer because it is not reliable on
+ * cross cpu removals.
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
unsigned int in_hrtirq : 1,
	hres_active : 1,
	hang_detected : 1;
ktime_t expires_next;
+ struct hrtimer *next_timer;
unsigned int nr_events;
unsigned int nr_retries;
unsigned int nr_hangs;
};
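A rough user-space sketch of the idea (not part of the patch; fake_timer, fake_cpu_base and scan_for_next are invented names): the scan that finds the earliest expiry also records which timer produced it, so later code can identify that timer by pointer comparison alone and never has to dereference the cached pointer.

/* Illustration only, not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct fake_timer {
	uint64_t expires;
};

struct fake_cpu_base {
	uint64_t expires_next;		/* earliest expiry found by the scan */
	struct fake_timer *next_timer;	/* timer that produced it */
};

static void scan_for_next(struct fake_cpu_base *cb,
			  struct fake_timer **timers, size_t n)
{
	cb->expires_next = UINT64_MAX;
	cb->next_timer = NULL;

	for (size_t i = 0; i < n; i++) {
		if (timers[i] && timers[i]->expires < cb->expires_next) {
			cb->expires_next = timers[i]->expires;
			cb->next_timer = timers[i];
		}
	}
}

int main(void)
{
	struct fake_timer a = { .expires = 300 }, b = { .expires = 100 };
	struct fake_timer *timers[] = { &a, NULL, &b };
	struct fake_cpu_base cb;

	scan_for_next(&cb, timers, 3);
	/* b expires first, so it becomes the cached next_timer */
	printf("next expiry %llu, is b: %d\n",
	       (unsigned long long)cb.expires_next, cb.next_timer == &b);
	return 0;
}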
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
+ struct hrtimer *timer)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+ cpu_base->next_timer = timer;
+#endif
+}
+
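The helper above follows a common kernel pattern: a one-line update wrapped in a static inline function whose body is compiled out when the config option is off, so callers need no #ifdef of their own. A minimal sketch of that pattern (illustration only; WANT_HINT, hint_holder and record_hint are invented names):

/* Illustration only, not kernel code. Build with or without -DWANT_HINT. */
#include <stdio.h>
#include <stddef.h>

struct hint_holder {
	void *hint;
};

static inline void record_hint(struct hint_holder *h, void *obj)
{
#ifdef WANT_HINT
	h->hint = obj;
#else
	(void)h;
	(void)obj;
#endif
}

int main(void)
{
	struct hint_holder h = { .hint = NULL };
	int obj = 42;

	record_hint(&h, &obj);	/* no-op unless built with -DWANT_HINT */
	printf("hint recorded: %d\n", h.hint == &obj);
	return 0;
}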
static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
unsigned int active = cpu_base->active_bases;
+ hrtimer_update_next_timer(cpu_base, NULL);
for (; active; base++, active >>= 1) {
struct timerqueue_node *next;
struct hrtimer *timer;
if (!(active & 0x01))
	continue;
next = timerqueue_getnext(&base->active);
timer = container_of(next, struct hrtimer, node);
expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
- if (expires.tv64 < expires_next.tv64)
+ if (expires.tv64 < expires_next.tv64) {
expires_next = expires;
+ hrtimer_update_next_timer(cpu_base, timer);
+ }
}
/*
* clock_was_set() might have changed base->offset of any of
if (cpu_base->in_hrtirq)
return 0;
+ cpu_base->next_timer = timer;
+
/*
* If a hang was detected in the last timer interrupt then we
* do not schedule a timer which is earlier than the expiry
unsigned long newstate, int reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
- struct timerqueue_node *next_timer;
+ unsigned int state = timer->state;
- if (!(timer->state & HRTIMER_STATE_ENQUEUED))
- goto out;
+ timer->state = newstate;
+ if (!(state & HRTIMER_STATE_ENQUEUED))
+ return;
- next_timer = timerqueue_getnext(&base->active);
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
- if (&timer->node == next_timer) {
#ifdef CONFIG_HIGH_RES_TIMERS
- /* Reprogram the clock event device. if enabled */
- if (reprogram && cpu_base->hres_active) {
- ktime_t expires;
-
- expires = ktime_sub(hrtimer_get_expires(timer),
- base->offset);
- if (cpu_base->expires_next.tv64 == expires.tv64)
- hrtimer_force_reprogram(cpu_base, 1);
- }
+ /*
+ * Note: If reprogram is false we do not update
+ * cpu_base->next_timer. This happens when we remove the first
+ * timer on a remote cpu. That is harmless, because we never
+ * dereference cpu_base->next_timer. The worst that can happen is
+ * a superfluous call to hrtimer_force_reprogram() on the
+ * remote cpu later on if the same timer gets enqueued again.
+ */
+ if (reprogram && timer == cpu_base->next_timer)
+ hrtimer_force_reprogram(cpu_base, 1);
#endif
- }
-out:
- timer->state = newstate;
}
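To make the simplification concrete, here is a rough user-space contrast (not part of the patch; all names are invented): the old removal path recomputed the removed timer's expiry and compared values against the programmed expires_next, while the new path just compares the pointer against the cached next_timer.

/* Illustration only, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_timer {
	uint64_t expires;
};

struct fake_cpu_base {
	uint64_t expires_next;
	struct fake_timer *next_timer;
};

/*
 * Old style: recompute the removed timer's expiry and compare values
 * (the real code also checked that the timer was the queue head first).
 */
static bool needs_reprogram_old(struct fake_cpu_base *cb,
				struct fake_timer *removed)
{
	return removed->expires == cb->expires_next;
}

/* New style: pointer identity against the cached first-expiring timer. */
static bool needs_reprogram_new(struct fake_cpu_base *cb,
				struct fake_timer *removed)
{
	return removed == cb->next_timer;
}

int main(void)
{
	struct fake_timer t = { .expires = 100 };
	struct fake_cpu_base cb = { .expires_next = 100, .next_timer = &t };

	printf("old: %d, new: %d\n",
	       needs_reprogram_old(&cb, &t), needs_reprogram_new(&cb, &t));
	return 0;
}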
/*