From: Dimitri Sivanich
Date: Fri, 18 Apr 2008 20:39:00 +0000 (-0700)
Subject: hrtimer: reduce calls to hrtimer_get_softirq_time()
X-Git-Tag: v2.6.26-rc1~1129^2~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=833883d9ac4cfb31c1c4419335e68e6895a05b6b;p=platform%2Fkernel%2Flinux-3.10.git

hrtimer: reduce calls to hrtimer_get_softirq_time()

It seems that hrtimer_run_queues() is calling hrtimer_get_softirq_time()
more often than it needs to.  This can cause frequent contention on
systems with large numbers of processors/cores.

With this patch, hrtimer_run_queues() calls hrtimer_get_softirq_time()
only if there is a pending timer in one of the hrtimer bases, and then
only once.

This also combines hrtimer_run_queues() and the inline
run_hrtimer_queue() into one function.

[ tglx@linutronix.de: coding style ]

Signed-off-by: Dimitri Sivanich
Cc: Peter Zijlstra
Cc: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Thomas Gleixner
---

diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c642ef7..70d4adc 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1238,51 +1238,51 @@ void hrtimer_run_pending(void)
 /*
  * Called from hardirq context every jiffy
  */
-static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
-				     int index)
+void hrtimer_run_queues(void)
 {
 	struct rb_node *node;
-	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
+	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_clock_base *base;
+	int index, gettime = 1;
 
-	if (!base->first)
+	if (hrtimer_hres_active())
 		return;
 
-	if (base->get_softirq_time)
-		base->softirq_time = base->get_softirq_time();
-
-	spin_lock(&cpu_base->lock);
+	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+		base = &cpu_base->clock_base[index];
 
-	while ((node = base->first)) {
-		struct hrtimer *timer;
-
-		timer = rb_entry(node, struct hrtimer, node);
-		if (base->softirq_time.tv64 <= timer->expires.tv64)
-			break;
-
-		if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-			__remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
-			list_add_tail(&timer->cb_entry,
-				      &base->cpu_base->cb_pending);
+		if (!base->first)
 			continue;
+
+		if (gettime) {
+			hrtimer_get_softirq_time(cpu_base);
+			gettime = 0;
 		}
 
-		__run_hrtimer(timer);
-	}
-	spin_unlock(&cpu_base->lock);
-}
+		if (base->get_softirq_time)
+			base->softirq_time = base->get_softirq_time();
 
-void hrtimer_run_queues(void)
-{
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-	int i;
+		spin_lock(&cpu_base->lock);
 
-	if (hrtimer_hres_active())
-		return;
+		while ((node = base->first)) {
+			struct hrtimer *timer;
 
-	hrtimer_get_softirq_time(cpu_base);
+			timer = rb_entry(node, struct hrtimer, node);
+			if (base->softirq_time.tv64 <= timer->expires.tv64)
+				break;
 
-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-		run_hrtimer_queue(cpu_base, i);
+			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
+				__remove_hrtimer(timer, base,
+						 HRTIMER_STATE_PENDING, 0);
+				list_add_tail(&timer->cb_entry,
+					      &base->cpu_base->cb_pending);
+				continue;
+			}
+
+			__run_hrtimer(timer);
+		}
+		spin_unlock(&cpu_base->lock);
+	}
 }
 
 /*
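
For reference, below is a minimal, self-contained user-space sketch of
the pattern this patch applies: fetch a contended shared timestamp
lazily, at most once per pass, and only when some base actually has
pending work.  This is not kernel code; the names (struct queue,
fetch_shared_time, run_queues) are hypothetical stand-ins for the
hrtimer bases and hrtimer_get_softirq_time().

#include <stdio.h>

#define NBASES 2

struct queue {
	int pending;			/* does this base have queued timers? */
};

/* Stand-in for the expensive, contended clock read. */
static long fetch_shared_time(void)
{
	puts("shared time fetched");
	return 42;
}

static void run_queues(struct queue *q, int n)
{
	long now = 0;
	int i, gettime = 1;		/* fetch the time at most once */

	for (i = 0; i < n; i++) {
		if (!q[i].pending)
			continue;	/* nothing queued: skip the read */

		if (gettime) {		/* first base with work: read once */
			now = fetch_shared_time();
			gettime = 0;
		}
		printf("base %d: expiring timers against %ld\n", i, now);
	}
}

int main(void)
{
	struct queue q[NBASES] = { { 0 }, { 1 } };

	run_queues(q, NBASES);		/* prints "shared time fetched" once */
	return 0;
}

If no base has pending work, fetch_shared_time() is never called at
all, which is the contention reduction the patch is after on large
processor counts.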