From: Paul E. McKenney
Date: Wed, 2 Nov 2011 14:38:25 +0000 (-0700)
Subject: rcu: Fix idle-task checks
X-Git-Tag: v3.3-rc1~193^2^2~35
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=11dbaa8cb79a6e4a234a134898436f717a663f01;p=profile%2Fivi%2Fkernel-adaptation-intel-automotive.git

rcu: Fix idle-task checks

RCU has traditionally relied on idle_cpu() to determine whether a given
CPU is running in the context of an idle task, but commit 908a3283
(Fix idle_cpu()) has invalidated this approach.  After commit 908a3283,
idle_cpu() will return true only if the current CPU is running the idle
task and will continue doing so for the foreseeable future.  RCU instead
needs to know whether the current CPU is running the idle task right now,
regardless of what the near future might bring.  This commit therefore
switches from idle_cpu() to "current->pid != 0".

Reported-by: Wu Fengguang
Suggested-by: Carsten Emde
Signed-off-by: Paul E. McKenney
Acked-by: Steven Rostedt
Tested-by: Wu Fengguang
Signed-off-by: Paul E. McKenney
---

diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 6d70ff7..4e16ce3 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -64,7 +64,7 @@ static void rcu_idle_enter_common(long long oldval)
 		return;
 	}
 	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
-	if (!idle_cpu(smp_processor_id())) {
+	if (current->pid != 0) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
@@ -118,7 +118,7 @@ static void rcu_idle_exit_common(long long oldval)
 		return;
 	}
 	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
-	if (!idle_cpu(smp_processor_id())) {
+	if (current->pid != 0) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index b1711c4..49e0783 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -355,7 +355,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 		return;
 	}
 	trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
-	if (!idle_cpu(smp_processor_id())) {
+	if (current->pid != 0) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on entry: not idle task",
@@ -449,7 +449,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	smp_mb__after_atomic_inc();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
-	if (!idle_cpu(smp_processor_id())) {
+	if (current->pid != 0) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on exit: not idle task",
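
[Editor's illustration, not part of the patch: a minimal sketch of the check the
patch adopts.  The helper name rcu_running_nonidle_task() is hypothetical; it
merely wraps the "current->pid != 0" test, which is true exactly when the caller
is running in some non-idle task's context, independent of the pending-wakeup
state that idle_cpu() now also considers.]

	#include <linux/sched.h>	/* current, struct task_struct */
	#include <linux/types.h>	/* bool */

	/*
	 * Hypothetical helper: the per-CPU idle task always has PID 0,
	 * so this returns true exactly when the current CPU is running
	 * a non-idle task right now.  Unlike idle_cpu() after commit
	 * 908a3283, the result does not depend on whether the CPU will
	 * keep idling in the near future.
	 */
	static inline bool rcu_running_nonidle_task(void)
	{
		return current->pid != 0;
	}

[Later kernels encapsulate this test in an is_idle_task() helper, but at the
time of this commit the open-coded PID check is the straightforward fix.]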