sched / idle: Call idle_set_state() from cpuidle_enter_state()
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Sat, 9 May 2015 23:18:03 +0000 (01:18 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 14 May 2015 19:35:10 +0000 (21:35 +0200)
Introduce sched_idle_set_state(), a wrapper around idle_set_state() that
passes this_rq() to it as the first argument, and make
cpuidle_enter_state() call the new wrapper before and after entering the
target state.

At the same time, remove direct invocations of idle_set_state()
from call_cpuidle().

This will allow the invocation of default_idle_call() to be safely moved
from call_cpuidle() to cpuidle_enter_state(), and call_cpuidle() to be
simplified a bit as a result.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Tested-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Kevin Hilman <khilman@linaro.org>
drivers/cpuidle/cpuidle.c
include/linux/cpuidle.h
kernel/sched/idle.c

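For orientation before reading the hunks below, here is a condensed sketch of
how cpuidle_enter_state() brackets the actual state entry with the new wrapper
once this patch is applied. It is not the full kernel function: time
accounting, tracing, broadcast-timer handling and error paths are elided, and
the body is reconstructed from the hunks rather than copied verbatim.

	/* Condensed sketch, not the complete cpuidle_enter_state(). */
	int cpuidle_enter_state(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
	{
		struct cpuidle_state *target_state = &drv->states[index];
		int entered_state;

		/* Take note of the planned idle state on this CPU's runqueue. */
		sched_idle_set_state(target_state);

		/* Enter the low-power state; returns after a wakeup interrupt. */
		entered_state = target_state->enter(dev, drv, index);

		/* The CPU is no longer idle or about to enter idle. */
		sched_idle_set_state(NULL);

		return entered_state;
	}
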
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 597f884..9306dd5 100644
@@ -170,6 +170,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        if (broadcast && tick_broadcast_enter())
                return -EBUSY;
 
+       /* Take note of the planned idle state. */
+       sched_idle_set_state(target_state);
+
        trace_cpu_idle_rcuidle(index, dev->cpu);
        time_start = ktime_get();
 
@@ -178,6 +181,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        time_end = ktime_get();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
+       /* The cpu is no longer idle or about to enter idle. */
+       sched_idle_set_state(NULL);
+
        if (broadcast) {
                if (WARN_ON_ONCE(!irqs_disabled()))
                        local_irq_disable();
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 9c5e892..301eaaa 100644
@@ -200,6 +200,9 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
        struct cpuidle_device *dev) {return NULL; }
 #endif
 
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
 #else
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 9c919b4..5d9f549 100644
 
 #include "sched.h"
 
+/**
+ * sched_idle_set_state - Record idle state for the current CPU.
+ * @idle_state: State to record.
+ */
+void sched_idle_set_state(struct cpuidle_state *idle_state)
+{
+       idle_set_state(this_rq(), idle_state);
+}
+
 static int __read_mostly cpu_idle_force_poll;
 
 void cpu_idle_poll_ctrl(bool enable)
@@ -100,9 +109,6 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                return -EBUSY;
        }
 
-       /* Take note of the planned idle state. */
-       idle_set_state(this_rq(), &drv->states[next_state]);
-
        /*
         * Enter the idle state previously returned by the governor decision.
         * This function will block until an interrupt occurs and will take
@@ -110,9 +116,6 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
         */
        entered_state = cpuidle_enter(drv, dev, next_state);
 
-       /* The cpu is no longer idle or about to enter idle. */
-       idle_set_state(this_rq(), NULL);
-
        if (entered_state == -EBUSY)
                default_idle_call();