cpuidle, cpu_pm: Remove RCU fiddling from cpu_pm_{enter,exit}()
Author: Peter Zijlstra <peterz@infradead.org>
Thu, 12 Jan 2023 19:43:28 +0000 (20:43 +0100)
Committer: Ingo Molnar <mingo@kernel.org>
Fri, 13 Jan 2023 10:48:15 +0000 (11:48 +0100)
All callers should still have RCU enabled.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20230112195540.190860672@infradead.org
kernel/cpu_pm.c

index ba4ba71..b0f0d15 100644 (file)
@@ -30,16 +30,9 @@ static int cpu_pm_notify(enum cpu_pm_event event)
 {
        int ret;
 
-       /*
-        * This introduces a RCU read critical section, which could be
-        * disfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
-        * this.
-        */
-       ct_irq_enter_irqson();
        rcu_read_lock();
        ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
        rcu_read_unlock();
-       ct_irq_exit_irqson();
 
        return notifier_to_errno(ret);
 }
@@ -49,11 +42,9 @@ static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event ev
        unsigned long flags;
        int ret;
 
-       ct_irq_enter_irqson();
        raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
        ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
        raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
-       ct_irq_exit_irqson();
 
        return notifier_to_errno(ret);
 }