sched/fair: Trigger the update of blocked load on newly idle cpu
author Vincent Guittot <vincent.guittot@linaro.org>
Wed, 24 Feb 2021 13:30:06 +0000 (14:30 +0100)
committer Ingo Molnar <mingo@kernel.org>
Sat, 6 Mar 2021 11:40:22 +0000 (12:40 +0100)
Instead of waking up a random, already idle CPU, we can take advantage of
this_cpu being about to enter idle and run the ILB (idle load balancer)
there to update the blocked load.
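
The handoff is a simple atomic flag: the newly idle path sets
NOHZ_NEWILB_KICK with atomic_or(), and do_idle() consumes it with
atomic_fetch_andnot(), doing the update only when no bit of
NOHZ_KICK_MASK is pending (a pending SCHED_SOFTIRQ would update the
blocked load anyway). Below is a minimal, self-contained C11 sketch of
this consume-the-flag pattern; it is a userspace illustration only, and
NEWILB_KICK, KICK_MASK, request_update() and run_idle_balance() are
invented stand-ins, not kernel symbols:

    #include <stdatomic.h>
    #include <stdio.h>

    #define NEWILB_KICK  (1u << 2)  /* stand-in for NOHZ_NEWILB_KICK */
    #define KICK_MASK    0x3u       /* stand-in for NOHZ_KICK_MASK */

    static atomic_uint flags;

    /* newly idle path: cheaply request a blocked load update */
    static void request_update(void)
    {
            atomic_fetch_or(&flags, NEWILB_KICK);
    }

    /* idle entry: clear the request; the value before clearing tells
     * us whether we own the update or another kick will handle it */
    static void run_idle_balance(void)
    {
            unsigned int prev = atomic_fetch_and(&flags, ~NEWILB_KICK);

            /* the kernel uses the compact form: prev == NEWILB_KICK */
            if ((prev & NEWILB_KICK) && !(prev & KICK_MASK))
                    puts("update blocked load");
    }

    int main(void)
    {
            request_update();
            run_idle_balance();  /* prints: update blocked load */
            run_idle_balance();  /* no-op: request already consumed */
            return 0;
    }

The kernel's consumer additionally bails out when need_resched() is
set, since this CPU is then about to run a task rather than stay idle.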

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210224133007.28644-7-vincent.guittot@linaro.org
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/sched.h

index f9dfb349146e900869a2c1816408664f285434c5..361974efc2430fe9ed2b065d318cdbcb56ddb5dc 100644 (file)
@@ -737,7 +737,7 @@ static void nohz_csd_func(void *info)
        /*
         * Release the rq::nohz_csd.
         */
-       flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
+       flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
        WARN_ON(!(flags & NOHZ_KICK_MASK));
 
        rq->idle_balance = idle_cpu(cpu);
index 356a2456c5e9ec063a2fcd0da453cfbec49a80b6..e87e1b3bcdca93fab7a6ae1df8fee7221d6c1d59 100644 (file)
@@ -10453,6 +10453,24 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
        return true;
 }
 
+/*
+ * Check if we need to run the ILB for updating blocked load before entering
+ * idle state.
+ */
+void nohz_run_idle_balance(int cpu)
+{
+       unsigned int flags;
+
+       flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
+
+       /*
+        * Update the blocked load only if no SCHED_SOFTIRQ is about to happen
+        * (ie NOHZ_KICK_MASK not set), as the softirq will do the same update.
+        */
+       if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
+               _nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK, CPU_IDLE);
+}
+
 static void nohz_newidle_balance(struct rq *this_rq)
 {
        int this_cpu = this_rq->cpu;
@@ -10474,10 +10492,10 @@ static void nohz_newidle_balance(struct rq *this_rq)
                return;
 
        /*
-        * Blocked load of idle CPUs need to be updated.
-        * Kick an ILB to update statistics.
+        * Flag that the blocked load needs updating; this CPU will run the
+        * ILB itself right before entering the idle state.
         */
-       kick_ilb(NOHZ_STATS_KICK);
+       atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
 }
 
 #else /* !CONFIG_NO_HZ_COMMON */
index 7199e6f23789e35973ac2faa115d7fc83f1fd595..7a92d6054aba59067013433377457bc253de7b15 100644 (file)
@@ -261,6 +261,12 @@ exit_idle:
 static void do_idle(void)
 {
        int cpu = smp_processor_id();
+
+       /*
+        * Check if we need to update blocked load
+        */
+       nohz_run_idle_balance(cpu);
+
        /*
         * If the arch has a polling bit, we maintain an invariant:
         *
index 10a1522b1e30311a89ab7c7025c3af081f566741..0ddc9a6ff03a27112a822a27bceaecd95a95f996 100644 (file)
@@ -2385,9 +2385,11 @@ extern void cfs_bandwidth_usage_dec(void);
 #ifdef CONFIG_NO_HZ_COMMON
 #define NOHZ_BALANCE_KICK_BIT  0
 #define NOHZ_STATS_KICK_BIT    1
+#define NOHZ_NEWILB_KICK_BIT   2
 
 #define NOHZ_BALANCE_KICK      BIT(NOHZ_BALANCE_KICK_BIT)
 #define NOHZ_STATS_KICK                BIT(NOHZ_STATS_KICK_BIT)
+#define NOHZ_NEWILB_KICK       BIT(NOHZ_NEWILB_KICK_BIT)
 
 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
 
@@ -2398,6 +2400,11 @@ extern void nohz_balance_exit_idle(struct rq *rq);
 static inline void nohz_balance_exit_idle(struct rq *rq) { }
 #endif
 
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void nohz_run_idle_balance(int cpu);
+#else
+static inline void nohz_run_idle_balance(int cpu) { }
+#endif
 
 #ifdef CONFIG_SMP
 static inline