sched/fair: Reduce the periodic update duration
Author:     Vincent Guittot <vincent.guittot@linaro.org>
AuthorDate: Tue, 13 Feb 2018 10:31:18 +0000 (11:31 +0100)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Fri, 9 Mar 2018 06:59:22 +0000 (07:59 +0100)
Instead of using cfs_rq_is_decayed(), which monitors all the *_avg
and *_sum fields, create a cfs_rq_has_blocked() which only checks
util_avg and load_avg. These are the only two values we are
interested in, and since the *_avg fields decay to zero faster than
their *_sum counterparts, the periodic update can be stopped earlier.
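To see why the *_avg fields reach zero first, here is a toy userspace
model of PELT's geometric decay (a sketch, not kernel code; it assumes
the v4.16-era constants, i.e. a 32ms half-life and a divider of
LOAD_AVG_MAX ~= 47742). Because a *_avg is roughly its *_sum divided
by LOAD_AVG_MAX, integer truncation drives the *_avg to zero a couple
of hundred milliseconds before the raw *_sum itself decays away:

	#include <stdio.h>
	#include <stdint.h>

	#define LOAD_AVG_MAX	47742ULL /* max *_sum (geometric series limit) */
	#define NICE_0_LOAD	1024ULL	 /* weight of a nice-0 task */

	int main(void)
	{
		uint64_t sum = LOAD_AVG_MAX;	/* fully loaded, then goes idle */
		int ms, avg_zero_ms = 0;

		for (ms = 1; sum; ms++) {
			sum = sum * 1002 / 1024; /* ~y per ms, with y^32 == 0.5 */
			if (!avg_zero_ms && NICE_0_LOAD * sum / LOAD_AVG_MAX == 0)
				avg_zero_ms = ms; /* load_avg truncates to 0 */
		}
		printf("load_avg hits 0 at ~%dms, load_sum only at ~%dms\n",
		       avg_zero_ms, ms);
		return 0;
	}

With these constants the avg truncates to zero around 320ms after the
entity goes idle, while the sum needs roughly 500ms, so the new
predicate lets the periodic update stop noticeably sooner.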

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: brendan.jackman@arm.com
Cc: dietmar.eggemann@arm.com
Cc: morten.rasmussen@foss.arm.com
Cc: valentin.schneider@arm.com
Link: http://lkml.kernel.org/r/1518517879-2280-3-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 78b06a0..aad7c03 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7424,6 +7424,19 @@ static void attach_tasks(struct lb_env *env)
        rq_unlock(env->dst_rq, &rf);
 }
 
+static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
+{
+       if (cfs_rq->avg.load_avg)
+               return true;
+
+       if (cfs_rq->avg.util_avg)
+               return true;
+
+       return false;
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 {
        if (cfs_rq->load.weight)
@@ -7441,8 +7454,6 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
        return true;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
 static void update_blocked_averages(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -7478,7 +7489,9 @@ static void update_blocked_averages(int cpu)
                 */
                if (cfs_rq_is_decayed(cfs_rq))
                        list_del_leaf_cfs_rq(cfs_rq);
-               else
+
+               /* Don't need periodic decay once load/util_avg are null */
+               if (cfs_rq_has_blocked(cfs_rq))
                        done = false;
        }
 
@@ -7548,7 +7561,7 @@ static inline void update_blocked_averages(int cpu)
        update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
 #ifdef CONFIG_NO_HZ_COMMON
        rq->last_blocked_load_update_tick = jiffies;
-       if (cfs_rq_is_decayed(cfs_rq))
+       if (!cfs_rq_has_blocked(cfs_rq))
                rq->has_blocked_load = 0;
 #endif
        rq_unlock_irqrestore(rq, &rf);
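
For readers without the full tree at hand, a condensed paraphrase of
what the group-scheduling update_blocked_averages() loop looks like
after this patch (a sketch, not the verbatim function: locking, the rq
clock update and the per-entity update_load_avg() calls are elided,
and the done/has_blocked_load interplay follows the NOHZ blocked-load
machinery introduced earlier in this series):

	static void update_blocked_averages(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		struct cfs_rq *cfs_rq;
		bool done = true;

		for_each_leaf_cfs_rq(rq, cfs_rq) {
			update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);

			/* A fully decayed group can leave the leaf list... */
			if (cfs_rq_is_decayed(cfs_rq))
				list_del_leaf_cfs_rq(cfs_rq);

			/*
			 * ...but only load_avg/util_avg decide whether
			 * another periodic pass is still needed.
			 */
			if (cfs_rq_has_blocked(cfs_rq))
				done = false;
		}

	#ifdef CONFIG_NO_HZ_COMMON
		rq->last_blocked_load_update_tick = jiffies;
		if (done)
			rq->has_blocked_load = 0; /* NOHZ stats kicks can stop */
	#endif
	}

The two predicates thus split the work: cfs_rq_is_decayed() still
gates removal from the leaf list (which must wait for the *_sum fields
to fully decay), while cfs_rq_has_blocked() gates only the continued
periodic updates.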