sched/pelt: Relax the sync of load_sum with load_avg
Author:     Vincent Guittot <vincent.guittot@linaro.org>
            Tue, 11 Jan 2022 13:46:59 +0000 (14:46 +0100)
Committer:  Peter Zijlstra <peterz@infradead.org>
            Tue, 18 Jan 2022 11:09:58 +0000 (12:09 +0100)
Similarly to util_avg and util_sum, don't sync load_sum with the low
bound of load_avg but only ensure that load_sum stays in the correct range.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Sachin Sant <sachinp@linux.ibm.com>
Link: https://lkml.kernel.org/r/20220111134659.24961-5-vincent.guittot@linaro.org
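
For context, the difference between the old resync and the new bounded update can be sketched as follows. This is a minimal, self-contained illustration only (struct avg_sketch, remove_load_old()/remove_load_new() and MIN_DIVIDER_SKETCH are stand-ins, not the kernel's actual code); the real change is the fair.c diff below.

	/*
	 * Minimal sketch, not kernel code: contrast the old and new handling
	 * of load_sum when a load_avg contribution 'r' is removed.
	 */
	#include <stdint.h>

	/* Stand-in for the kernel's PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024). */
	#define MIN_DIVIDER_SKETCH	(47742 - 1024)

	struct avg_sketch {
		unsigned long	load_avg;
		uint64_t	load_sum;
	};

	/* Like sub_positive(): subtract but never go below zero. */
	static unsigned long sub_pos(unsigned long v, unsigned long d)
	{
		return v > d ? v - d : 0;
	}

	/* Old: rebuild load_sum from load_avg, discarding load_sum's own value. */
	static void remove_load_old(struct avg_sketch *sa, unsigned long r, uint32_t divider)
	{
		sa->load_avg = sub_pos(sa->load_avg, r);
		sa->load_sum = (uint64_t)sa->load_avg * divider;
	}

	/*
	 * New: subtract from load_sum itself, then only enforce the low bound
	 * load_sum >= load_avg * MIN_DIVIDER so the pair stays in range.
	 */
	static void remove_load_new(struct avg_sketch *sa, unsigned long r, uint32_t divider)
	{
		uint64_t sum_r = (uint64_t)r * divider;
		uint64_t lower;

		sa->load_avg = sub_pos(sa->load_avg, r);
		sa->load_sum = sa->load_sum > sum_r ? sa->load_sum - sum_r : 0;

		lower = (uint64_t)sa->load_avg * MIN_DIVIDER_SKETCH;
		if (sa->load_sum < lower)
			sa->load_sum = lower;
	}

The diff below applies this idea at three sites: dequeue_load_avg(), update_tg_cfs_load() and update_cfs_rq_load_avg().
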
kernel/sched/fair.c

index 0e87e1916504462d0c32da765e45629f133e9aee..f4f02c2cff87fec91b9c5deaa61707cb48c42967 100644
@@ -3028,9 +3028,11 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       u32 divider = get_pelt_divider(&se->avg);
        sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-       cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+       sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+       /* See update_cfs_rq_load_avg() */
+       cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+                                         cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 #else
 static inline void
@@ -3513,9 +3515,10 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+       long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
        unsigned long load_avg;
        u64 load_sum = 0;
+       s64 delta_sum;
        u32 divider;
 
        if (!runnable_sum)
@@ -3542,7 +3545,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
                 * assuming all tasks are equally runnable.
                 */
                if (scale_load_down(gcfs_rq->load.weight)) {
-                       load_sum = div_s64(gcfs_rq->avg.load_sum,
+                       load_sum = div_u64(gcfs_rq->avg.load_sum,
                                scale_load_down(gcfs_rq->load.weight));
                }
 
@@ -3559,19 +3562,22 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
        running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
        runnable_sum = max(runnable_sum, running_sum);
 
-       load_sum = (s64)se_weight(se) * runnable_sum;
-       load_avg = div_s64(load_sum, divider);
-
-       se->avg.load_sum = runnable_sum;
+       load_sum = se_weight(se) * runnable_sum;
+       load_avg = div_u64(load_sum, divider);
 
-       delta = load_avg - se->avg.load_avg;
-       if (!delta)
+       delta_avg = load_avg - se->avg.load_avg;
+       if (!delta_avg)
                return;
 
-       se->avg.load_avg = load_avg;
+       delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
 
-       add_positive(&cfs_rq->avg.load_avg, delta);
-       cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+       se->avg.load_sum = runnable_sum;
+       se->avg.load_avg = load_avg;
+       add_positive(&cfs_rq->avg.load_avg, delta_avg);
+       add_positive(&cfs_rq->avg.load_sum, delta_sum);
+       /* See update_cfs_rq_load_avg() */
+       cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+                                         cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3687,7 +3693,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
                r = removed_load;
                sub_positive(&sa->load_avg, r);
-               sa->load_sum = sa->load_avg * divider;
+               sub_positive(&sa->load_sum, r * divider);
+               /* See sa->util_sum below */
+               sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
 
                r = removed_util;
                sub_positive(&sa->util_avg, r);