sched: Aggregate total task_group load
Author:     Paul Turner <pjt@google.com>
AuthorDate: Thu, 4 Oct 2012 11:18:30 +0000 (13:18 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 24 Oct 2012 08:27:24 +0000 (10:27 +0200)

Maintain a global running sum of the average load seen on each cfs_rq belonging
to each task group so that it may be used in calculating an appropriate
shares:weight distribution.
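
For illustration, a sketch of how this aggregate might later be
consumed when distributing a group's shares across cpus; this is not
part of the patch, calc_group_weight() is a hypothetical helper name,
and the clamp bounds are assumptions:

	/*
	 * Hypothetical sketch, not part of this patch: weight each
	 * cfs_rq by its published fraction of the group-wide load.
	 */
	static long calc_group_weight(struct task_group *tg,
				      struct cfs_rq *cfs_rq)
	{
		long shares = tg->shares;
		s64 tg_load = atomic64_read(&tg->load_avg);

		/* weight = shares * local contribution / total load */
		if (tg_load > 0)
			shares = div64_s64((s64)shares *
					   cfs_rq->tg_load_contrib, tg_load);

		/* never hand out more than shares, nor less than 2 */
		return clamp_t(long, shares, 2L, (long)tg->shares);
	}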

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141506.792901086@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2d2e2b3..2908923 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -230,6 +230,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                        cfs_rq->runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
                        cfs_rq->blocked_load_avg);
+       SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
+                       (long long)atomic64_read(&cfs_rq->tg->load_avg));
+       SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
+                       cfs_rq->tg_load_contrib);
 #endif
 
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 74dc29b..db78822 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1102,6 +1102,26 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
        return decays;
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
+                                                int force_update)
+{
+       struct task_group *tg = cfs_rq->tg;
+       s64 tg_contrib;
+
+       tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
+       tg_contrib -= cfs_rq->tg_load_contrib;
+
+       if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
+               atomic64_add(tg_contrib, &tg->load_avg);
+               cfs_rq->tg_load_contrib += tg_contrib;
+       }
+}
+#else
+static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
+                                                int force_update) {}
+#endif
+
 /* Compute the current contribution to load_avg by se, return any delta */
 static long __update_entity_load_avg_contrib(struct sched_entity *se)
 {
@@ -1172,6 +1192,8 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
                atomic64_add(decays, &cfs_rq->decay_counter);
                cfs_rq->last_decay = now;
        }
+
+       __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
 }
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
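
The 1/8 threshold above batches updates to the shared counter: a
cfs_rq republishes its contribution only when forced, or when the
accumulated delta exceeds one eighth of the value it last published,
bounding the relative error while limiting cache-line bouncing on
tg->load_avg. A minimal standalone sketch of the same rule, with
made-up load figures:

	#include <stdio.h>
	#include <stdlib.h>

	static long long published;  /* plays cfs_rq->tg_load_contrib */
	static long long global_sum; /* plays tg->load_avg */

	static void update_contrib(long long load, int force)
	{
		long long delta = load - published;

		/* fold in only forced or "large enough" deltas */
		if (force || llabs(delta) > published / 8) {
			global_sum += delta;
			published += delta;
		}
	}

	int main(void)
	{
		update_contrib(1024, 1); /* forced initial publish  */
		update_contrib(1100, 0); /* |76| <= 128: skipped    */
		update_contrib(1200, 0); /* |176| > 128: published  */
		printf("global=%lld published=%lld\n",
		       global_sum, published);
		return 0;
	}
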
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 30236ab..924a990 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -112,6 +112,7 @@ struct task_group {
        unsigned long shares;
 
        atomic_t load_weight;
+       atomic64_t load_avg;
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -232,6 +233,9 @@ struct cfs_rq {
        u64 runnable_load_avg, blocked_load_avg;
        atomic64_t decay_counter, removed_load;
        u64 last_decay;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       u64 tg_load_contrib;
+#endif
 #endif
 #ifdef CONFIG_FAIR_GROUP_SCHED
        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */
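
Net effect of the new fields: up to per-cfs_rq deltas still below the
1/8 publication threshold, the invariant

	tg->load_avg == sum over all cpus of tg's cfs_rq tg_load_contrib

holds, so any cpu can cheaply read an approximation of the group's
machine-wide load when computing a shares:weight distribution.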