 	return max(rq->cpu_load[type-1], total);
 }
 
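+/*
+ * group_of - the first sched group of @cpu's lowest attached
+ * sched domain, or NULL when the cpu has no domain attached.
+ */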
+static struct sched_group *group_of(int cpu)
+{
+	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+	if (!sd)
+		return NULL;
+
+	return sd->groups;
+}
+
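+/*
+ * power_of - the cpu_power of @cpu's sched group; defaults to
+ * SCHED_LOAD_SCALE (one nominal cpu) when no group is available.
+ */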
+static unsigned long power_of(int cpu)
+{
+	struct sched_group *group = group_of(cpu);
+
+	if (!group)
+		return SCHED_LOAD_SCALE;
+
+	return group->cpu_power;
+}
+
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 static unsigned long cpu_avg_load_per_task(int cpu)
 	return NULL;
 }
 
-static struct sched_group *group_of(int cpu)
-{
-	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
-
-	if (!sd)
-		return NULL;
-
-	return sd->groups;
-}
-
-static unsigned long power_of(int cpu)
-{
-	struct sched_group *group = group_of(cpu);
-
-	if (!group)
-		return SCHED_LOAD_SCALE;
-
-	return group->cpu_power;
-}
-
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 	for_each_domain(cpu, tmp) {
 		/*
-		 * If power savings logic is enabled for a domain, stop there.
+		 * If power savings logic is enabled for a domain, see
+		 * whether we are overloaded; if not, don't balance wider.
 		 */
-		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
-			break;
+		if (tmp->flags & SD_POWERSAVINGS_BALANCE) {
+			unsigned long power = 0;
+			unsigned long nr_running = 0;
+			unsigned long capacity;
+			int i;
+
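+			/*
+			 * Sum the cpu_power and the number of runnable
+			 * CFS tasks over every cpu in this domain.
+			 */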
+			for_each_cpu(i, sched_domain_span(tmp)) {
+				power += power_of(i);
+				nr_running += cpu_rq(i)->cfs.nr_running;
+			}
+
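+			/*
+			 * capacity: the domain's total cpu_power in units
+			 * of SCHED_LOAD_SCALE, i.e. roughly how many
+			 * nominal cpus' worth of compute it has.
+			 */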
+			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
+
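+			/*
+			 * Stop here (don't balance wider) when the domain
+			 * runs fewer than two tasks per unit of capacity;
+			 * an overloaded domain falls through and we keep
+			 * looking at wider domains.
+			 */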
+			if (nr_running/2 < capacity)
+				break;
+		}
 
 		switch (flag) {
 		case SD_BALANCE_WAKE: