{
struct sched_group *idlest = NULL, *group = sd->groups;
struct sched_group *most_spare_sg = NULL;
+ struct sched_group *group_with_idle = NULL;
unsigned long min_runnable_load = ULONG_MAX;
unsigned long this_runnable_load = ULONG_MAX;
unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
			(sd->imbalance_pct-100) / 100;
+ bool found_local_idle = false;
+ int found_idle_cpu = -1;
if (sd_flag & SD_BALANCE_WAKE)
	load_idx = sd->wake_idx;
if (spare_cap > max_spare_cap)
	max_spare_cap = spare_cap;
+
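+ /*
+  * Remember one idle CPU and the group containing it. Prefer an idle
+  * CPU in the local group: once one is found, later idle CPUs are
+  * ignored.
+  */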
+ if (idle_cpu(i)) {
+ 	if (found_local_idle)
+ 		continue;
+ 	if (local_group)
+ 		found_local_idle = true;
+
+ 	found_idle_cpu = i;
+ 	group_with_idle = group;
+ }
}
/* Adjust by relative CPU capacity of the group */
return most_spare_sg;
skip_spare:
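+ /* Prefer a group with an idle CPU over the least-loaded group */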
+ if (found_idle_cpu != -1)
+ 	return group_with_idle;
+
if (!idlest)
	return NULL;