TP_printk("needed=%d", __entry->needed)
);
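+
+/*
+ * Report the CPU selected by find_idlest_group_cpu() for a candidate
+ * group: the group's CPUs, the CPU running the selection and the CPU
+ * that was chosen.
+ */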
+TRACE_EVENT(sched_find_idlest_cpu,
+
+ TP_PROTO(const struct cpumask *group_cpus, int cpu, int new_cpu),
+
+ TP_ARGS(group_cpus, cpu, new_cpu),
+
+ TP_STRUCT__entry(
+ __bitmask(cpumask, num_possible_cpus())
+ __field( int, cpu )
+ __field( int, new_cpu )
+ ),
+
+ TP_fast_assign(
+ __assign_bitmask(cpumask, cpumask_bits(group_cpus),
+ num_possible_cpus());
+ __entry->cpu = cpu;
+ __entry->new_cpu = new_cpu;
+ ),
+
+ TP_printk("group_cpus=%s cpu=%d new_cpu=%d",
+ __get_bitmask(cpumask), __entry->cpu, __entry->new_cpu)
+);
+
#endif /* CONFIG_SMP */
#endif /* _TRACE_SCHED_H */
{
struct sched_group *idlest = NULL, *group = sd->groups;
struct sched_group *most_spare_sg = NULL;
+ struct sched_group *group_with_idle = NULL;
unsigned long min_runnable_load = ULONG_MAX;
unsigned long this_runnable_load = ULONG_MAX;
unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
(sd->imbalance_pct-100) / 100;
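+ /* Idle-CPU tracking while walking the groups; -1 means none seen yet */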
+ bool found_local_idle = false;
+ int found_idle_cpu = -1;
if (sd_flag & SD_BALANCE_WAKE)
load_idx = sd->wake_idx;
if (spare_cap > max_spare_cap)
max_spare_cap = spare_cap;
+
+ /* If there is an idle CPU, prefer it; an idle CPU in the local group wins */
+ if (idle_cpu(i)) {
+ if (found_local_idle)
+ continue;
+ if (local_group)
+ found_local_idle = true;
+
+ found_idle_cpu = i;
+ group_with_idle = group;
+ }
}
/* Adjust by relative CPU capacity of the group */
* utilization.
*/
if (sd_flag & SD_BALANCE_FORK)
- goto skip_spare;
+ goto try_skip_packing;
if (this_spare > task_util(p) / 2 &&
imbalance_scale*this_spare > 100*most_spare)
if (most_spare > task_util(p) / 2)
return most_spare_sg;
-skip_spare:
+try_skip_packing:
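+ /* An idle CPU was found: prefer its group over the load-based choice */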
+ if (found_idle_cpu != -1)
+ return group_with_idle;
+
if (!idlest)
return NULL;
(100*this_avg_load < imbalance_scale*min_avg_load))
return NULL;
+ /* Last resort: all CPUs are loaded, so stay with the current CPU */
+ if (found_idle_cpu == -1 && (sd_flag & SD_BALANCE_FORK))
+ return NULL;
+
return idlest;
}
int cpu, int prev_cpu, int sd_flag)
{
int new_cpu = cpu;
+ int prop_cpu = cpu;
if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
return prev_cpu;
}
new_cpu = find_idlest_group_cpu(group, p, cpu);
+ trace_sched_find_idlest_cpu(sched_group_span(group), cpu,
+ new_cpu);
if (new_cpu == cpu) {
/* Now try balancing at a lower domain level of cpu */
sd = sd->child;