return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
}
+
+/*
+ * Book-keeping for the shallowest-idle-CPU search: tracks the best
+ * candidate seen so far while walking the groups of a sched_domain.
+ */
+struct shallowest_idle {
+	struct cpuidle_state *c_state;	/* cpuidle state of the candidate CPU */
+	unsigned int exit_latency;	/* candidate's exit_latency; UINT_MAX
+					 * until a candidate is found */
+	int cpu;			/* candidate CPU, -1 if none yet */
+	u64 idle_stamp;			/* rq->idle_stamp of the candidate;
+					 * must be u64 to match struct rq —
+					 * an int would truncate and break
+					 * the "most recently idle" tie-break */
+	struct sched_domain *sd;
+	struct sched_group *sg;		/* group the candidate belongs to */
+};
+
+/*
+ * find_shallowest_idle_in_group - scan one sched_group for the best idle CPU.
+ * @shidle: in/out record of the shallowest-idle candidate found so far
+ * @sg:     group whose CPUs are scanned
+ * @cpus:   mask restricting which CPUs may be considered (task affinity)
+ *
+ * A CPU in @sg replaces the current candidate when:
+ *  - its cpuidle state has a strictly smaller exit_latency, or
+ *  - its exit_latency ties the current best (or it reports no cpuidle
+ *    state at all) and it went idle more recently (larger rq->idle_stamp).
+ *
+ * NOTE(review): a NULL idle_get_state() is treated as *matching* the
+ * current best exit_latency, even while shidle->exit_latency is still
+ * UINT_MAX (no candidate yet) — confirm that is the intended semantic
+ * rather than treating a stateless idle CPU as shallowest (latency 0).
+ *
+ * Returns 0 if @shidle was updated by this group, -1 otherwise.
+ */
+static int find_shallowest_idle_in_group(struct shallowest_idle *shidle,
+ struct sched_group *sg,
+ struct cpumask *cpus)
+{
+ int ret = -1;
+ int i;
+ struct rq *rq;
+ struct cpuidle_state *c_state;
+
+ for_each_cpu_and(i, sched_group_span(sg), cpus) {
+ if (available_idle_cpu(i)) {
+ rq = cpu_rq(i);
+ c_state = idle_get_state(rq);
+ /* Strictly shallower idle state: new best candidate. */
+ if (c_state && c_state->exit_latency <
+ shidle->exit_latency) {
+ shidle->c_state = c_state;
+ shidle->exit_latency = c_state->exit_latency;
+ shidle->idle_stamp = rq->idle_stamp;
+ shidle->cpu = i;
+ shidle->sg = sg;
+ ret = 0;
+ /* Tie (or no state): prefer the most recently idled CPU. */
+ } else if ((!c_state || c_state->exit_latency ==
+ shidle->exit_latency) &&
+ rq->idle_stamp > shidle->idle_stamp) {
+ shidle->idle_stamp = rq->idle_stamp;
+ shidle->cpu = i;
+ shidle->sg = sg;
+ ret = 0;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * find_idle_cpu_in_domain - pick the shallowest idle CPU allowed for @p.
+ * @sd:       highest sched_domain to consider; we descend to the lowest
+ *            level whose flags include @sd_flags before scanning
+ * @this_cpu: CPU doing the wakeup, used to detect the local group
+ * @sd_flags: SD_BALANCE_* flag(s) the chosen domain must support
+ * @p:        task being placed; only CPUs in its affinity mask qualify
+ * @idle_cpu: out parameter, set to the chosen CPU on success
+ *
+ * Walks every group of the chosen domain looking for the idle CPU with
+ * the smallest cpuidle exit_latency (ties broken by most recent
+ * idle_stamp).  An idle CPU found in @this_cpu's own group short-circuits
+ * the search, as waking there is cheapest.
+ *
+ * Returns 0 and sets *@idle_cpu on success, -1 if no suitable idle CPU
+ * (or no domain level with @sd_flags) exists.
+ */
+static int find_idle_cpu_in_domain(struct sched_domain *sd, int this_cpu,
+				   int sd_flags, struct task_struct *p,
+				   int *idle_cpu)
+{
+	struct sched_group *sg;
+	int ret = -1;
+	struct shallowest_idle shidle = {
+		.exit_latency = UINT_MAX,
+		.cpu = -1,
+		.idle_stamp = 0,
+	};
+	int local_sg;
+
+	/* Descend to the lowest domain level that supports sd_flags. */
+	while (sd && !(sd->flags & sd_flags))
+		sd = sd->child;
+
+	/*
+	 * No level carries sd_flags: bail out instead of dereferencing
+	 * a NULL sd below.
+	 */
+	if (!sd)
+		return -1;
+
+	sg = sd->groups;
+	do {
+		if (!cpumask_intersects(sched_group_span(sg),
+					&p->cpus_allowed))
+			continue;
+
+		ret = find_shallowest_idle_in_group(&shidle, sg,
+						    &p->cpus_allowed);
+		/*
+		 * Promote an idle CPU found in the local group: must be
+		 * re-tested per group, since the first group may have been
+		 * skipped by the affinity check above.
+		 */
+		local_sg = cpumask_test_cpu(this_cpu, sched_group_span(sg));
+		if (!ret && local_sg) {
+			*idle_cpu = shidle.cpu;
+			return 0;
+		}
+	} while (sg = sg->next, sg != sd->groups);
+
+	if (shidle.cpu != -1) {
+		*idle_cpu = shidle.cpu;
+		return 0;
+	}
+
+	return -1;
+}
+
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
}
if (unlikely(sd)) {
+ int idle_cpu, ret;
+
+ ret = find_idle_cpu_in_domain(sd, cpu, sd_flag, p,
+ &idle_cpu);
+ if (!ret) {
+ new_cpu = idle_cpu;
+ goto unlock;
+ }
/* Slow path */
new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
} else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */