}
#endif
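+/*
+ * Return true if @cpu still has headroom for @needed_spare_capacity worth
+ * of extra utilization; half of the requested amount is treated as enough
+ * spare capacity.
+ */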
+static inline bool check_cpu_spare_capacity(int cpu,
+ unsigned int needed_spare_capacity)
+{
+ return (capacity_of(cpu) >
+ (cpu_util(cpu) + (needed_spare_capacity / 2)));
+}
+
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
- int tsk_cache_hot;
+ int tsk_cache_hot = 0;
+ int ret = 0;
lockdep_assert_held(&env->src_rq->lock);
* 3) running (obviously), or
* 4) are cache-hot on their current CPU.
*/
- if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
- return 0;
+ if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) {
+ ret = 0;
+ goto out;
+ }
if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
int cpu;
* Avoid computing new_dst_cpu for NEWLY_IDLE or if we have
* already computed one in current iteration.
*/
- if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED))
- return 0;
+ if (env->idle == CPU_NEWLY_IDLE ||
+ (env->flags & LBF_DST_PINNED)) {
+ ret = 0;
+ goto out;
+ }
/* Prevent re-selecting dst_cpu via env's cpus */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
}
}
- return 0;
+ ret = 0;
+ goto out;
}
/* Record that we found at least one task that could run on dst_cpu */
if (task_running(env->src_rq, p)) {
schedstat_inc(p->se.statistics.nr_failed_migrations_running);
- return 0;
+ ret = 0;
+ goto out;
}
/*
schedstat_inc(env->sd->lb_hot_gained[env->idle]);
schedstat_inc(p->se.statistics.nr_forced_migrations);
}
- return 1;
+ ret = 1;
+ goto out;
}
schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
- return 0;
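+ /*
+ * All return paths funnel through here so the migration decision can
+ * be traced together with the cache-hot state.
+ */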
+out:
+ trace_sched_can_migrate_task(p->pid, ret, tsk_cache_hot, env->src_cpu,
+ env->dst_cpu);
+ return ret;
}
/*
*/
#define MAX_PINNED_INTERVAL 512
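+/*
+ * Active balancing pays off when the over-utilized source CPU runs a single
+ * CFS task and the higher-capacity destination CPU is not over-utilized and
+ * has enough spare capacity to absorb that task's load.
+ */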
+static inline int need_park_into_spare_capacity(struct lb_env *env)
+{
+ bool fits_in = check_cpu_spare_capacity(env->dst_cpu,
+ cpu_util(env->src_cpu));
+ int ret;
+
+ if ((capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
+ env->src_rq->cfs.h_nr_running == 1 &&
+ cpu_overutilized(env->src_cpu) &&
+ !cpu_overutilized(env->dst_cpu) &&
+ fits_in) {
+ ret = 1;
+ } else {
+ ret = 0;
+ }
+
+ trace_sched_migrate_capacity_comparison(env->src_cpu, env->dst_cpu,
+ capacity_of(env->src_cpu),
+ capacity_of(env->dst_cpu),
+ cpu_util(env->src_cpu),
+ cpu_util(env->dst_cpu), ret);
+
+ return ret;
+}
+
static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
+ int need_balance = sd->nr_balance_failed > sd->cache_nice_tries + 2;
if (env->idle == CPU_NEWLY_IDLE) {
return 1;
}
- if ((capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
- env->src_rq->cfs.h_nr_running == 1 &&
- cpu_overutilized(env->src_cpu) &&
- !cpu_overutilized(env->dst_cpu)) {
- return 1;
- }
+ if (need_park_into_spare_capacity(env))
+ return 1;
- return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+ trace_sched_need_active_balance(need_balance);
+ return need_balance;
}
static int active_load_balance_cpu_stop(void *data);
if (need_active_balance(&env)) {
unsigned long flags;
+
raw_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the active_load_balance_cpu_stop,
struct task_struct *p = NULL;
struct rq_flags rf;
+ trace_sched_active_lb_stop_cpu(busiest_cpu, target_cpu);
+
rq_lock_irq(busiest_rq, &rf);
/*
* Between queueing the stop-work and running it is a hole in which