}
#endif
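+
+/*
+ * check_cpu_spare_capacity - check whether @cpu has at least
+ * @needed_spare_capacity of capacity left on top of its current utilization.
+ */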
+static inline bool check_cpu_spare_capacity(int cpu,
+					     unsigned int needed_spare_capacity)
+{
+	return capacity_of(cpu) > cpu_util(cpu) + needed_spare_capacity;
+}
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
#define MAX_PINNED_INTERVAL 512
+
+static inline bool check_cpu_lite_util(int cpu)
+{
+	/* Lite utilization is defined as less than ~6% of capacity */
+	return (capacity_of(cpu) >> 4) >= cpu_util(cpu);
+}
+
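+/*
+ * need_park_into_spare_capacity - decide whether the lone task on an
+ * overutilized @env->src_cpu should be actively migrated to the
+ * higher-capacity, non-overutilized @env->dst_cpu.
+ *
+ * The destination must either have spare capacity exceeding half of the
+ * source CPU's utilization, or be lightly utilized (see
+ * check_cpu_lite_util()).
+ */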
+static inline int need_park_into_spare_capacity(struct lb_env *env)
+{
+	bool fits_in = check_cpu_spare_capacity(env->dst_cpu,
+						cpu_util(env->src_cpu) / 2);
+	int ret = 0;
+
+	if (capacity_of(env->src_cpu) < capacity_of(env->dst_cpu) &&
+	    env->src_rq->cfs.h_nr_running == 1 &&
+	    cpu_overutilized(env->src_cpu) &&
+	    !cpu_overutilized(env->dst_cpu) &&
+	    (fits_in || check_cpu_lite_util(env->dst_cpu)))
+		ret = 1;
+
+	trace_sched_migrate_capacity_comparison(env->src_cpu, env->dst_cpu,
+						capacity_of(env->src_cpu),
+						capacity_of(env->dst_cpu),
+						cpu_util(env->src_cpu),
+						cpu_util(env->dst_cpu), ret);
+
+	return ret;
+}
+
static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
+	int need_balance = sd->nr_balance_failed > sd->cache_nice_tries + 2;
+
if (env->idle == CPU_NEWLY_IDLE) {
return 1;
}
- return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
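+	/*
+	 * Actively migrate a lone task off an overutilized lower-capacity
+	 * CPU when the destination CPU has room for it.
+	 */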
+ if (need_park_into_spare_capacity(env))
+ return 1;
+
+ trace_sched_need_active_balance(need_balance);
+	return need_balance;
}
static int active_load_balance_cpu_stop(void *data);