sched/fair: change active balance path and add tracing
author		Lukasz Luba <l.luba@partner.samsung.com>
		Mon, 3 Sep 2018 13:55:40 +0000 (15:55 +0200)
committer	Lukasz Luba <l.luba@partner.samsung.com>
		Wed, 26 Sep 2018 10:34:27 +0000 (12:34 +0200)
The patch changes the default EAS behavior in the active balance path.
It tries to bring all CPUs into use for some sysbench workloads.

Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com>
kernel/sched/fair.c

index 20146270020dbeca3a37914d70584b373bc16983..815fab24aa4c23d9c1ec5bae3b1e3c7d77582253 100644 (file)
@@ -7225,6 +7225,13 @@ static inline int migrate_degrades_locality(struct task_struct *p,
 }
 #endif
 
+static inline bool check_cpu_spare_capacity(int cpu,
+                                           unsigned int needed_spare_capacity)
+{
+       return (capacity_of(cpu) >
+               (cpu_util(cpu) + needed_spare_capacity));
+}
+
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
@@ -8716,9 +8723,42 @@ static struct rq *find_busiest_queue(struct lb_env *env,
  */
 #define MAX_PINNED_INTERVAL    512
 
+static inline bool check_cpu_lite_util(int cpu)
+{
+       /* Lite utilization is defined as less than ~6% (1/16) of capacity */
+       return (capacity_of(cpu) >> 4 >= cpu_util(cpu));
+}
+
+static inline int need_park_into_spare_capacity(struct lb_env *env)
+{
+       bool fits_in = check_cpu_spare_capacity(env->dst_cpu,
+                                               cpu_util(env->src_cpu) / 2);
+       int ret;
+
+       if ((capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
+                               env->src_rq->cfs.h_nr_running == 1 &&
+                               cpu_overutilized(env->src_cpu) &&
+                               !cpu_overutilized(env->dst_cpu) &&
+                               (fits_in || check_cpu_lite_util(env->dst_cpu))) {
+               ret = 1;
+       } else {
+               ret = 0;
+       }
+
+       trace_sched_migrate_capacity_comparison(env->src_cpu, env->dst_cpu,
+                                               capacity_of(env->src_cpu),
+                                               capacity_of(env->dst_cpu),
+                                               cpu_util(env->src_cpu),
+                                               cpu_util(env->dst_cpu), ret);
+
+       return ret;
+}
+
 static int need_active_balance(struct lb_env *env)
 {
        struct sched_domain *sd = env->sd;
+       int need_balance = sd->nr_balance_failed > sd->cache_nice_tries + 2;
 
        if (env->idle == CPU_NEWLY_IDLE) {
 
@@ -8755,7 +8795,11 @@ static int need_active_balance(struct lb_env *env)
                return 1;
        }
 
-       return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+       if (need_park_into_spare_capacity(env))
+               return 1;
+
+       trace_sched_need_active_balance(need_balance);
+       return 0;
 }
 
 static int active_load_balance_cpu_stop(void *data);
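
Note: the two tracepoints used above, trace_sched_migrate_capacity_comparison() and
trace_sched_need_active_balance(), are not defined in this hunk. Below is a minimal
sketch of what their definitions in include/trace/events/sched.h could look like,
assuming capacity_of() and cpu_util() return unsigned long; the field names and the
printk format strings are illustrative, not taken from the actual patch.

	TRACE_EVENT(sched_migrate_capacity_comparison,

		TP_PROTO(int src_cpu, int dst_cpu,
			 unsigned long src_cap, unsigned long dst_cap,
			 unsigned long src_util, unsigned long dst_util,
			 int decision),

		TP_ARGS(src_cpu, dst_cpu, src_cap, dst_cap, src_util, dst_util,
			decision),

		TP_STRUCT__entry(
			__field(int,		src_cpu)
			__field(int,		dst_cpu)
			__field(unsigned long,	src_cap)
			__field(unsigned long,	dst_cap)
			__field(unsigned long,	src_util)
			__field(unsigned long,	dst_util)
			__field(int,		decision)
		),

		TP_fast_assign(
			__entry->src_cpu	= src_cpu;
			__entry->dst_cpu	= dst_cpu;
			__entry->src_cap	= src_cap;
			__entry->dst_cap	= dst_cap;
			__entry->src_util	= src_util;
			__entry->dst_util	= dst_util;
			__entry->decision	= decision;
		),

		TP_printk("src_cpu=%d dst_cpu=%d src_cap=%lu dst_cap=%lu src_util=%lu dst_util=%lu decision=%d",
			  __entry->src_cpu, __entry->dst_cpu, __entry->src_cap,
			  __entry->dst_cap, __entry->src_util, __entry->dst_util,
			  __entry->decision)
	);

	TRACE_EVENT(sched_need_active_balance,

		TP_PROTO(int need_balance),

		TP_ARGS(need_balance),

		TP_STRUCT__entry(
			__field(int, need_balance)
		),

		TP_fast_assign(
			__entry->need_balance = need_balance;
		),

		TP_printk("need_balance=%d", __entry->need_balance)
	);

With definitions along these lines, the events appear under
/sys/kernel/debug/tracing/events/sched/ and can be enabled individually, e.g.:

	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_need_active_balance/enable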