sched/fair: change idle group selection sandbox/lluba/eas/20171211_0913/v4.14_tizen
authorLukasz Luba <l.luba@partner.samsung.com>
Mon, 27 Aug 2018 12:36:21 +0000 (14:36 +0200)
committerLukasz Luba <l.luba@partner.samsung.com>
Thu, 16 May 2019 13:21:51 +0000 (15:21 +0200)
Check whether any of the CPUs in the group are actually idle and,
if so, pick that group. If the balance was triggered by fork and
all CPUs are loaded, keep the task in the current group.
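
For illustration only, the following minimal userspace C sketch models the
intended decision order: prefer a group with an idle CPU, fall back to the
current group on a fork balance when every CPU is busy, and otherwise use the
least loaded group. struct toy_group, pick_group() and the balance_fork flag
are invented for this sketch and do not exist in kernel/sched/fair.c.

/*
 * Minimal userspace sketch (not kernel code) of the decision order this
 * patch aims for.  struct toy_group, pick_group() and balance_fork are
 * invented for illustration; the real logic lives in find_idlest_group()
 * in kernel/sched/fair.c.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_group {
	bool has_idle_cpu;	/* does any CPU in this group look idle? */
};

static const struct toy_group *
pick_group(const struct toy_group *groups, size_t n,
	   const struct toy_group *current_group, bool balance_fork,
	   const struct toy_group *idlest)
{
	size_t i;

	/* 1) Prefer a group that currently has an idle CPU. */
	for (i = 0; i < n; i++)
		if (groups[i].has_idle_cpu)
			return &groups[i];

	/* 2) Fork balance with every CPU busy: keep the task where it is. */
	if (balance_fork)
		return current_group;

	/* 3) Otherwise fall back to the least loaded ("idlest") group. */
	return idlest;
}

int main(void)
{
	struct toy_group g[2] = { { .has_idle_cpu = false },
				  { .has_idle_cpu = true } };
	const struct toy_group *chosen;

	chosen = pick_group(g, 2, &g[0], true, &g[0]);
	printf("chosen group: %ld\n", (long)(chosen - g));	/* prints 1 */
	return 0;
}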

Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com>
include/trace/events/sched.h
kernel/sched/fair.c

index a251458d2a27d798cb8052ec6efc9813823e6167..359c6f7f17e3e36d74a4b1fc1a2573b6bab58945 100644 (file)
@@ -885,6 +885,29 @@ TRACE_EVENT(sched_need_active_balance,
        TP_printk("needed=%d", __entry->needed)
 );
 
+TRACE_EVENT(sched_find_idlest_cpu,
+
+       TP_PROTO(const struct cpumask *group_cpus, int cpu, int new_cpu),
+
+       TP_ARGS(group_cpus, cpu, new_cpu),
+
+       TP_STRUCT__entry(
+               __bitmask(cpumask, num_possible_cpus())
+               __field(        int,    cpu     )
+               __field(        int,    new_cpu )
+       ),
+
+       TP_fast_assign(
+               __assign_bitmask(cpumask, cpumask_bits(group_cpus),
+                                num_possible_cpus());
+               __entry->cpu            = cpu;
+               __entry->new_cpu        = new_cpu;
+       ),
+
+       TP_printk("group_cpus=%s cpu=%d new_cpu=%d",
+                __get_bitmask(cpumask), __entry->cpu, __entry->new_cpu)
+);
+
 #endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
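
Once the patch is applied, the new event should appear under the sched group
in tracefs. Below is a rough userspace sketch for enabling it and streaming
the formatted output; the /sys/kernel/tracing prefix is an assumption (many
setups mount tracefs under /sys/kernel/debug/tracing instead), and it needs
to run as root.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static const char enable_path[] =
	"/sys/kernel/tracing/events/sched/sched_find_idlest_cpu/enable";
static const char pipe_path[] = "/sys/kernel/tracing/trace_pipe";

int main(void)
{
	char line[512];
	FILE *tp;
	int fd;

	/* Turn the tracepoint on by writing '1' to its enable file. */
	fd = open(enable_path, O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable sched_find_idlest_cpu");
		return 1;
	}
	close(fd);

	/* Stream formatted events; each line carries group_cpus/cpu/new_cpu. */
	tp = fopen(pipe_path, "r");
	if (!tp) {
		perror("open trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), tp))
		fputs(line, stdout);

	fclose(tp);
	return 0;
}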
 
index d6c9e4b413305c69f72fad416d6491d72829c651..6a20f1c0e6fa417bb914e1efe6431e1a928d9452 100644 (file)
@@ -6214,6 +6214,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 {
        struct sched_group *idlest = NULL, *group = sd->groups;
        struct sched_group *most_spare_sg = NULL;
+       struct sched_group *group_with_idle = NULL;
        unsigned long min_runnable_load = ULONG_MAX;
        unsigned long this_runnable_load = ULONG_MAX;
        unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
@@ -6222,6 +6223,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
        int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
        unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
                                (sd->imbalance_pct-100) / 100;
+       bool found_local_idle = false;
+       int found_idle_cpu = -1;
 
        if (sd_flag & SD_BALANCE_WAKE)
                load_idx = sd->wake_idx;
@@ -6263,6 +6266,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                        if (spare_cap > max_spare_cap)
                                max_spare_cap = spare_cap;
+
+                       /* If there is an idle CPU, remember it and its group */
+                       if (idle_cpu(i)) {
+                               if (found_local_idle)
+                                       continue;
+                               if (local_group)
+                                       found_local_idle = true;
+
+                               found_idle_cpu = i;
+                               group_with_idle = group;
+                       }
                }
 
                /* Adjust by relative CPU capacity of the group */
@@ -6313,7 +6327,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
         * utilization.
         */
        if (sd_flag & SD_BALANCE_FORK)
-               goto skip_spare;
+               goto try_skip_packing;
 
        if (this_spare > task_util(p) / 2 &&
            imbalance_scale*this_spare > 100*most_spare)
@@ -6322,7 +6336,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
        if (most_spare > task_util(p) / 2)
                return most_spare_sg;
 
-skip_spare:
+try_skip_packing:
+       if (found_idle_cpu != -1)
+               return group_with_idle;
+
        if (!idlest)
                return NULL;
 
@@ -6333,6 +6350,10 @@ skip_spare:
             (100*this_avg_load < imbalance_scale*min_avg_load))
                return NULL;
 
+       /* Last resort: all CPUs are loaded, so stay in the current group */
+       if (found_idle_cpu == -1 && sd_flag & SD_BALANCE_FORK)
+               return NULL;
+
        return idlest;
 }
 
@@ -6393,6 +6414,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
                                  int cpu, int prev_cpu, int sd_flag)
 {
        int new_cpu = cpu;
+       int prop_cpu = cpu;
 
        if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
                return prev_cpu;
@@ -6414,6 +6436,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
                }
 
                new_cpu = find_idlest_group_cpu(group, p, cpu);
+               trace_sched_find_idlest_cpu(sched_group_span(group), cpu,
+                                           new_cpu);
                if (new_cpu == cpu) {
                        /* Now try balancing at a lower domain level of cpu */
                        sd = sd->child;