From: Lukasz Luba
Date: Mon, 27 Aug 2018 12:36:21 +0000 (+0200)
Subject: sched/fair: change finding idle group
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e7b73cbd1b888f50c4635359f34f0ae9a7604111;p=platform%2Fkernel%2Flinux-exynos.git

sched/fair: change finding idle group

Check whether any CPU in the group is actually idle and, if so, prefer
that group. If the balancing was triggered by a fork and all CPUs are
loaded, keep the task in the current group.

Signed-off-by: Lukasz Luba
---

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index a251458d2a27..359c6f7f17e3 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -885,6 +885,29 @@ TRACE_EVENT(sched_need_active_balance,
 	TP_printk("needed=%d", __entry->needed)
 );
 
+TRACE_EVENT(sched_find_idlest_cpu,
+
+	TP_PROTO(const struct cpumask *group_cpus, int cpu, int new_cpu),
+
+	TP_ARGS(group_cpus, cpu, new_cpu),
+
+	TP_STRUCT__entry(
+		__bitmask(cpumask, num_possible_cpus())
+		__field(	int,	cpu	)
+		__field(	int,	new_cpu	)
+	),
+
+	TP_fast_assign(
+		__assign_bitmask(cpumask, cpumask_bits(group_cpus),
+				 num_possible_cpus());
+		__entry->cpu = cpu;
+		__entry->new_cpu = new_cpu;
+	),
+
+	TP_printk("group_cpus=%s cpu=%d new_cpu=%d",
+		  __get_bitmask(cpumask), __entry->cpu, __entry->new_cpu)
+);
+
 #endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d6c9e4b41330..6a20f1c0e6fa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6214,6 +6214,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 {
 	struct sched_group *idlest = NULL, *group = sd->groups;
 	struct sched_group *most_spare_sg = NULL;
+	struct sched_group *group_with_idle = NULL;
 	unsigned long min_runnable_load = ULONG_MAX;
 	unsigned long this_runnable_load = ULONG_MAX;
 	unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
@@ -6222,6 +6223,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
 	unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
 				(sd->imbalance_pct-100) / 100;
+	bool found_local_idle = false;
+	int found_idle_cpu = -1;
 
 	if (sd_flag & SD_BALANCE_WAKE)
 		load_idx = sd->wake_idx;
@@ -6263,6 +6266,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
 			if (spare_cap > max_spare_cap)
 				max_spare_cap = spare_cap;
+
+			/* If there is an idle CPU, remember its group (prefer the local one) */
+			if (idle_cpu(i)) {
+				if (found_local_idle)
+					continue;
+				if (local_group)
+					found_local_idle = true;
+
+				found_idle_cpu = i;
+				group_with_idle = group;
+			}
 		}
 
 		/* Adjust by relative CPU capacity of the group */
@@ -6313,7 +6327,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	 * utilization.
 	 */
 	if (sd_flag & SD_BALANCE_FORK)
-		goto skip_spare;
+		goto try_skip_packing;
 
 	if (this_spare > task_util(p) / 2 &&
 	    imbalance_scale*this_spare > 100*most_spare)
@@ -6322,7 +6336,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	if (most_spare > task_util(p) / 2)
 		return most_spare_sg;
 
-skip_spare:
+try_skip_packing:
+	if (found_idle_cpu != -1)
+		return group_with_idle;
+
 	if (!idlest)
 		return NULL;
 
@@ -6333,6 +6350,10 @@ skip_spare:
 	    (100*this_avg_load < imbalance_scale*min_avg_load))
 		return NULL;
 
+	/* Last try: no idle CPU was found, so on fork stay in the current group */
+	if (found_idle_cpu == -1 && sd_flag & SD_BALANCE_FORK)
+		return NULL;
+
 	return idlest;
 }
 
@@ -6393,6 +6414,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 				  int cpu, int prev_cpu, int sd_flag)
 {
 	int new_cpu = cpu;
+	int prop_cpu = cpu;
 
 	if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
 		return prev_cpu;
@@ -6414,6 +6436,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 		}
 
 		new_cpu = find_idlest_group_cpu(group, p, cpu);
+		trace_sched_find_idlest_cpu(sched_group_span(group), cpu,
+					    new_cpu);
 		if (new_cpu == cpu) {
 			/* Now try balancing at a lower domain level of cpu */
 			sd = sd->child;
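
A quick way to observe the new sched_find_idlest_cpu tracepoint once the
patch is applied (a minimal sketch, assuming tracefs is mounted at the usual
/sys/kernel/debug/tracing location; adjust the path if your system differs):

    echo 1 > /sys/kernel/debug/tracing/events/sched/sched_find_idlest_cpu/enable
    cat /sys/kernel/debug/tracing/trace_pipe

Each record shows the cpumask of the group that was searched, the CPU the
search started from and the CPU returned by find_idlest_group_cpu(), which
makes it possible to check that fork-time placement now prefers groups that
contain an idle CPU.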