sched/uclamp: Make select_idle_capacity() use util_fits_cpu()
authorQais Yousef <qais.yousef@arm.com>
Thu, 4 Aug 2022 14:36:04 +0000 (15:36 +0100)
committerPeter Zijlstra <peterz@infradead.org>
Thu, 27 Oct 2022 09:01:18 +0000 (11:01 +0200)
Use the new util_fits_cpu() to ensure migration margin and capacity
pressure are taken into account correctly when uclamp is being used;
otherwise we will fail to consider CPUs as fitting in scenarios where
they should.

Fixes: b4c9c9f15649 ("sched/fair: Prefer prev cpu in asymmetric wakeup path")
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220804143609.515789-5-qais.yousef@arm.com
kernel/sched/fair.c

index c8eb5ff..c877bbf 100644 (file)
@@ -6779,21 +6779,23 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 static int
 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 {
-       unsigned long task_util, best_cap = 0;
+       unsigned long task_util, util_min, util_max, best_cap = 0;
        int cpu, best_cpu = -1;
        struct cpumask *cpus;
 
        cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
        cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
-       task_util = uclamp_task_util(p);
+       task_util = task_util_est(p);
+       util_min = uclamp_eff_value(p, UCLAMP_MIN);
+       util_max = uclamp_eff_value(p, UCLAMP_MAX);
 
        for_each_cpu_wrap(cpu, cpus, target) {
                unsigned long cpu_cap = capacity_of(cpu);
 
                if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
                        continue;
-               if (fits_capacity(task_util, cpu_cap))
+               if (util_fits_cpu(task_util, util_min, util_max, cpu))
                        return cpu;
 
                if (cpu_cap > best_cap) {