sched/fair: Remove wake_cap()
author     Morten Rasmussen <morten.rasmussen@arm.com>
Thu, 6 Feb 2020 19:19:57 +0000 (19:19 +0000)
committer  Thomas Gleixner <tglx@linutronix.de>
Thu, 20 Feb 2020 20:03:15 +0000 (21:03 +0100)
Capacity-awareness in the wake-up path previously involved disabling
wake_affine in certain scenarios. We have just made select_idle_sibling()
capacity-aware, so this isn't needed anymore.

Remove wake_cap() entirely.
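
For context, the check being removed keyed off a capacity margin of max_cap >> 3
(12.5%): wake_affine was only vetoed when the smaller of the waking and previous
CPU's original capacity fell more than that margin short of the root domain's
biggest CPU, and the task then failed task_fits_capacity() on that smaller
capacity. Below is a minimal userspace sketch of just the margin arithmetic; it
is not kernel code, and the capacity values are hypothetical examples on the
usual 1024 (SCHED_CAPACITY_SCALE) scale.

#include <stdio.h>

/*
 * Illustration only: mirrors the margin test in the removed wake_cap(),
 *
 *     if (max_cap - min_cap < max_cap >> 3)
 *             return 0;
 *
 * i.e. wake_affine stayed enabled whenever the smallest relevant CPU
 * capacity was within 1/8th of the biggest one.
 */
static int capacity_margin_exceeded(long min_cap, long max_cap)
{
	return max_cap - min_cap >= max_cap >> 3;
}

int main(void)
{
	/* 924 vs 1024: gap of 100 < 128, margin not exceeded -> wake_affine kept */
	printf("%d\n", capacity_margin_exceeded(924, 1024));
	/* 512 vs 1024: gap of 512 >= 128, margin exceeded -> fit check ran */
	printf("%d\n", capacity_margin_exceeded(512, 1024));
	return 0;
}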

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
[Changelog tweaks]
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[Changelog tweaks]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20200206191957.12325-5-valentin.schneider@arm.com
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6fb47a2..a7e11b1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6146,33 +6146,6 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 }
 
 /*
- * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
- * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
- *
- * In that case WAKE_AFFINE doesn't make sense and we'll let
- * BALANCE_WAKE sort things out.
- */
-static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
-{
-       long min_cap, max_cap;
-
-       if (!static_branch_unlikely(&sched_asym_cpucapacity))
-               return 0;
-
-       min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
-       max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
-
-       /* Minimum capacity is close to max, no need to abort wake_affine */
-       if (max_cap - min_cap < max_cap >> 3)
-               return 0;
-
-       /* Bring task utilization in sync with prev_cpu */
-       sync_entity_load_avg(&p->se);
-
-       return !task_fits_capacity(p, min_cap);
-}
-
-/*
  * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
  * to @dst_cpu.
  */
@@ -6436,8 +6409,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                        new_cpu = prev_cpu;
                }
 
-               want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
-                             cpumask_test_cpu(cpu, p->cpus_ptr);
+               want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
        }
 
        rcu_read_lock();