x86/percpu, sched/fair: Avoid local_clock()
author Peter Zijlstra <peterz@infradead.org>
Wed, 27 Feb 2019 09:27:58 +0000 (10:27 +0100)
committer Ingo Molnar <mingo@kernel.org>
Mon, 17 Jun 2019 10:43:43 +0000 (12:43 +0200)
Nadav reported that code generation changed because of the this_cpu_*()
constraints; avoid this for select_idle_cpu(), which runs with preemption
(and IRQs) disabled anyway: read the CPU number once with smp_processor_id()
and use cpu_clock() instead of local_clock().
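
A minimal sketch of the pattern applied below (not the exact kernel code):
with preemption already disabled, the CPU number can be read once with
smp_processor_id() and passed to cpu_clock(), which keeps the this_cpu_*()
accessors out of the timed scan path:

	int cpu = smp_processor_id();	/* safe: preemption is disabled */
	u64 t0, t1;

	t0 = cpu_clock(cpu);		/* per-CPU scheduler clock, in ns */
	/* ... scan candidate CPUs ... */
	t1 = cpu_clock(cpu);
	/* t1 - t0 is the scan cost charged to avg_scan_cost */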

Reported-by: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c

index f35930f..8591529 100644
@@ -6189,6 +6189,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        u64 time, cost;
        s64 delta;
        int cpu, nr = INT_MAX;
+       int this = smp_processor_id();
 
        this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
        if (!this_sd)
@@ -6212,7 +6213,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
                        nr = 4;
        }
 
-       time = local_clock();
+       time = cpu_clock(this);
 
        for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
                if (!--nr)
@@ -6223,7 +6224,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
                        break;
        }
 
-       time = local_clock() - time;
+       time = cpu_clock(this) - time;
        cost = this_sd->avg_scan_cost;
        delta = (s64)(time - cost) / 8;
        this_sd->avg_scan_cost += delta;
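
The tail of the hunk keeps this_sd->avg_scan_cost as an exponential moving
average with 1/8 weight: avg += (sample - avg) / 8, so a single long scan
only nudges the average. A small standalone illustration with made-up
values (plain userspace C, not kernel code):

	#include <stdio.h>

	/* Same update rule as avg_scan_cost: avg += (sample - avg) / 8 */
	static long long ewma8(long long avg, long long sample)
	{
		return avg + (sample - avg) / 8;
	}

	int main(void)
	{
		long long avg = 8000;	/* assumed prior average scan cost, ns */
		long long scans[] = { 8800, 7200, 16000 };
		int i;

		for (i = 0; i < 3; i++) {
			avg = ewma8(avg, scans[i]);
			printf("scan=%lld ns -> avg=%lld ns\n", scans[i], avg);
		}
		return 0;
	}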