sched/core: Optimize ttwu_stat()
author     Peter Zijlstra <peterz@infradead.org>
           Tue, 16 Jan 2018 19:51:06 +0000 (20:51 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 6 Feb 2018 09:20:31 +0000 (10:20 +0100)
The whole of ttwu_stat() is guarded by a single schedstat_enabled()
check; there is absolutely no point in then issuing another
static-branch test for every single schedstat_inc() in there.

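For reference, here is a sketch of the expansion being avoided. The
macro bodies match kernel/sched/stats.h below; the surrounding snippet
is illustrative, not verbatim kernel code:

	/* Before: every schedstat_inc() re-tests the sched_schedstats key. */
	if (static_branch_unlikely(&sched_schedstats)) { rq->ttwu_count++; }
	if (static_branch_unlikely(&sched_schedstats)) { p->se.statistics.nr_wakeups++; }

	/* After: the single schedstat_enabled() test at the top of
	 * ttwu_stat() already guards the whole function, so the raw
	 * increment is enough. */
	if (!schedstat_enabled())
		return;
	__schedstat_inc(rq->ttwu_count);		/* plain increment */
	__schedstat_inc(p->se.statistics.nr_wakeups);	/* no static branch */
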
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/stats.h

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ee420d7..b40540e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1630,16 +1630,16 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 
 #ifdef CONFIG_SMP
        if (cpu == rq->cpu) {
-               schedstat_inc(rq->ttwu_local);
-               schedstat_inc(p->se.statistics.nr_wakeups_local);
+               __schedstat_inc(rq->ttwu_local);
+               __schedstat_inc(p->se.statistics.nr_wakeups_local);
        } else {
                struct sched_domain *sd;
 
-               schedstat_inc(p->se.statistics.nr_wakeups_remote);
+               __schedstat_inc(p->se.statistics.nr_wakeups_remote);
                rcu_read_lock();
                for_each_domain(rq->cpu, sd) {
                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-                               schedstat_inc(sd->ttwu_wake_remote);
+                               __schedstat_inc(sd->ttwu_wake_remote);
                                break;
                        }
                }
@@ -1647,14 +1647,14 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
        }
 
        if (wake_flags & WF_MIGRATED)
-               schedstat_inc(p->se.statistics.nr_wakeups_migrate);
+               __schedstat_inc(p->se.statistics.nr_wakeups_migrate);
 #endif /* CONFIG_SMP */
 
-       schedstat_inc(rq->ttwu_count);
-       schedstat_inc(p->se.statistics.nr_wakeups);
+       __schedstat_inc(rq->ttwu_count);
+       __schedstat_inc(p->se.statistics.nr_wakeups);
 
        if (wake_flags & WF_SYNC)
-               schedstat_inc(p->se.statistics.nr_wakeups_sync);
+               __schedstat_inc(p->se.statistics.nr_wakeups_sync);
 }
 
 static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index baf500d..61500bb 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -31,6 +31,7 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
                rq->rq_sched_info.run_delay += delta;
 }
 #define schedstat_enabled()            static_branch_unlikely(&sched_schedstats)
+#define __schedstat_inc(var)           do { var++; } while (0)
 #define schedstat_inc(var)             do { if (schedstat_enabled()) { var++; } } while (0)
 #define schedstat_add(var, amt)                do { if (schedstat_enabled()) { var += (amt); } } while (0)
 #define schedstat_set(var, val)                do { if (schedstat_enabled()) { var = (val); } } while (0)
@@ -48,6 +49,7 @@ static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {}
 #define schedstat_enabled()            0
+#define __schedstat_inc(var)           do { } while (0)
 #define schedstat_inc(var)             do { } while (0)
 #define schedstat_add(var, amt)                do { } while (0)
 #define schedstat_set(var, val)                do { } while (0)
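
Note that __schedstat_inc() deliberately omits the static-branch test,
so it is only correct under an explicit schedstat_enabled() guard. A
sketch of the intended calling convention (the function name here is
made up; the counters are the ones from the hunks above):

	static void account_wakeup(struct rq *rq, struct task_struct *p)
	{
		if (!schedstat_enabled())
			return;

		/* One static-branch test above covers every raw
		 * increment below. */
		__schedstat_inc(rq->ttwu_count);
		__schedstat_inc(p->se.statistics.nr_wakeups);
	}

Calling __schedstat_inc() outside such a guard would update the
counters even while schedstats are runtime-disabled, touching the cold
statistics cachelines that schedstat_inc() exists to skip.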