sched/core: Optimize update_stats_*()
author: Peter Zijlstra <peterz@infradead.org>
Tue, 23 Jan 2018 19:34:30 +0000 (20:34 +0100)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 6 Feb 2018 09:20:32 +0000 (10:20 +0100)
These functions are already gated by schedstats_enabled(), there is no
point in then issuing another static_branch for every individual
update in them.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c
kernel/sched/stats.h

index 7b65359..a6b8157 100644 (file)
@@ -871,7 +871,7 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
            likely(wait_start > prev_wait_start))
                wait_start -= prev_wait_start;
 
-       schedstat_set(se->statistics.wait_start, wait_start);
+       __schedstat_set(se->statistics.wait_start, wait_start);
 }
 
 static inline void
@@ -893,17 +893,17 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
                         * time stamp can be adjusted to accumulate wait time
                         * prior to migration.
                         */
-                       schedstat_set(se->statistics.wait_start, delta);
+                       __schedstat_set(se->statistics.wait_start, delta);
                        return;
                }
                trace_sched_stat_wait(p, delta);
        }
 
-       schedstat_set(se->statistics.wait_max,
+       __schedstat_set(se->statistics.wait_max,
                      max(schedstat_val(se->statistics.wait_max), delta));
-       schedstat_inc(se->statistics.wait_count);
-       schedstat_add(se->statistics.wait_sum, delta);
-       schedstat_set(se->statistics.wait_start, 0);
+       __schedstat_inc(se->statistics.wait_count);
+       __schedstat_add(se->statistics.wait_sum, delta);
+       __schedstat_set(se->statistics.wait_start, 0);
 }
 
 static inline void
@@ -928,10 +928,10 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                        delta = 0;
 
                if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
-                       schedstat_set(se->statistics.sleep_max, delta);
+                       __schedstat_set(se->statistics.sleep_max, delta);
 
-               schedstat_set(se->statistics.sleep_start, 0);
-               schedstat_add(se->statistics.sum_sleep_runtime, delta);
+               __schedstat_set(se->statistics.sleep_start, 0);
+               __schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
                if (tsk) {
                        account_scheduler_latency(tsk, delta >> 10, 1);
@@ -945,15 +945,15 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                        delta = 0;
 
                if (unlikely(delta > schedstat_val(se->statistics.block_max)))
-                       schedstat_set(se->statistics.block_max, delta);
+                       __schedstat_set(se->statistics.block_max, delta);
 
-               schedstat_set(se->statistics.block_start, 0);
-               schedstat_add(se->statistics.sum_sleep_runtime, delta);
+               __schedstat_set(se->statistics.block_start, 0);
+               __schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
                if (tsk) {
                        if (tsk->in_iowait) {
-                               schedstat_add(se->statistics.iowait_sum, delta);
-                               schedstat_inc(se->statistics.iowait_count);
+                               __schedstat_add(se->statistics.iowait_sum, delta);
+                               __schedstat_inc(se->statistics.iowait_count);
                                trace_sched_stat_iowait(tsk, delta);
                        }
 
@@ -1012,10 +1012,10 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                struct task_struct *tsk = task_of(se);
 
                if (tsk->state & TASK_INTERRUPTIBLE)
-                       schedstat_set(se->statistics.sleep_start,
+                       __schedstat_set(se->statistics.sleep_start,
                                      rq_clock(rq_of(cfs_rq)));
                if (tsk->state & TASK_UNINTERRUPTIBLE)
-                       schedstat_set(se->statistics.block_start,
+                       __schedstat_set(se->statistics.block_start,
                                      rq_clock(rq_of(cfs_rq)));
        }
 }
index 61500bb..8e7b58d 100644 (file)
@@ -33,7 +33,9 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 #define schedstat_enabled()            static_branch_unlikely(&sched_schedstats)
 #define __schedstat_inc(var)           do { var++; } while (0)
 #define schedstat_inc(var)             do { if (schedstat_enabled()) { var++; } } while (0)
+#define __schedstat_add(var, amt)      do { var += (amt); } while (0)
 #define schedstat_add(var, amt)                do { if (schedstat_enabled()) { var += (amt); } } while (0)
+#define __schedstat_set(var, val)              do { var = (val); } while (0)
 #define schedstat_set(var, val)                do { if (schedstat_enabled()) { var = (val); } } while (0)
 #define schedstat_val(var)             (var)
 #define schedstat_val_or_zero(var)     ((schedstat_enabled()) ? (var) : 0)
@@ -51,7 +53,9 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 #define schedstat_enabled()            0
 #define __schedstat_inc(var)           do { } while (0)
 #define schedstat_inc(var)             do { } while (0)
+#define __schedstat_add(var, amt)      do { } while (0)
 #define schedstat_add(var, amt)                do { } while (0)
+#define __schedstat_set(var, val)      do { } while (0)
 #define schedstat_set(var, val)                do { } while (0)
 #define schedstat_val(var)             0
 #define schedstat_val_or_zero(var)     0