sched/fair: Add cfs bandwidth burst statistics
author	Huaixin Chang <changhuaixin@linux.alibaba.com>
Mon, 30 Aug 2021 03:22:14 +0000 (11:22 +0800)
committer	Peter Zijlstra <peterz@infradead.org>
Tue, 5 Oct 2021 13:51:40 +0000 (15:51 +0200)
Two new statistics are introduced to show the internals of the burst
feature and explain why burst helps or not.

nr_bursts:  number of periods in which a bandwidth burst occurred
burst_time: cumulative wall-time (in nanoseconds) that CPUs have
    used above quota in the respective periods
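
As an illustration (numbers hypothetical): with quota = 100ms and
burst = 50ms per 100ms period, a period in which the group consumes
120ms of CPU time counts as one burst, since 20ms ran above quota:

	over        = 120ms - 100ms = 20ms
	nr_bursts  += 1
	burst_time += 20 * NSEC_PER_MSEC	/* 20000000 ns */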

Co-developed-by: Shanpei Chen <shanpeic@linux.alibaba.com>
Signed-off-by: Shanpei Chen <shanpeic@linux.alibaba.com>
Co-developed-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Signed-off-by: Huaixin Chang <changhuaixin@linux.alibaba.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20210830032215.16302-2-changhuaixin@linux.alibaba.com
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f963c81..ccb604a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10406,6 +10406,9 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
                seq_printf(sf, "wait_sum %llu\n", ws);
        }
 
+       seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
+       seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
+
        return 0;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -10521,16 +10524,20 @@ static int cpu_extra_stat_show(struct seq_file *sf,
        {
                struct task_group *tg = css_tg(css);
                struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
-               u64 throttled_usec;
+               u64 throttled_usec, burst_usec;
 
                throttled_usec = cfs_b->throttled_time;
                do_div(throttled_usec, NSEC_PER_USEC);
+               burst_usec = cfs_b->burst_time;
+               do_div(burst_usec, NSEC_PER_USEC);
 
                seq_printf(sf, "nr_periods %d\n"
                           "nr_throttled %d\n"
-                          "throttled_usec %llu\n",
+                          "throttled_usec %llu\n"
+                          "nr_bursts %d\n"
+                          "burst_usec %llu\n",
                           cfs_b->nr_periods, cfs_b->nr_throttled,
-                          throttled_usec);
+                          throttled_usec, cfs_b->nr_burst, burst_usec);
        }
 #endif
        return 0;
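
With the two hunks above, both cgroup interfaces expose the new
counters. A hypothetical read (values illustrative only) would show,
in cgroup v1 cpu.stat, the raw nanosecond counters:

	nr_bursts 1
	burst_time 20000000

and, in cgroup v2 cpu.stat, the microsecond-converted value next to
the existing throttling statistics:

	nr_periods 345
	nr_throttled 12
	throttled_usec 123456
	nr_bursts 1
	burst_usec 20000
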
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5457c80..fd41abe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4715,11 +4715,20 @@ static inline u64 sched_cfs_bandwidth_slice(void)
  */
 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
 {
+       s64 runtime;
+
        if (unlikely(cfs_b->quota == RUNTIME_INF))
                return;
 
        cfs_b->runtime += cfs_b->quota;
+       runtime = cfs_b->runtime_snap - cfs_b->runtime;
+       if (runtime > 0) {
+               cfs_b->burst_time += runtime;
+               cfs_b->nr_burst++;
+       }
+
        cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
+       cfs_b->runtime_snap = cfs_b->runtime;
 }
 
 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
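
The accounting in __refill_cfs_bandwidth_runtime() works because
cfs_b->runtime_snap records the runtime level left by the previous
refill: once a fresh quota has been added, runtime_snap - runtime is
exactly the time consumed above quota during the elapsed period. A
minimal userspace sketch of this arithmetic (illustrative values,
plain C, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_MSEC 1000000ULL

	int main(void)
	{
		uint64_t quota   = 100 * NSEC_PER_MSEC;	/* 100ms quota          */
		uint64_t burst   =  50 * NSEC_PER_MSEC;	/*  50ms burst headroom */
		uint64_t runtime = quota + burst;	/* fully refilled       */
		uint64_t runtime_snap = runtime;
		uint64_t burst_time = 0;
		int nr_burst = 0;

		/* One period consumes 120ms: 20ms above quota, within burst. */
		runtime -= 120 * NSEC_PER_MSEC;

		/* Refill, mirroring __refill_cfs_bandwidth_runtime(). */
		runtime += quota;
		int64_t over = (int64_t)(runtime_snap - runtime); /* consumed - quota */
		if (over > 0) {
			burst_time += over;
			nr_burst++;
		}
		if (runtime > quota + burst)
			runtime = quota + burst;
		runtime_snap = runtime;

		printf("nr_burst=%d burst_time=%llu ns\n",
		       nr_burst, (unsigned long long)burst_time);
		/* prints: nr_burst=1 burst_time=20000000 ns */
		return 0;
	}

Tracking a snapshot instead of per-period consumption keeps the hot
path untouched: only the once-per-period refill does the extra
subtraction.
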
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 15a8895..8712fc4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -369,6 +369,7 @@ struct cfs_bandwidth {
        u64                     quota;
        u64                     runtime;
        u64                     burst;
+       u64                     runtime_snap;
        s64                     hierarchical_quota;
 
        u8                      idle;
@@ -381,7 +382,9 @@ struct cfs_bandwidth {
        /* Statistics: */
        int                     nr_periods;
        int                     nr_throttled;
+       int                     nr_burst;
        u64                     throttled_time;
+       u64                     burst_time;
 #endif
 };