sched/events: Introduce task_group load tracking trace event
author Dietmar Eggemann <dietmar.eggemann@arm.com>
Fri, 17 Mar 2017 21:23:35 +0000 (21:23 +0000)
committer Lukasz Luba <l.luba@partner.samsung.com>
Mon, 10 Sep 2018 08:24:43 +0000 (10:24 +0200)
The trace event key load is mapped to:

 (1) load : cfs_rq->tg->load_avg

The cfs_rq owned by the task_group is used as the only parameter for the
trace event because it provides a reference to both the task_group and
the cpu. Using the task_group as a parameter instead would require the
cpu as a second parameter, since a task_group is global rather than
per-cpu data. The cpu key only indicates on which cpu the value was
gathered.
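
For illustration, a minimal sketch (using only the dereferences that
appear in the diff below) of how both values are reachable from the
cfs_rq:

     struct task_group *tg = cfs_rq->tg;    /* owning task group */
     int cpu = cfs_rq->rq->cpu;             /* cpu the cfs_rq belongs to */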

The following list shows examples of the key=value pairs for:

 (1) a task group:

     cpu=1 path=/tg1/tg11/tg111 load=517

 (2) an autogroup:

     cpu=1 path=/autogroup-10 load=1050

We don't maintain a load signal for the root task group.
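
This matches update_tg_load_avg() bailing out for the root group before
the tracepoint is reached; a rough sketch, assuming the existing
root_task_group check in fair.c:

     if (cfs_rq->tg == &root_task_group)
             return;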

The trace event is only defined if cfs group scheduling support
(CONFIG_FAIR_GROUP_SCHED) is enabled.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com>
 include/trace/events/sched.h | 29 +++++++++++++++++++++++++++++
 kernel/sched/fair.c          |  2 ++
 2 files changed, 31 insertions(+)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9342a91f3e658329546303036b9c37d45af3599e..ee18c47fb7ace246f5e5eea3d6caf6acfdbeac17 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -684,6 +684,35 @@ TRACE_EVENT(sched_load_se,
                  __entry->cpu, __get_str(path), __entry->comm, __entry->pid,
                  __entry->load, __entry->rbl_load, __entry->util)
 );
+
+/*
+ * Tracepoint for task_group load tracking:
+ */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+TRACE_EVENT(sched_load_tg,
+
+       TP_PROTO(struct cfs_rq *cfs_rq),
+
+       TP_ARGS(cfs_rq),
+
+       TP_STRUCT__entry(
+               __field(        int,    cpu                             )
+               __dynamic_array(char,   path,
+                               __trace_sched_path(cfs_rq, NULL, 0)     )
+               __field(        long,   load                            )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu    = cfs_rq->rq->cpu;
+               __trace_sched_path(cfs_rq, __get_dynamic_array(path),
+                                  __get_dynamic_array_len(path));
+               __entry->load   = atomic_long_read(&cfs_rq->tg->load_avg);
+       ),
+
+       TP_printk("cpu=%d path=%s load=%ld", __entry->cpu, __get_str(path),
+                 __entry->load)
+);
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 #endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5dc1c167452a913c69de50eae113e32b26f399a7..062b1ecd916d105abc611b059fb29acb71e6fcb9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3365,6 +3365,8 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
        if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
                atomic_long_add(delta, &cfs_rq->tg->load_avg);
                cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
+
+               trace_sched_load_tg(cfs_rq);
        }
 }
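
Note: the tracepoint piggybacks on the existing rate limit in
update_tg_load_avg(), so it only fires when the per-cpu contribution
drifts by more than 1/64 of its previous value, or when the update is
forced. Restated as a sketch, with a hypothetical number plugged in:

     long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
     bool fires = force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64;
     /* e.g. with tg_load_avg_contrib == 640, |delta| must exceed 10 */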