From: Dietmar Eggemann
Date: Fri, 17 Mar 2017 20:27:06 +0000 (+0000)
Subject: sched/events: Introduce cfs_rq load tracking trace event
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=78af88e85c953ba617442ae306ade9c02cd59038;p=platform%2Fkernel%2Flinux-exynos.git

sched/events: Introduce cfs_rq load tracking trace event

The trace event keys map to the following cfs_rq members:

 (1) load     : cfs_rq->avg.load_avg
 (2) rbl_load : cfs_rq->avg.runnable_load_avg
 (3) util     : cfs_rq->avg.util_avg

To let this trace event work for configurations w/ and w/o group
scheduling support for cfs (CONFIG_FAIR_GROUP_SCHED), the following
special handling is necessary for a non-existent key=value pair:

 path = "(null)" : In case of !CONFIG_FAIR_GROUP_SCHED.

The following list shows examples of the key=value pairs in different
configurations for:

 (1) a root task_group:

     cpu=4 path=/ load=6 rbl_load=6 util=331

 (2) a task_group:

     cpu=1 path=/tg1/tg11/tg111 load=538 rbl_load=538 util=522

 (3) an autogroup:

     cpu=3 path=/autogroup-18 load=997 rbl_load=997 util=517

 (4) w/o CONFIG_FAIR_GROUP_SCHED:

     cpu=0 path=(null) load=314 rbl_load=314 util=289

The trace event is only defined for CONFIG_SMP.

The helper function __trace_sched_path() can be used to get the length
parameter of the dynamic array (when called with path == NULL) and to
copy the path into it (when called with path != NULL).
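Purely as an illustration of that two-pass calling convention (a sizing
call with path == NULL followed by a copy call into the allocated
buffer), here is a minimal, self-contained user-space sketch;
demo_sched_path() and the hard-coded group path are invented for the
example and are not part of this patch or of any kernel API:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /*
   * Illustrative stand-in for __trace_sched_path(): with a NULL buffer
   * it only reports how many bytes (including the terminating NUL) the
   * caller must allocate; with a buffer it copies the path into it.
   */
  static int demo_sched_path(const char *group, char *path, int len)
  {
  	if (path)
  		snprintf(path, len, "%s", group);

  	return (int)strlen(group) + 1;
  }

  int main(void)
  {
  	const char *group = "/tg1/tg11/tg111";
  	int len = demo_sched_path(group, NULL, 0);	/* sizing pass */
  	char *path = malloc(len);

  	if (!path)
  		return 1;

  	demo_sched_path(group, path, len);		/* copy pass */
  	printf("path=%s len=%d\n", path, len);
  	free(path);

  	return 0;
  }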
Signed-off-by: Dietmar Eggemann
Cc: Peter Zijlstra
Cc: Ingo Molnar
Cc: Steven Rostedt
---

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 0be866c91f62..8c79c661772f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -572,6 +572,72 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
 
 	TP_printk("cpu=%d", __entry->cpu)
 );
+
+#ifdef CONFIG_SMP
+#ifdef CREATE_TRACE_POINTS
+static inline
+int __trace_sched_cpu(struct cfs_rq *cfs_rq)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct rq *rq = cfs_rq->rq;
+#else
+	struct rq *rq = container_of(cfs_rq, struct rq, cfs);
+#endif
+	return cpu_of(rq);
+}
+
+static inline
+int __trace_sched_path(struct cfs_rq *cfs_rq, char *path, int len)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	int l = path ? len : 0;
+
+	if (task_group_is_autogroup(cfs_rq->tg))
+		return autogroup_path(cfs_rq->tg, path, l) + 1;
+	else
+		return cgroup_path(cfs_rq->tg->css.cgroup, path, l) + 1;
+#else
+	if (path)
+		strcpy(path, "(null)");
+
+	return strlen("(null)") + 1;
+#endif
+}
+
+#endif /* CREATE_TRACE_POINTS */
+
+/*
+ * Tracepoint for cfs_rq load tracking:
+ */
+TRACE_EVENT(sched_load_cfs_rq,
+
+	TP_PROTO(struct cfs_rq *cfs_rq),
+
+	TP_ARGS(cfs_rq),
+
+	TP_STRUCT__entry(
+		__field(	int,		cpu			)
+		__dynamic_array(char,		path,
+				__trace_sched_path(cfs_rq, NULL, 0)	)
+		__field(	unsigned long,	load			)
+		__field(	unsigned long,	rbl_load		)
+		__field(	unsigned long,	util			)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = __trace_sched_cpu(cfs_rq);
+		__trace_sched_path(cfs_rq, __get_dynamic_array(path),
+				   __get_dynamic_array_len(path));
+		__entry->load = cfs_rq->avg.load_avg;
+		__entry->rbl_load = cfs_rq->avg.runnable_load_avg;
+		__entry->util = cfs_rq->avg.util_avg;
+	),
+
+	TP_printk("cpu=%d path=%s load=%lu rbl_load=%lu util=%lu",
+		  __entry->cpu, __get_str(path), __entry->load,
+		  __entry->rbl_load, __entry->util)
+);
+#endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 37c8047a7256..8a4abcc469a4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3252,6 +3252,8 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
 	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
 	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
 
+	trace_sched_load_cfs_rq(cfs_rq);
+
 	return 1;
 }
 
@@ -3403,6 +3405,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
 	cfs_rq_util_change(cfs_rq, flags);
+
+	trace_sched_load_cfs_rq(cfs_rq);
 }
 
 /**
@@ -3422,6 +3426,8 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
 	cfs_rq_util_change(cfs_rq, 0);
+
+	trace_sched_load_cfs_rq(cfs_rq);
 }
 
 /*
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 35475c0c5419..f042ee0f7b08 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -29,6 +29,8 @@
 #include "sched-pelt.h"
 #include "pelt.h"
 
+#include <trace/events/sched.h>
+
 /*
  * Approximate:
  *   val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
@@ -304,6 +306,9 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->curr != NULL)) {
 
 		___update_load_avg(&cfs_rq->avg, 1, 1);
+
+		trace_sched_load_cfs_rq(cfs_rq);
+
 		return 1;
 	}