sched/events: Introduce util_est trace events
author    Patrick Bellasi <patrick.bellasi@arm.com>
          Fri, 27 Oct 2017 15:12:51 +0000 (16:12 +0100)
committer Lukasz Luba <l.luba@partner.samsung.com>
          Mon, 10 Sep 2018 08:24:45 +0000 (10:24 +0200)
Change-Id: I65e294c454369cbc15a29370d8a13ce358a95c39
Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com>
include/trace/events/sched.h
kernel/sched/fair.c

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index ee18c47fb7ace246f5e5eea3d6caf6acfdbeac17..79d3af7e627eaa7ea5fe17a8a167361a5974d946 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -713,6 +713,70 @@ TRACE_EVENT(sched_load_tg,
                  __entry->load)
 );
 #endif /* CONFIG_FAIR_GROUP_SCHED */
+
+/*
+ * Tracepoint for tasks' estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_task,
+
+       TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+       TP_ARGS(tsk, avg),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN           )
+               __field( pid_t,         pid                     )
+               __field( int,           cpu                     )
+               __field( unsigned long, util_avg                )
+               __field( unsigned long, est_last                )
+               __field( unsigned long, est_ewma                )
+
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid                    = tsk->pid;
+               __entry->cpu                    = task_cpu(tsk);
+               __entry->util_avg               = avg->util_avg;
+               __entry->est_last               = tsk->util_est.last;
+               __entry->est_ewma               = tsk->util_est.ewma;
+       ),
+
+       TP_printk("comm=%s pid=%d cpu=%d util_avg=%lu est_ewma=%lu est_last=%lu",
+                 __entry->comm,
+                 __entry->pid,
+                 __entry->cpu,
+                 __entry->util_avg,
+                 __entry->est_ewma,
+                 __entry->est_last)
+);
+
+/*
+ * Tracepoint for root cfs_rq's estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_cpu,
+
+       TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+       TP_ARGS(cpu, cfs_rq),
+
+       TP_STRUCT__entry(
+               __field( int,           cpu                     )
+               __field( unsigned long, util_avg                )
+               __field( unsigned long, util_est_runnable       )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu                    = cpu;
+               __entry->util_avg               = cfs_rq->avg.util_avg;
+               __entry->util_est_runnable      = cfs_rq->util_est_runnable;
+       ),
+
+       TP_printk("cpu=%d util_avg=%lu util_runnable=%lu",
+                 __entry->cpu,
+                 __entry->util_avg,
+                 __entry->util_est_runnable)
+);
 #endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 
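Once the header change above is in place, the two new events appear alongside the other scheduler tracepoints (e.g. they can be enabled by writing 1 to /sys/kernel/debug/tracing/events/sched/sched_util_est_task/enable and .../sched_util_est_cpu/enable). With made-up values, purely for illustration, the TP_printk() formats above produce output of roughly this shape:

    sched_util_est_task: comm=foo pid=1234 cpu=2 util_avg=345 est_ewma=340 est_last=352
    sched_util_est_cpu:  cpu=2 util_avg=410 util_runnable=512
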
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 062b1ecd916d105abc611b059fb29acb71e6fcb9..cea6df0949a8d91fc00d8e526c88c2da3543ffb7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3302,6 +3302,8 @@ __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
 static int
 __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+       struct task_struct *tsk;
+
        if (entity_is_task(se))
                se->runnable_weight = se->load.weight;
 
@@ -3312,6 +3314,15 @@ __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entit
 
                trace_sched_load_se(se);
 
+               /* Trace utilization only for actual tasks */
+               if (entity_is_task(se)) {
+                       tsk = task_of(se);
+                       trace_sched_util_est_task(tsk, &se->avg);
+                       /* Trace utilization only for top level CFS RQ */
+                       cfs_rq = &(task_rq(tsk)->cfs);
+                       trace_sched_util_est_cpu(cpu, cfs_rq);
+               }
+
                return 1;
        }
 
@@ -5206,6 +5217,10 @@ static inline void util_est_enqueue(struct task_struct *p)
 
        /* Update root cfs_rq's estimated utilization */
        cfs_rq->util_est_runnable += task_util_est(p);
+
+       /* Trace the task's and the CPU's estimated utilization */
+       trace_sched_util_est_task(p, &p->se.avg);
+       trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
 }
 
 /*
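
For illustration only (the numbers are invented): if a task whose task_util_est() is 200 is enqueued on a CPU whose root cfs_rq already accounts for 300 of estimated runnable utilization, util_est_runnable becomes 500 and the two tracepoints above fire back to back, sched_util_est_task with the task's util_avg, est_last and est_ewma, and sched_util_est_cpu reporting util_runnable=500 for that CPU.
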
@@ -5306,6 +5321,8 @@ static inline void util_est_dequeue(struct task_struct *p, int flags)
                cfs_rq->util_est_runnable = 0;
        }
 
+       /* Trace the CPU's estimated utilization */
+       trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
        /*
         * Skip update of task's estimated utilization when the task has not
         * yet completed an activation, e.g. being migrated.
@@ -5345,6 +5362,9 @@ static inline void util_est_dequeue(struct task_struct *p, int flags)
                ewma = util_last;
        }
        p->util_est.ewma = ewma;
+
+       /* Trace the task's estimated utilization */
+       trace_sched_util_est_task(p, &p->se.avg);
 }
 
 static void set_next_buddy(struct sched_entity *se);
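
The last hunk only shows the tail of the dequeue-time update of p->util_est.ewma. As a rough, self-contained sketch of the kind of fixed-point EWMA such an estimator maintains (the 1/4 weight and the rounding below are assumptions for illustration, not taken from this patch), written as ordinary userspace C:

    #include <stdio.h>

    /*
     * Illustration only, not kernel code: an exponentially weighted
     * moving average of the utilization sampled at each dequeue,
     * assuming a weight of 1/4:
     *
     *   ewma(t) = ewma(t-1) + (util_last - ewma(t-1)) / 4
     */
    static unsigned long ewma_update(unsigned long ewma, unsigned long util_last)
    {
        long diff = (long)util_last - (long)ewma;

        return (unsigned long)((long)ewma + diff / 4);
    }

    int main(void)
    {
        unsigned long ewma = 64;        /* starting estimate */
        int i;

        /* a task that suddenly settles around util_avg ~= 400 */
        for (i = 0; i < 6; i++) {
            ewma = ewma_update(ewma, 400);
            printf("activation %d: ewma=%lu\n", i, ewma);
        }
        return 0;
    }

Keeping both the last sample (util_est.last) and a slow-moving average (util_est.ewma) is what lets the estimate ride out the fast PELT decay of util_avg while a task sleeps, and that is exactly the pair of signals the new tracepoints expose.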