sched/events: Introduce util_est trace events
Author:     Patrick Bellasi <patrick.bellasi@arm.com>
AuthorDate: Fri, 27 Oct 2017 15:12:51 +0000 (16:12 +0100)
Commit:     Douglas RAILLARD <douglas.raillard@arm.com>
CommitDate: Tue, 14 Aug 2018 15:32:36 +0000 (16:32 +0100)
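
Introduce two new trace events for the estimated utilization signals:

 - sched_util_est_task: reports a task's util_avg, util_est.enqueued
   and util_est.ewma, emitted from util_est_enqueue() and, once the
   task has completed an activation, from util_est_dequeue();

 - sched_util_est_cpu: reports a root cfs_rq's util_avg and
   util_est.enqueued, emitted on each task enqueue and dequeue.

Both events depend on CONFIG_SMP, matching the existing #ifdef
section they are added to.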
Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
---
Change-Id: I65e294c454369cbc15a29370d8a13ce358a95c39
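
Not part of the commit message proper (it sits below the "---" line):
a minimal userspace sketch showing one way to exercise the new events.
It assumes tracefs is mounted at /sys/kernel/debug/tracing; the mount
point, the events/sched/... layout and trace_pipe are standard ftrace
facilities, not something this patch adds:

  #include <stdio.h>
  #include <string.h>

  #define TRACING "/sys/kernel/debug/tracing"

  /* Write a value to a tracefs control file; returns 0 on success. */
  static int tracefs_write(const char *path, const char *val)
  {
          char fullpath[256];
          FILE *f;

          snprintf(fullpath, sizeof(fullpath), TRACING "/%s", path);
          f = fopen(fullpath, "w");
          if (!f)
                  return -1;
          fputs(val, f);
          fclose(f);
          return 0;
  }

  int main(void)
  {
          char line[512];
          FILE *pipe;

          /* Enable the two events added by this patch. */
          tracefs_write("events/sched/sched_util_est_task/enable", "1");
          tracefs_write("events/sched/sched_util_est_cpu/enable", "1");
          tracefs_write("tracing_on", "1");

          /* Stream the trace; lines follow the TP_printk() formats below. */
          pipe = fopen(TRACING "/trace_pipe", "r");
          if (!pipe)
                  return 1;
          while (fgets(line, sizeof(line), pipe))
                  if (strstr(line, "sched_util_est_"))
                          fputs(line, stdout);
          fclose(pipe);
          return 0;
  }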

include/trace/events/sched.h
kernel/sched/fair.c

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 566d66d26643ec9bcd5839b58d3c8c02913f9181..1b73c549bb906f2200c5cba8391d81ff89d97543 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -715,6 +715,69 @@ TRACE_EVENT(sched_load_tg,
                  __entry->load)
 );
 #endif /* CONFIG_FAIR_GROUP_SCHED */
+
+/*
+ * Tracepoint for tasks' estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_task,
+
+       TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+       TP_ARGS(tsk, avg),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN           )
+               __field( pid_t,         pid                     )
+               __field( int,           cpu                     )
+               __field( unsigned int,  util_avg                )
+               __field( unsigned int,  est_enqueued            )
+               __field( unsigned int,  est_ewma                )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid                    = tsk->pid;
+               __entry->cpu                    = task_cpu(tsk);
+               __entry->util_avg               = avg->util_avg;
+               __entry->est_enqueued           = avg->util_est.enqueued;
+               __entry->est_ewma               = avg->util_est.ewma;
+       ),
+
+       TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u",
+                 __entry->comm,
+                 __entry->pid,
+                 __entry->cpu,
+                 __entry->util_avg,
+                 __entry->est_ewma,
+                 __entry->est_enqueued)
+);
+
+/*
+ * Tracepoint for root cfs_rq's estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_cpu,
+
+       TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+       TP_ARGS(cpu, cfs_rq),
+
+       TP_STRUCT__entry(
+               __field( int,           cpu                     )
+               __field( unsigned int,  util_avg                )
+               __field( unsigned int,  util_est_enqueued       )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu                    = cpu;
+               __entry->util_avg               = cfs_rq->avg.util_avg;
+               __entry->util_est_enqueued      = cfs_rq->avg.util_est.enqueued;
+       ),
+
+       TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u",
+                 __entry->cpu,
+                 __entry->util_avg,
+                 __entry->util_est_enqueued)
+);
 #endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 
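For illustration, with the TP_printk() formats above the two events
render in the trace like this (task name, pid, CPU and values are
hypothetical):

    app-1234  [002]  1234.567890: sched_util_est_task: comm=app pid=1234 cpu=2 util_avg=185 util_est_ewma=170 util_est_enqueued=185
    app-1234  [002]  1234.567894: sched_util_est_cpu: cpu=2 util_avg=320 util_est_enqueued=341
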
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 92d6a55793024502978cca6195c2e3e9821628fe..20146270020dbeca3a37914d70584b373bc16983 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3578,6 +3578,10 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
        enqueued  = cfs_rq->avg.util_est.enqueued;
        enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+       /* Update plots for Task and CPU estimated utilization */
+       trace_sched_util_est_task(p, &p->se.avg);
+       trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
 }
 
 /*
@@ -3608,6 +3612,9 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
                             (_task_util_est(p) | UTIL_AVG_UNCHANGED));
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
+       /* Update plots for CPU's estimated utilization */
+       trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+
        /*
         * Skip update of task's estimated utilization when the task has not
         * yet completed an activation, e.g. being migrated.
@@ -3653,6 +3660,9 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
        ue.ewma  += last_ewma_diff;
        ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
        WRITE_ONCE(p->se.avg.util_est, ue);
+
+       /* Update plots for Task's estimated utilization */
+       trace_sched_util_est_task(p, &p->se.avg);
 }
 
 static inline int task_fits_capacity(struct task_struct *p, long capacity)
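
For reference, the last hunk sits at the tail of util_est's EWMA
update. Together with the preceding lines of util_est_dequeue() (not
shown in this hunk's context, which pre-scale ue.ewma and compute
last_ewma_diff), the sequence implements the usual exponentially
weighted moving average with w = 1/2^UTIL_EST_WEIGHT_SHIFT:

    ewma(t) = w * task_util(p) + (1 - w) * ewma(t-1)
            = ewma(t-1) + w * (task_util(p) - ewma(t-1))

so the new trace_sched_util_est_task() call publishes the task's
estimated utilization right after the updated value is stored with
WRITE_ONCE().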