__entry->load)
);
#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+/*
+ * Tracepoint for tasks' estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_task,
+
+ TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+ TP_ARGS(tsk, avg),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( unsigned long, util_avg )
+ __field( unsigned long, est_last )
+ __field( unsigned long, est_ewma )
+	),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = task_cpu(tsk);
+ __entry->util_avg = avg->util_avg;
+ __entry->est_last = tsk->util_est.last;
+ __entry->est_ewma = tsk->util_est.ewma;
+ ),
+
+ TP_printk("comm=%s pid=%d cpu=%d util_avg=%lu est_ewma=%lu est_last=%lu",
+ __entry->comm,
+ __entry->pid,
+ __entry->cpu,
+ __entry->util_avg,
+ __entry->est_ewma,
+ __entry->est_last)
+);
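+
+/*
+ * Example of the resulting trace record (field values are illustrative):
+ *   sched_util_est_task: comm=top pid=1234 cpu=2 util_avg=175 est_ewma=168 est_last=180
+ */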
+
+/*
+ * Tracepoint for root cfs_rq's estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_cpu,
+
+ TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+ TP_ARGS(cpu, cfs_rq),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( unsigned long, util_avg )
+ __field( unsigned long, util_est_runnable )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->util_avg = cfs_rq->avg.util_avg;
+ __entry->util_est_runnable = cfs_rq->util_est_runnable;
+ ),
+
+	TP_printk("cpu=%d util_avg=%lu util_est_runnable=%lu",
+ __entry->cpu,
+ __entry->util_avg,
+ __entry->util_est_runnable)
+);
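+
+/*
+ * Example of the resulting trace record (field values are illustrative):
+ *   sched_util_est_cpu: cpu=2 util_avg=225 util_est_runnable=310
+ *
+ * Both events can be enabled at run time through tracefs, e.g.:
+ *   echo 1 > /sys/kernel/debug/tracing/events/sched/sched_util_est_task/enable
+ *   echo 1 > /sys/kernel/debug/tracing/events/sched/sched_util_est_cpu/enable
+ */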
#endif /* CONFIG_SMP */
#endif /* _TRACE_SCHED_H */
static int
__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (entity_is_task(se))
se->runnable_weight = se->load.weight;
trace_sched_load_se(se);
+	/* Trace estimated utilization only for actual tasks */
+	if (entity_is_task(se)) {
+		struct task_struct *tsk = task_of(se);
+
+		trace_sched_util_est_task(tsk, &se->avg);
+
+		/* Trace estimated utilization only for the root (top-level) cfs_rq */
+		trace_sched_util_est_cpu(cpu, &task_rq(tsk)->cfs);
+	}
+
return 1;
}
/* Update root cfs_rq's estimated utilization */
cfs_rq->util_est_runnable += task_util_est(p);
+
+	/* Trace the updated task and CPU estimated utilization */
+ trace_sched_util_est_task(p, &p->se.avg);
+ trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
}
/*
cfs_rq->util_est_runnable = 0;
}
+	/* Trace the CPU's updated estimated utilization */
+ trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
/*
* Skip update of task's estimated utilization when the task has not
* yet completed an activation, e.g. being migrated.
ewma = util_last;
}
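+	/*
+	 * The EWMA gives progressively less weight to older samples:
+	 *   ewma(t) = w * last + (1 - w) * ewma(t-1)
+	 * where w is a fixed power-of-two fraction (e.g. 1/4), so the
+	 * update can be computed with shifts and adds.
+	 */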
p->util_est.ewma = ewma;
+
+	/* Trace the task's updated estimated utilization */
+ trace_sched_util_est_task(p, &p->se.avg);
}
static void set_next_buddy(struct sched_entity *se);