5 #include "util/evlist.h"
6 #include "util/cache.h"
7 #include "util/evsel.h"
8 #include "util/symbol.h"
9 #include "util/thread.h"
10 #include "util/header.h"
11 #include "util/session.h"
12 #include "util/tool.h"
13 #include "util/cloexec.h"
14 #include "util/thread_map.h"
15 #include "util/color.h"
16 #include "util/stat.h"
17 #include "util/callchain.h"
18 #include "util/time-utils.h"
20 #include <subcmd/parse-options.h>
21 #include "util/trace-event.h"
23 #include "util/debug.h"
25 #include <linux/log2.h>
26 #include <sys/prctl.h>
27 #include <sys/resource.h>
29 #include <semaphore.h>
32 #include <api/fs/fs.h>
33 #include <linux/time64.h>
35 #define PR_SET_NAME 15 /* Set process name */
39 #define MAX_PID 1024000
48 unsigned long nr_events;
49 unsigned long curr_event;
50 struct sched_atom **atoms;
61 enum sched_event_type {
65 SCHED_EVENT_MIGRATION,
69 enum sched_event_type type;
75 struct task_desc *wakee;
78 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
88 struct list_head list;
89 enum thread_state state;
97 struct list_head work_list;
98 struct thread *thread;
108 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
112 struct trace_sched_handler {
113 int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
114 struct perf_sample *sample, struct machine *machine);
116 int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
117 struct perf_sample *sample, struct machine *machine);
119 int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
120 struct perf_sample *sample, struct machine *machine);
122 /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
123 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
124 struct machine *machine);
126 int (*migrate_task_event)(struct perf_sched *sched,
127 struct perf_evsel *evsel,
128 struct perf_sample *sample,
129 struct machine *machine);
132 #define COLOR_PIDS PERF_COLOR_BLUE
133 #define COLOR_CPUS PERF_COLOR_BG_RED
135 struct perf_sched_map {
136 DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
139 struct thread_map *color_pids;
140 const char *color_pids_str;
141 struct cpu_map *color_cpus;
142 const char *color_cpus_str;
143 struct cpu_map *cpus;
144 const char *cpus_str;
148 struct perf_tool tool;
149 const char *sort_order;
150 unsigned long nr_tasks;
151 struct task_desc **pid_to_task;
152 struct task_desc **tasks;
153 const struct trace_sched_handler *tp_handler;
154 pthread_mutex_t start_work_mutex;
155 pthread_mutex_t work_done_wait_mutex;
158 * Track the current task - that way we can know whether there are any
159 * weird events, such as a task being switched away that is not current.
162 u32 curr_pid[MAX_CPUS];
163 struct thread *curr_thread[MAX_CPUS];
164 char next_shortname1;
165 char next_shortname2;
166 unsigned int replay_repeat;
167 unsigned long nr_run_events;
168 unsigned long nr_sleep_events;
169 unsigned long nr_wakeup_events;
170 unsigned long nr_sleep_corrections;
171 unsigned long nr_run_events_optimized;
172 unsigned long targetless_wakeups;
173 unsigned long multitarget_wakeups;
174 unsigned long nr_runs;
175 unsigned long nr_timestamps;
176 unsigned long nr_unordered_timestamps;
177 unsigned long nr_context_switch_bugs;
178 unsigned long nr_events;
179 unsigned long nr_lost_chunks;
180 unsigned long nr_lost_events;
181 u64 run_measurement_overhead;
182 u64 sleep_measurement_overhead;
185 u64 runavg_cpu_usage;
186 u64 parent_cpu_usage;
187 u64 runavg_parent_cpu_usage;
193 u64 cpu_last_switched[MAX_CPUS];
194 struct rb_root atom_root, sorted_atom_root, merged_atom_root;
195 struct list_head sort_list, cmp_pid;
198 struct perf_sched_map map;
200 /* options for timehist command */
205 unsigned int max_stack;
206 bool show_cpu_visual;
208 bool show_migrations;
210 const char *time_str;
211 struct perf_time_interval ptime;
214 /* per thread run time data */
215 struct thread_runtime {
216 u64 last_time; /* time of previous sched in/out event */
217 u64 dt_run; /* run time */
218 u64 dt_wait; /* time between CPU access (off cpu) */
219 u64 dt_delay; /* time between wakeup and sched-in */
220 u64 ready_to_run; /* time of wakeup */
222 struct stats run_stats;
228 /* per event run time data */
229 struct evsel_runtime {
230 u64 *last_time; /* time this event was last seen per cpu */
231 u32 ncpu; /* highest cpu slot allocated */
234 /* per cpu idle time data */
235 struct idle_thread_runtime {
236 struct thread_runtime tr;
237 struct thread *last_thread;
238 struct rb_root sorted_root;
239 struct callchain_root callchain;
240 struct callchain_cursor cursor;
243 /* track idle times per cpu */
244 static struct thread **idle_threads;
245 static int idle_max_cpu;
246 static char idle_comm[] = "<idle>";
248 static u64 get_nsecs(void)
252 clock_gettime(CLOCK_MONOTONIC, &ts);
254 return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
257 static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
259 u64 T0 = get_nsecs(), T1;
263 } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
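/*
 * Worked example (hypothetical numbers): with run_measurement_overhead
 * calibrated to 100 nsecs, burn_nsecs(sched, 1000) spins only until
 * ~900 nsecs have elapsed, so the fixed cost of the measurement loop
 * itself is charged against the requested burn time.
 */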
266 static void sleep_nsecs(u64 nsecs)
270 ts.tv_nsec = nsecs % NSEC_PER_SEC;
271 ts.tv_sec = nsecs / NSEC_PER_SEC;
273 nanosleep(&ts, NULL);
276 static void calibrate_run_measurement_overhead(struct perf_sched *sched)
278 u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
281 for (i = 0; i < 10; i++) {
283 burn_nsecs(sched, 0);
286 min_delta = min(min_delta, delta);
288 sched->run_measurement_overhead = min_delta;
290 printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
293 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
295 u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
298 for (i = 0; i < 10; i++) {
303 min_delta = min(min_delta, delta);
306 sched->sleep_measurement_overhead = min_delta;
308 printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
311 static struct sched_atom *
312 get_new_event(struct task_desc *task, u64 timestamp)
314 struct sched_atom *event = zalloc(sizeof(*event));
315 unsigned long idx = task->nr_events;
318 event->timestamp = timestamp;
322 size = sizeof(struct sched_atom *) * task->nr_events;
323 task->atoms = realloc(task->atoms, size);
324 BUG_ON(!task->atoms);
326 task->atoms[idx] = event;
331 static struct sched_atom *last_event(struct task_desc *task)
333 if (!task->nr_events)
336 return task->atoms[task->nr_events - 1];
339 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
340 u64 timestamp, u64 duration)
342 struct sched_atom *event, *curr_event = last_event(task);
345 * optimize an existing RUN event by merging this one
348 if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
349 sched->nr_run_events_optimized++;
350 curr_event->duration += duration;
354 event = get_new_event(task, timestamp);
356 event->type = SCHED_EVENT_RUN;
357 event->duration = duration;
359 sched->nr_run_events++;
362 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
363 u64 timestamp, struct task_desc *wakee)
365 struct sched_atom *event, *wakee_event;
367 event = get_new_event(task, timestamp);
368 event->type = SCHED_EVENT_WAKEUP;
369 event->wakee = wakee;
371 wakee_event = last_event(wakee);
372 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
373 sched->targetless_wakeups++;
376 if (wakee_event->wait_sem) {
377 sched->multitarget_wakeups++;
381 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
382 sem_init(wakee_event->wait_sem, 0, 0);
383 wakee_event->specific_wait = 1;
384 event->wait_sem = wakee_event->wait_sem;
386 sched->nr_wakeup_events++;
389 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
390 u64 timestamp, u64 task_state __maybe_unused)
392 struct sched_atom *event = get_new_event(task, timestamp);
394 event->type = SCHED_EVENT_SLEEP;
396 sched->nr_sleep_events++;
399 static struct task_desc *register_pid(struct perf_sched *sched,
400 unsigned long pid, const char *comm)
402 struct task_desc *task;
405 if (sched->pid_to_task == NULL) {
406 if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
408 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
410 if (pid >= (unsigned long)pid_max) {
411 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
412 sizeof(struct task_desc *))) == NULL);
413 while (pid >= (unsigned long)pid_max)
414 sched->pid_to_task[pid_max++] = NULL;
417 task = sched->pid_to_task[pid];
422 task = zalloc(sizeof(*task));
424 task->nr = sched->nr_tasks;
425 strcpy(task->comm, comm);
427 * every task starts in sleeping state - this gets ignored
428 * if there's no wakeup pointing to this sleep state:
430 add_sched_event_sleep(sched, task, 0, 0);
432 sched->pid_to_task[pid] = task;
434 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
435 BUG_ON(!sched->tasks);
436 sched->tasks[task->nr] = task;
439 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
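/*
 * For illustration (hypothetical pid): pid_to_task is a flat array
 * indexed directly by pid, sized from kernel/pid_max and grown on
 * demand, so looking up e.g. pid 1234 during replay costs a single
 * pointer dereference rather than a hash or tree walk.
 */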
445 static void print_task_traces(struct perf_sched *sched)
447 struct task_desc *task;
450 for (i = 0; i < sched->nr_tasks; i++) {
451 task = sched->tasks[i];
452 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
453 task->nr, task->comm, task->pid, task->nr_events);
457 static void add_cross_task_wakeups(struct perf_sched *sched)
459 struct task_desc *task1, *task2;
462 for (i = 0; i < sched->nr_tasks; i++) {
463 task1 = sched->tasks[i];
465 if (j == sched->nr_tasks)
467 task2 = sched->tasks[j];
468 add_sched_event_wakeup(sched, task1, 0, task2);
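/*
 * Assuming j walks to i + 1 and wraps to 0 at nr_tasks (the elided
 * lines above), the wakeups form a ring: with 3 tasks the edges are
 * 0->1, 1->2 and 2->0, so every replay thread has at least one waker.
 */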
472 static void perf_sched__process_event(struct perf_sched *sched,
473 struct sched_atom *atom)
477 switch (atom->type) {
478 case SCHED_EVENT_RUN:
479 burn_nsecs(sched, atom->duration);
481 case SCHED_EVENT_SLEEP:
483 ret = sem_wait(atom->wait_sem);
486 case SCHED_EVENT_WAKEUP:
488 ret = sem_post(atom->wait_sem);
491 case SCHED_EVENT_MIGRATION:
498 static u64 get_cpu_usage_nsec_parent(void)
504 err = getrusage(RUSAGE_SELF, &ru);
507 sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
508 sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
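/*
 * Example (hypothetical rusage values): ru_utime = {1 s, 500 usec}
 * and ru_stime = {0 s, 250 usec} sum to
 * 1 * NSEC_PER_SEC + 750 * NSEC_PER_USEC = 1000750000 nsecs of
 * combined user plus system time for the calling process.
 */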
513 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
515 struct perf_event_attr attr;
516 char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
519 bool need_privilege = false;
521 memset(&attr, 0, sizeof(attr));
523 attr.type = PERF_TYPE_SOFTWARE;
524 attr.config = PERF_COUNT_SW_TASK_CLOCK;
527 fd = sys_perf_event_open(&attr, 0, -1, -1,
528 perf_event_open_cloexec_flag());
531 if (errno == EMFILE) {
533 BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
534 limit.rlim_cur += sched->nr_tasks - cur_task;
535 if (limit.rlim_cur > limit.rlim_max) {
536 limit.rlim_max = limit.rlim_cur;
537 need_privilege = true;
539 if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
540 if (need_privilege && errno == EPERM)
541 strcpy(info, "Need privilege\n");
545 strcpy(info, "Try again with the -f option\n");
547 pr_err("Error: sys_perf_event_open() syscall returned "
548 "with %d (%s)\n%s", fd,
549 str_error_r(errno, sbuf, sizeof(sbuf)), info);
555 static u64 get_cpu_usage_nsec_self(int fd)
560 ret = read(fd, &runtime, sizeof(runtime));
561 BUG_ON(ret != sizeof(runtime));
566 struct sched_thread_parms {
567 struct task_desc *task;
568 struct perf_sched *sched;
572 static void *thread_func(void *ctx)
574 struct sched_thread_parms *parms = ctx;
575 struct task_desc *this_task = parms->task;
576 struct perf_sched *sched = parms->sched;
577 u64 cpu_usage_0, cpu_usage_1;
578 unsigned long i, ret;
584 sprintf(comm2, ":%s", this_task->comm);
585 prctl(PR_SET_NAME, comm2);
589 ret = sem_post(&this_task->ready_for_work);
591 ret = pthread_mutex_lock(&sched->start_work_mutex);
593 ret = pthread_mutex_unlock(&sched->start_work_mutex);
596 cpu_usage_0 = get_cpu_usage_nsec_self(fd);
598 for (i = 0; i < this_task->nr_events; i++) {
599 this_task->curr_event = i;
600 perf_sched__process_event(sched, this_task->atoms[i]);
603 cpu_usage_1 = get_cpu_usage_nsec_self(fd);
604 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
605 ret = sem_post(&this_task->work_done_sem);
608 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
610 ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
616 static void create_tasks(struct perf_sched *sched)
618 struct task_desc *task;
623 err = pthread_attr_init(&attr);
625 err = pthread_attr_setstacksize(&attr,
626 (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
628 err = pthread_mutex_lock(&sched->start_work_mutex);
630 err = pthread_mutex_lock(&sched->work_done_wait_mutex);
632 for (i = 0; i < sched->nr_tasks; i++) {
633 struct sched_thread_parms *parms = malloc(sizeof(*parms));
634 BUG_ON(parms == NULL);
635 parms->task = task = sched->tasks[i];
636 parms->sched = sched;
637 parms->fd = self_open_counters(sched, i);
638 sem_init(&task->sleep_sem, 0, 0);
639 sem_init(&task->ready_for_work, 0, 0);
640 sem_init(&task->work_done_sem, 0, 0);
641 task->curr_event = 0;
642 err = pthread_create(&task->thread, &attr, thread_func, parms);
647 static void wait_for_tasks(struct perf_sched *sched)
649 u64 cpu_usage_0, cpu_usage_1;
650 struct task_desc *task;
651 unsigned long i, ret;
653 sched->start_time = get_nsecs();
654 sched->cpu_usage = 0;
655 pthread_mutex_unlock(&sched->work_done_wait_mutex);
657 for (i = 0; i < sched->nr_tasks; i++) {
658 task = sched->tasks[i];
659 ret = sem_wait(&task->ready_for_work);
661 sem_init(&task->ready_for_work, 0, 0);
663 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
666 cpu_usage_0 = get_cpu_usage_nsec_parent();
668 pthread_mutex_unlock(&sched->start_work_mutex);
670 for (i = 0; i < sched->nr_tasks; i++) {
671 task = sched->tasks[i];
672 ret = sem_wait(&task->work_done_sem);
674 sem_init(&task->work_done_sem, 0, 0);
675 sched->cpu_usage += task->cpu_usage;
679 cpu_usage_1 = get_cpu_usage_nsec_parent();
680 if (!sched->runavg_cpu_usage)
681 sched->runavg_cpu_usage = sched->cpu_usage;
682 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
684 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
685 if (!sched->runavg_parent_cpu_usage)
686 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
687 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
688 sched->parent_cpu_usage)/sched->replay_repeat;
690 ret = pthread_mutex_lock(&sched->start_work_mutex);
693 for (i = 0; i < sched->nr_tasks; i++) {
694 task = sched->tasks[i];
695 sem_init(&task->sleep_sem, 0, 0);
696 task->curr_event = 0;
700 static void run_one_test(struct perf_sched *sched)
702 u64 T0, T1, delta, avg_delta, fluct;
705 wait_for_tasks(sched);
709 sched->sum_runtime += delta;
712 avg_delta = sched->sum_runtime / sched->nr_runs;
713 if (delta < avg_delta)
714 fluct = avg_delta - delta;
716 fluct = delta - avg_delta;
717 sched->sum_fluct += fluct;
719 sched->run_avg = delta;
720 sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
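/*
 * Worked example (hypothetical values): with replay_repeat = 10,
 * run_avg = 50 ms and delta = 60 ms, the new average is
 * (50 * 9 + 60) / 10 = 51 ms - each run contributes a weight of
 * 1/replay_repeat, smoothing out per-run noise.
 */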
722 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
724 printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
726 printf("cpu: %0.2f / %0.2f",
727 (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
731 * rusage statistics are done by the parent; these are less
732 * accurate than the sched->sum_exec_runtime based statistics:
734 printf(" [%0.2f / %0.2f]",
735 (double)sched->parent_cpu_usage / NSEC_PER_MSEC,
736 (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
741 if (sched->nr_sleep_corrections)
742 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
743 sched->nr_sleep_corrections = 0;
746 static void test_calibrations(struct perf_sched *sched)
751 burn_nsecs(sched, NSEC_PER_MSEC);
754 printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
757 sleep_nsecs(NSEC_PER_MSEC);
760 printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
764 replay_wakeup_event(struct perf_sched *sched,
765 struct perf_evsel *evsel, struct perf_sample *sample,
766 struct machine *machine __maybe_unused)
768 const char *comm = perf_evsel__strval(evsel, sample, "comm");
769 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
770 struct task_desc *waker, *wakee;
773 printf("sched_wakeup event %p\n", evsel);
775 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
778 waker = register_pid(sched, sample->tid, "<unknown>");
779 wakee = register_pid(sched, pid, comm);
781 add_sched_event_wakeup(sched, waker, sample->time, wakee);
785 static int replay_switch_event(struct perf_sched *sched,
786 struct perf_evsel *evsel,
787 struct perf_sample *sample,
788 struct machine *machine __maybe_unused)
790 const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
791 *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
792 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
793 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
794 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
795 struct task_desc *prev, __maybe_unused *next;
796 u64 timestamp0, timestamp = sample->time;
797 int cpu = sample->cpu;
801 printf("sched_switch event %p\n", evsel);
803 if (cpu >= MAX_CPUS || cpu < 0)
806 timestamp0 = sched->cpu_last_switched[cpu];
808 delta = timestamp - timestamp0;
813 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
817 pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
818 prev_comm, prev_pid, next_comm, next_pid, delta);
820 prev = register_pid(sched, prev_pid, prev_comm);
821 next = register_pid(sched, next_pid, next_comm);
823 sched->cpu_last_switched[cpu] = timestamp;
825 add_sched_event_run(sched, prev, timestamp, delta);
826 add_sched_event_sleep(sched, prev, timestamp, prev_state);
831 static int replay_fork_event(struct perf_sched *sched,
832 union perf_event *event,
833 struct machine *machine)
835 struct thread *child, *parent;
837 child = machine__findnew_thread(machine, event->fork.pid,
839 parent = machine__findnew_thread(machine, event->fork.ppid,
842 if (child == NULL || parent == NULL) {
843 pr_debug("thread does not exist on fork event: child %p, parent %p\n",
849 printf("fork event\n");
850 printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
851 printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
854 register_pid(sched, parent->tid, thread__comm_str(parent));
855 register_pid(sched, child->tid, thread__comm_str(child));
862 struct sort_dimension {
865 struct list_head list;
869 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
871 struct sort_dimension *sort;
874 BUG_ON(list_empty(list));
876 list_for_each_entry(sort, list, list) {
877 ret = sort->cmp(l, r);
885 static struct work_atoms *
886 thread_atoms_search(struct rb_root *root, struct thread *thread,
887 struct list_head *sort_list)
889 struct rb_node *node = root->rb_node;
890 struct work_atoms key = { .thread = thread };
893 struct work_atoms *atoms;
896 atoms = container_of(node, struct work_atoms, node);
898 cmp = thread_lat_cmp(sort_list, &key, atoms);
900 node = node->rb_left;
902 node = node->rb_right;
904 BUG_ON(thread != atoms->thread);
912 __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
913 struct list_head *sort_list)
915 struct rb_node **new = &(root->rb_node), *parent = NULL;
918 struct work_atoms *this;
921 this = container_of(*new, struct work_atoms, node);
924 cmp = thread_lat_cmp(sort_list, data, this);
927 new = &((*new)->rb_left);
929 new = &((*new)->rb_right);
932 rb_link_node(&data->node, parent, new);
933 rb_insert_color(&data->node, root);
936 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
938 struct work_atoms *atoms = zalloc(sizeof(*atoms));
940 pr_err("No memory at %s\n", __func__);
944 atoms->thread = thread__get(thread);
945 INIT_LIST_HEAD(&atoms->work_list);
946 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
950 static char sched_out_state(u64 prev_state)
952 const char *str = TASK_STATE_TO_CHAR_STR;
954 return str[prev_state];
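/*
 * For illustration: prev_state is used as a direct index into
 * TASK_STATE_TO_CHAR_STR, so e.g. 0 -> 'R' (runnable), 1 -> 'S'
 * (interruptible sleep) and 2 -> 'D' (uninterruptible). This assumes
 * prev_state arrives as a small index rather than a raw state bitmask.
 */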
958 add_sched_out_event(struct work_atoms *atoms,
962 struct work_atom *atom = zalloc(sizeof(*atom));
964 pr_err("No memory at %s\n", __func__);
968 atom->sched_out_time = timestamp;
970 if (run_state == 'R') {
971 atom->state = THREAD_WAIT_CPU;
972 atom->wake_up_time = atom->sched_out_time;
975 list_add_tail(&atom->list, &atoms->work_list);
980 add_runtime_event(struct work_atoms *atoms, u64 delta,
981 u64 timestamp __maybe_unused)
983 struct work_atom *atom;
985 BUG_ON(list_empty(&atoms->work_list));
987 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
989 atom->runtime += delta;
990 atoms->total_runtime += delta;
994 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
996 struct work_atom *atom;
999 if (list_empty(&atoms->work_list))
1002 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1004 if (atom->state != THREAD_WAIT_CPU)
1007 if (timestamp < atom->wake_up_time) {
1008 atom->state = THREAD_IGNORE;
1012 atom->state = THREAD_SCHED_IN;
1013 atom->sched_in_time = timestamp;
1015 delta = atom->sched_in_time - atom->wake_up_time;
1016 atoms->total_lat += delta;
1017 if (delta > atoms->max_lat) {
1018 atoms->max_lat = delta;
1019 atoms->max_lat_at = timestamp;
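/*
 * Worked example (hypothetical timestamps): wake_up_time = 100 and
 * sched_in_time = 150 give delta = 50 nsecs of scheduling latency;
 * it is added to total_lat and, if it is the largest seen so far,
 * recorded as max_lat with max_lat_at = 150.
 */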
1024 static int latency_switch_event(struct perf_sched *sched,
1025 struct perf_evsel *evsel,
1026 struct perf_sample *sample,
1027 struct machine *machine)
1029 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1030 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1031 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
1032 struct work_atoms *out_events, *in_events;
1033 struct thread *sched_out, *sched_in;
1034 u64 timestamp0, timestamp = sample->time;
1035 int cpu = sample->cpu, err = -1;
1038 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1040 timestamp0 = sched->cpu_last_switched[cpu];
1041 sched->cpu_last_switched[cpu] = timestamp;
1043 delta = timestamp - timestamp0;
1048 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1052 sched_out = machine__findnew_thread(machine, -1, prev_pid);
1053 sched_in = machine__findnew_thread(machine, -1, next_pid);
1054 if (sched_out == NULL || sched_in == NULL)
1057 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1059 if (thread_atoms_insert(sched, sched_out))
1061 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1063 pr_err("out-event: Internal tree error");
1067 if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
1070 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1072 if (thread_atoms_insert(sched, sched_in))
1074 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1076 pr_err("in-event: Internal tree error");
1080 * Task came in that we have not heard about yet,
1081 * add an initial atom in runnable state:
1083 if (add_sched_out_event(in_events, 'R', timestamp))
1086 add_sched_in_event(in_events, timestamp);
1089 thread__put(sched_out);
1090 thread__put(sched_in);
1094 static int latency_runtime_event(struct perf_sched *sched,
1095 struct perf_evsel *evsel,
1096 struct perf_sample *sample,
1097 struct machine *machine)
1099 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1100 const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
1101 struct thread *thread = machine__findnew_thread(machine, -1, pid);
1102 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1103 u64 timestamp = sample->time;
1104 int cpu = sample->cpu, err = -1;
1109 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1111 if (thread_atoms_insert(sched, thread))
1113 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1115 pr_err("in-event: Internal tree error");
1118 if (add_sched_out_event(atoms, 'R', timestamp))
1122 add_runtime_event(atoms, runtime, timestamp);
1125 thread__put(thread);
1129 static int latency_wakeup_event(struct perf_sched *sched,
1130 struct perf_evsel *evsel,
1131 struct perf_sample *sample,
1132 struct machine *machine)
1134 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1135 struct work_atoms *atoms;
1136 struct work_atom *atom;
1137 struct thread *wakee;
1138 u64 timestamp = sample->time;
1141 wakee = machine__findnew_thread(machine, -1, pid);
1144 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1146 if (thread_atoms_insert(sched, wakee))
1148 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1150 pr_err("wakeup-event: Internal tree error");
1153 if (add_sched_out_event(atoms, 'S', timestamp))
1157 BUG_ON(list_empty(&atoms->work_list));
1159 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1162 * A wakeup event is not guaranteed to happen while the task is
1163 * off the run queue: it may also arrive while the task is still
1164 * on the run queue, where it merely changes ->state to
1165 * TASK_RUNNING. In that case we must not set ->wake_up_time,
1166 * since the task never actually slept.
1168 * You WILL be missing events if you've recorded only
1169 * one CPU, or are looking at only one, so don't
1170 * skip in this case.
1172 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1175 sched->nr_timestamps++;
1176 if (atom->sched_out_time > timestamp) {
1177 sched->nr_unordered_timestamps++;
1181 atom->state = THREAD_WAIT_CPU;
1182 atom->wake_up_time = timestamp;
1190 static int latency_migrate_task_event(struct perf_sched *sched,
1191 struct perf_evsel *evsel,
1192 struct perf_sample *sample,
1193 struct machine *machine)
1195 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1196 u64 timestamp = sample->time;
1197 struct work_atoms *atoms;
1198 struct work_atom *atom;
1199 struct thread *migrant;
1203 * Only need to worry about migration when profiling one CPU.
1205 if (sched->profile_cpu == -1)
1208 migrant = machine__findnew_thread(machine, -1, pid);
1209 if (migrant == NULL)
1211 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1213 if (thread_atoms_insert(sched, migrant))
1215 register_pid(sched, migrant->tid, thread__comm_str(migrant));
1216 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1218 pr_err("migration-event: Internal tree error");
1221 if (add_sched_out_event(atoms, 'R', timestamp))
1225 BUG_ON(list_empty(&atoms->work_list));
1227 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1228 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1230 sched->nr_timestamps++;
1232 if (atom->sched_out_time > timestamp)
1233 sched->nr_unordered_timestamps++;
1236 thread__put(migrant);
1240 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
1245 char max_lat_at[32];
1247 if (!work_list->nb_atoms)
1250 * Ignore idle threads:
1252 if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
1255 sched->all_runtime += work_list->total_runtime;
1256 sched->all_count += work_list->nb_atoms;
1258 if (work_list->num_merged > 1)
1259 ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
1261 ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
1263 for (i = 0; i < 24 - ret; i++)
1266 avg = work_list->total_lat / work_list->nb_atoms;
1267 timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));
1269 printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
1270 (double)work_list->total_runtime / NSEC_PER_MSEC,
1271 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
1272 (double)work_list->max_lat / NSEC_PER_MSEC,
1276 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1278 if (l->thread == r->thread)
1280 if (l->thread->tid < r->thread->tid)
1282 if (l->thread->tid > r->thread->tid)
1284 return (int)(l->thread - r->thread);
1287 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1297 avgl = l->total_lat / l->nb_atoms;
1298 avgr = r->total_lat / r->nb_atoms;
1308 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1310 if (l->max_lat < r->max_lat)
1312 if (l->max_lat > r->max_lat)
1318 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1320 if (l->nb_atoms < r->nb_atoms)
1322 if (l->nb_atoms > r->nb_atoms)
1328 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1330 if (l->total_runtime < r->total_runtime)
1332 if (l->total_runtime > r->total_runtime)
1338 static int sort_dimension__add(const char *tok, struct list_head *list)
1341 static struct sort_dimension avg_sort_dimension = {
1345 static struct sort_dimension max_sort_dimension = {
1349 static struct sort_dimension pid_sort_dimension = {
1353 static struct sort_dimension runtime_sort_dimension = {
1357 static struct sort_dimension switch_sort_dimension = {
1361 struct sort_dimension *available_sorts[] = {
1362 &pid_sort_dimension,
1363 &avg_sort_dimension,
1364 &max_sort_dimension,
1365 &switch_sort_dimension,
1366 &runtime_sort_dimension,
1369 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1370 if (!strcmp(available_sorts[i]->name, tok)) {
1371 list_add_tail(&available_sorts[i]->list, list);
1380 static void perf_sched__sort_lat(struct perf_sched *sched)
1382 struct rb_node *node;
1383 struct rb_root *root = &sched->atom_root;
1386 struct work_atoms *data;
1387 node = rb_first(root);
1391 rb_erase(node, root);
1392 data = rb_entry(node, struct work_atoms, node);
1393 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1395 if (root == &sched->atom_root) {
1396 root = &sched->merged_atom_root;
1401 static int process_sched_wakeup_event(struct perf_tool *tool,
1402 struct perf_evsel *evsel,
1403 struct perf_sample *sample,
1404 struct machine *machine)
1406 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1408 if (sched->tp_handler->wakeup_event)
1409 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
1419 static bool thread__has_color(struct thread *thread)
1421 union map_priv priv = {
1422 .ptr = thread__priv(thread),
1428 static struct thread*
1429 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
1431 struct thread *thread = machine__findnew_thread(machine, pid, tid);
1432 union map_priv priv = {
1436 if (!sched->map.color_pids || !thread || thread__priv(thread))
1439 if (thread_map__has(sched->map.color_pids, tid))
1442 thread__set_priv(thread, priv.ptr);
1446 static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
1447 struct perf_sample *sample, struct machine *machine)
1449 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1450 struct thread *sched_in;
1452 u64 timestamp0, timestamp = sample->time;
1454 int i, this_cpu = sample->cpu;
1456 bool new_cpu = false;
1457 const char *color = PERF_COLOR_NORMAL;
1458 char stimestamp[32];
1460 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1462 if (this_cpu > sched->max_cpu)
1463 sched->max_cpu = this_cpu;
1465 if (sched->map.comp) {
1466 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1467 if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
1468 sched->map.comp_cpus[cpus_nr++] = this_cpu;
1472 cpus_nr = sched->max_cpu;
1474 timestamp0 = sched->cpu_last_switched[this_cpu];
1475 sched->cpu_last_switched[this_cpu] = timestamp;
1477 delta = timestamp - timestamp0;
1482 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1486 sched_in = map__findnew_thread(sched, machine, -1, next_pid);
1487 if (sched_in == NULL)
1490 sched->curr_thread[this_cpu] = thread__get(sched_in);
1495 if (!sched_in->shortname[0]) {
1496 if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1498 * Don't allocate a letter-number for swapper:0
1499 * as a shortname. Instead, we use '.' for it.
1501 sched_in->shortname[0] = '.';
1502 sched_in->shortname[1] = ' ';
1504 sched_in->shortname[0] = sched->next_shortname1;
1505 sched_in->shortname[1] = sched->next_shortname2;
1507 if (sched->next_shortname1 < 'Z') {
1508 sched->next_shortname1++;
1510 sched->next_shortname1 = 'A';
1511 if (sched->next_shortname2 < '9')
1512 sched->next_shortname2++;
1514 sched->next_shortname2 = '0';
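/*
 * For illustration, assuming the counters start at 'A' and '0':
 * shortnames are handed out as A0, B0, ... Z0, then A1 ... Z9,
 * giving 26 * 10 = 260 distinct two-character tags in the map view
 * before the sequence wraps around.
 */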
1520 for (i = 0; i < cpus_nr; i++) {
1521 int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
1522 struct thread *curr_thread = sched->curr_thread[cpu];
1523 const char *pid_color = color;
1524 const char *cpu_color = color;
1526 if (curr_thread && thread__has_color(curr_thread))
1527 pid_color = COLOR_PIDS;
1529 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
1532 if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
1533 cpu_color = COLOR_CPUS;
1535 if (cpu != this_cpu)
1536 color_fprintf(stdout, color, " ");
1538 color_fprintf(stdout, cpu_color, "*");
1540 if (sched->curr_thread[cpu])
1541 color_fprintf(stdout, pid_color, "%2s ", sched->curr_thread[cpu]->shortname);
1543 color_fprintf(stdout, color, " ");
1546 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
1549 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1550 color_fprintf(stdout, color, " %12s secs ", stimestamp);
1551 if (new_shortname || (verbose && sched_in->tid)) {
1552 const char *pid_color = color;
1554 if (thread__has_color(sched_in))
1555 pid_color = COLOR_PIDS;
1557 color_fprintf(stdout, pid_color, "%s => %s:%d",
1558 sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
1561 if (sched->map.comp && new_cpu)
1562 color_fprintf(stdout, color, " (CPU %d)", this_cpu);
1565 color_fprintf(stdout, color, "\n");
1567 thread__put(sched_in);
1572 static int process_sched_switch_event(struct perf_tool *tool,
1573 struct perf_evsel *evsel,
1574 struct perf_sample *sample,
1575 struct machine *machine)
1577 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1578 int this_cpu = sample->cpu, err = 0;
1579 u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1580 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1582 if (sched->curr_pid[this_cpu] != (u32)-1) {
1584 * Are we trying to switch away a PID that is
1587 if (sched->curr_pid[this_cpu] != prev_pid)
1588 sched->nr_context_switch_bugs++;
1591 if (sched->tp_handler->switch_event)
1592 err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1594 sched->curr_pid[this_cpu] = next_pid;
1598 static int process_sched_runtime_event(struct perf_tool *tool,
1599 struct perf_evsel *evsel,
1600 struct perf_sample *sample,
1601 struct machine *machine)
1603 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1605 if (sched->tp_handler->runtime_event)
1606 return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1611 static int perf_sched__process_fork_event(struct perf_tool *tool,
1612 union perf_event *event,
1613 struct perf_sample *sample,
1614 struct machine *machine)
1616 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1618 /* run the fork event through the perf machinery */
1619 perf_event__process_fork(tool, event, sample, machine);
1621 /* and then run additional processing needed for this command */
1622 if (sched->tp_handler->fork_event)
1623 return sched->tp_handler->fork_event(sched, event, machine);
1628 static int process_sched_migrate_task_event(struct perf_tool *tool,
1629 struct perf_evsel *evsel,
1630 struct perf_sample *sample,
1631 struct machine *machine)
1633 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1635 if (sched->tp_handler->migrate_task_event)
1636 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1641 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1642 struct perf_evsel *evsel,
1643 struct perf_sample *sample,
1644 struct machine *machine);
1646 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
1647 union perf_event *event __maybe_unused,
1648 struct perf_sample *sample,
1649 struct perf_evsel *evsel,
1650 struct machine *machine)
1654 if (evsel->handler != NULL) {
1655 tracepoint_handler f = evsel->handler;
1656 err = f(tool, evsel, sample, machine);
1662 static int perf_sched__read_events(struct perf_sched *sched)
1664 const struct perf_evsel_str_handler handlers[] = {
1665 { "sched:sched_switch", process_sched_switch_event, },
1666 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1667 { "sched:sched_wakeup", process_sched_wakeup_event, },
1668 { "sched:sched_wakeup_new", process_sched_wakeup_event, },
1669 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1671 struct perf_session *session;
1672 struct perf_data_file file = {
1674 .mode = PERF_DATA_MODE_READ,
1675 .force = sched->force,
1679 session = perf_session__new(&file, false, &sched->tool);
1680 if (session == NULL) {
1681 pr_debug("No memory for session\n");
1685 symbol__init(&session->header.env);
1687 if (perf_session__set_tracepoints_handlers(session, handlers))
1690 if (perf_session__has_traces(session, "record -R")) {
1691 int err = perf_session__process_events(session);
1693 pr_err("Failed to process events, error %d", err);
1697 sched->nr_events = session->evlist->stats.nr_events[0];
1698 sched->nr_lost_events = session->evlist->stats.total_lost;
1699 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1704 perf_session__delete(session);
1709 * scheduling times are printed as msec.usec
1711 static inline void print_sched_time(unsigned long long nsecs, int width)
1713 unsigned long msecs;
1714 unsigned long usecs;
1716 msecs = nsecs / NSEC_PER_MSEC;
1717 nsecs -= msecs * NSEC_PER_MSEC;
1718 usecs = nsecs / NSEC_PER_USEC;
1719 printf("%*lu.%03lu ", width, msecs, usecs);
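/*
 * Example: print_sched_time(12345678, 6) splits 12345678 nsecs into
 * 12 msecs and 345 usecs and prints "    12.345 " - msec.usec with
 * the msec part right-aligned in the given width.
 */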
1723 * returns runtime data for event, allocating memory for it the
1724 * first time it is used.
1726 static struct evsel_runtime *perf_evsel__get_runtime(struct perf_evsel *evsel)
1728 struct evsel_runtime *r = evsel->priv;
1731 r = zalloc(sizeof(struct evsel_runtime));
1739 * save last time event was seen per cpu
1741 static void perf_evsel__save_time(struct perf_evsel *evsel,
1742 u64 timestamp, u32 cpu)
1744 struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
1749 if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1750 int i, n = __roundup_pow_of_two(cpu+1);
1751 void *p = r->last_time;
1753 p = realloc(r->last_time, n * sizeof(u64));
1758 for (i = r->ncpu; i < n; ++i)
1759 r->last_time[i] = (u64) 0;
1764 r->last_time[cpu] = timestamp;
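/*
 * For illustration: the per-cpu array grows in powers of two, so the
 * first sample seen on cpu 5 reallocates last_time to
 * __roundup_pow_of_two(6) = 8 slots and zero-fills the new slots
 * before storing the timestamp.
 */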
1767 /* returns last time this event was seen on the given cpu */
1768 static u64 perf_evsel__get_time(struct perf_evsel *evsel, u32 cpu)
1770 struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
1772 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
1775 return r->last_time[cpu];
1778 static int comm_width = 30;
1780 static char *timehist_get_commstr(struct thread *thread)
1782 static char str[32];
1783 const char *comm = thread__comm_str(thread);
1784 pid_t tid = thread->tid;
1785 pid_t pid = thread->pid_;
1789 n = scnprintf(str, sizeof(str), "%s", comm);
1791 else if (tid != pid)
1792 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
1795 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
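/*
 * Example outputs (hypothetical task): comm "bash" with pid 100 and
 * tid 101 formats as "bash[101/100]", while a single-threaded task
 * (tid == pid) formats as "bash[100]"; callers pad the result to
 * comm_width (30 by default) columns.
 */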
1803 static void timehist_header(struct perf_sched *sched)
1805 u32 ncpus = sched->max_cpu + 1;
1808 printf("%15s %6s ", "time", "cpu");
1810 if (sched->show_cpu_visual) {
1812 for (i = 0, j = 0; i < ncpus; ++i) {
1820 printf(" %-*s %9s %9s %9s", comm_width,
1821 "task name", "wait time", "sch delay", "run time");
1828 printf("%15s %-6s ", "", "");
1830 if (sched->show_cpu_visual)
1831 printf(" %*s ", ncpus, "");
1833 printf(" %-*s %9s %9s %9s\n", comm_width,
1834 "[tid/pid]", "(msec)", "(msec)", "(msec)");
1839 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
1841 if (sched->show_cpu_visual)
1842 printf(" %.*s ", ncpus, graph_dotted_line);
1844 printf(" %.*s %.9s %.9s %.9s", comm_width,
1845 graph_dotted_line, graph_dotted_line, graph_dotted_line,
1851 static void timehist_print_sample(struct perf_sched *sched,
1852 struct perf_sample *sample,
1853 struct addr_location *al,
1854 struct thread *thread,
1857 struct thread_runtime *tr = thread__priv(thread);
1858 u32 max_cpus = sched->max_cpu + 1;
1861 timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
1862 printf("%15s [%04d] ", tstr, sample->cpu);
1864 if (sched->show_cpu_visual) {
1869 for (i = 0; i < max_cpus; ++i) {
1870 /* flag idle times with 'i'; others are sched events */
1871 if (i == sample->cpu)
1872 c = (thread->tid == 0) ? 'i' : 's';
1880 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
1882 print_sched_time(tr->dt_wait, 6);
1883 print_sched_time(tr->dt_delay, 6);
1884 print_sched_time(tr->dt_run, 6);
1886 if (sched->show_wakeups)
1887 printf(" %-*s", comm_width, "");
1889 if (thread->tid == 0)
1892 if (sched->show_callchain)
1895 sample__fprintf_sym(sample, al, 0,
1896 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
1897 EVSEL__PRINT_CALLCHAIN_ARROW |
1898 EVSEL__PRINT_SKIP_IGNORED,
1899 &callchain_cursor, stdout);
1906 * Explanation of delta-time stats:
1908 * t = time of current schedule out event
1909 * tprev = time of previous sched out event
1910 * also time of schedule-in event for current task
1911 * last_time = time of last sched change event for current task
1912 * (i.e, time process was last scheduled out)
1913 * ready_to_run = time of wakeup for current task
1915 * -----|------------|------------|------------|------
1916 *    last         ready        tprev          t
1919 *      |-------- dt_wait --------|
1920 *                   |- dt_delay -|-- dt_run --|
1922 * dt_run = run time of current task
1923 * dt_wait = time between last schedule out event for task and tprev
1924 * represents time spent off the cpu
1925 * dt_delay = time between wakeup and schedule-in of task
1928 static void timehist_update_runtime_stats(struct thread_runtime *r,
1935 r->dt_run = t - tprev;
1936 if (r->ready_to_run) {
1937 if (r->ready_to_run > tprev)
1938 pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
1940 r->dt_delay = tprev - r->ready_to_run;
1943 if (r->last_time > tprev)
1944 pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
1945 else if (r->last_time)
1946 r->dt_wait = tprev - r->last_time;
1949 update_stats(&r->run_stats, r->dt_run);
1950 r->total_run_time += r->dt_run;
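/*
 * Worked example against the diagram above (hypothetical times):
 * last_time = 10, ready_to_run = 40, tprev = 60, t = 90 give
 * dt_wait = 60 - 10 = 50, dt_delay = 60 - 40 = 20 and
 * dt_run = 90 - 60 = 30; only dt_run feeds run_stats and
 * total_run_time.
 */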
1953 static bool is_idle_sample(struct perf_sample *sample,
1954 struct perf_evsel *evsel)
1956 /* pid 0 == swapper == idle task */
1957 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0)
1958 return perf_evsel__intval(evsel, sample, "prev_pid") == 0;
1960 return sample->pid == 0;
1963 static void save_task_callchain(struct perf_sched *sched,
1964 struct perf_sample *sample,
1965 struct perf_evsel *evsel,
1966 struct machine *machine)
1968 struct callchain_cursor *cursor = &callchain_cursor;
1969 struct thread *thread;
1971 /* want main thread for process - has maps */
1972 thread = machine__findnew_thread(machine, sample->pid, sample->pid);
1973 if (thread == NULL) {
1974 pr_debug("Failed to get thread for pid %d.\n", sample->pid);
1978 if (!symbol_conf.use_callchain || sample->callchain == NULL)
1981 if (thread__resolve_callchain(thread, cursor, evsel, sample,
1982 NULL, NULL, sched->max_stack + 2) != 0) {
1984 error("Failed to resolve callchain. Skipping\n");
1989 callchain_cursor_commit(cursor);
1992 struct callchain_cursor_node *node;
1995 node = callchain_cursor_current(cursor);
2000 if (sym && sym->name) {
2001 if (!strcmp(sym->name, "schedule") ||
2002 !strcmp(sym->name, "__schedule") ||
2003 !strcmp(sym->name, "preempt_schedule"))
2007 callchain_cursor_advance(cursor);
2011 static int init_idle_thread(struct thread *thread)
2013 struct idle_thread_runtime *itr;
2015 thread__set_comm(thread, idle_comm, 0);
2017 itr = zalloc(sizeof(*itr));
2021 init_stats(&itr->tr.run_stats);
2022 callchain_init(&itr->callchain);
2023 callchain_cursor_reset(&itr->cursor);
2024 thread__set_priv(thread, itr);
2030 * Track idle stats per cpu by maintaining a local thread
2031 * struct for the idle task on each cpu.
2033 static int init_idle_threads(int ncpu)
2037 idle_threads = zalloc(ncpu * sizeof(struct thread *));
2041 idle_max_cpu = ncpu;
2043 /* allocate the actual thread struct if needed */
2044 for (i = 0; i < ncpu; ++i) {
2045 idle_threads[i] = thread__new(0, 0);
2046 if (idle_threads[i] == NULL)
2049 ret = init_idle_thread(idle_threads[i]);
2057 static void free_idle_threads(void)
2061 if (idle_threads == NULL)
2064 for (i = 0; i < idle_max_cpu; ++i) {
2065 if ((idle_threads[i]))
2066 thread__delete(idle_threads[i]);
2072 static struct thread *get_idle_thread(int cpu)
2075 * expand/allocate array of pointers to local thread
2078 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2079 int i, j = __roundup_pow_of_two(cpu+1);
2082 p = realloc(idle_threads, j * sizeof(struct thread *));
2086 idle_threads = (struct thread **) p;
2087 for (i = idle_max_cpu; i < j; ++i)
2088 idle_threads[i] = NULL;
2093 /* allocate a new thread struct if needed */
2094 if (idle_threads[cpu] == NULL) {
2095 idle_threads[cpu] = thread__new(0, 0);
2097 if (idle_threads[cpu]) {
2098 if (init_idle_thread(idle_threads[cpu]) < 0)
2103 return idle_threads[cpu];
2106 static void save_idle_callchain(struct idle_thread_runtime *itr,
2107 struct perf_sample *sample)
2109 if (!symbol_conf.use_callchain || sample->callchain == NULL)
2112 callchain_cursor__copy(&itr->cursor, &callchain_cursor);
2116 * handle runtime stats saved per thread
2118 static struct thread_runtime *thread__init_runtime(struct thread *thread)
2120 struct thread_runtime *r;
2122 r = zalloc(sizeof(struct thread_runtime));
2126 init_stats(&r->run_stats);
2127 thread__set_priv(thread, r);
2132 static struct thread_runtime *thread__get_runtime(struct thread *thread)
2134 struct thread_runtime *tr;
2136 tr = thread__priv(thread);
2138 tr = thread__init_runtime(thread);
2140 pr_debug("Failed to malloc memory for runtime data.\n");
2146 static struct thread *timehist_get_thread(struct perf_sched *sched,
2147 struct perf_sample *sample,
2148 struct machine *machine,
2149 struct perf_evsel *evsel)
2151 struct thread *thread;
2153 if (is_idle_sample(sample, evsel)) {
2154 thread = get_idle_thread(sample->cpu);
2156 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2159 /* there were samples with tid 0 but non-zero pid */
2160 thread = machine__findnew_thread(machine, sample->pid,
2161 sample->tid ?: sample->pid);
2162 if (thread == NULL) {
2163 pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2167 save_task_callchain(sched, sample, evsel, machine);
2168 if (sched->idle_hist) {
2169 struct thread *idle;
2170 struct idle_thread_runtime *itr;
2172 idle = get_idle_thread(sample->cpu);
2174 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2178 itr = thread__priv(idle);
2182 itr->last_thread = thread;
2184 /* copy task callchain when entering idle */
2185 if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
2186 save_idle_callchain(itr, sample);
2193 static bool timehist_skip_sample(struct perf_sched *sched,
2194 struct thread *thread,
2195 struct perf_evsel *evsel,
2196 struct perf_sample *sample)
2200 if (thread__is_filtered(thread)) {
2202 sched->skipped_samples++;
2205 if (sched->idle_hist) {
2206 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch"))
2208 else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 &&
2209 perf_evsel__intval(evsel, sample, "next_pid") != 0)
2216 static void timehist_print_wakeup_event(struct perf_sched *sched,
2217 struct perf_evsel *evsel,
2218 struct perf_sample *sample,
2219 struct machine *machine,
2220 struct thread *awakened)
2222 struct thread *thread;
2225 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2229 /* show wakeup unless both awakee and awaker are filtered */
2230 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2231 timehist_skip_sample(sched, awakened, evsel, sample)) {
2235 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2236 printf("%15s [%04d] ", tstr, sample->cpu);
2237 if (sched->show_cpu_visual)
2238 printf(" %*s ", sched->max_cpu + 1, "");
2240 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2243 printf(" %9s %9s %9s ", "", "", "");
2245 printf("awakened: %s", timehist_get_commstr(awakened));
2250 static int timehist_sched_wakeup_event(struct perf_tool *tool,
2251 union perf_event *event __maybe_unused,
2252 struct perf_evsel *evsel,
2253 struct perf_sample *sample,
2254 struct machine *machine)
2256 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2257 struct thread *thread;
2258 struct thread_runtime *tr = NULL;
2259 /* want pid of awakened task not pid in sample */
2260 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
2262 thread = machine__findnew_thread(machine, 0, pid);
2266 tr = thread__get_runtime(thread);
2270 if (tr->ready_to_run == 0)
2271 tr->ready_to_run = sample->time;
2273 /* show wakeups if requested */
2274 if (sched->show_wakeups &&
2275 !perf_time__skip_sample(&sched->ptime, sample->time))
2276 timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2281 static void timehist_print_migration_event(struct perf_sched *sched,
2282 struct perf_evsel *evsel,
2283 struct perf_sample *sample,
2284 struct machine *machine,
2285 struct thread *migrated)
2287 struct thread *thread;
2289 u32 max_cpus = sched->max_cpu + 1;
2292 if (sched->summary_only)
2295 max_cpus = sched->max_cpu + 1;
2296 ocpu = perf_evsel__intval(evsel, sample, "orig_cpu");
2297 dcpu = perf_evsel__intval(evsel, sample, "dest_cpu");
2299 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2303 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2304 timehist_skip_sample(sched, migrated, evsel, sample)) {
2308 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2309 printf("%15s [%04d] ", tstr, sample->cpu);
2311 if (sched->show_cpu_visual) {
2316 for (i = 0; i < max_cpus; ++i) {
2317 c = (i == sample->cpu) ? 'm' : ' ';
2323 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2326 printf(" %9s %9s %9s ", "", "", "");
2328 printf("migrated: %s", timehist_get_commstr(migrated));
2329 printf(" cpu %d => %d", ocpu, dcpu);
2334 static int timehist_migrate_task_event(struct perf_tool *tool,
2335 union perf_event *event __maybe_unused,
2336 struct perf_evsel *evsel,
2337 struct perf_sample *sample,
2338 struct machine *machine)
2340 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2341 struct thread *thread;
2342 struct thread_runtime *tr = NULL;
2343 /* want pid of migrated task not pid in sample */
2344 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
2346 thread = machine__findnew_thread(machine, 0, pid);
2350 tr = thread__get_runtime(thread);
2356 /* show migrations if requested */
2357 timehist_print_migration_event(sched, evsel, sample, machine, thread);
2362 static int timehist_sched_change_event(struct perf_tool *tool,
2363 union perf_event *event,
2364 struct perf_evsel *evsel,
2365 struct perf_sample *sample,
2366 struct machine *machine)
2368 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2369 struct perf_time_interval *ptime = &sched->ptime;
2370 struct addr_location al;
2371 struct thread *thread;
2372 struct thread_runtime *tr = NULL;
2373 u64 tprev, t = sample->time;
2376 if (machine__resolve(machine, &al, sample) < 0) {
2377 pr_err("problem processing %d event. skipping it\n",
2378 event->header.type);
2383 thread = timehist_get_thread(sched, sample, machine, evsel);
2384 if (thread == NULL) {
2389 if (timehist_skip_sample(sched, thread, evsel, sample))
2392 tr = thread__get_runtime(thread);
2398 tprev = perf_evsel__get_time(evsel, sample->cpu);
2401 * If start time given:
2402 * - sample time is under window user cares about - skip sample
2403 * - tprev is under window user cares about - reset to start of window
2405 if (ptime->start && ptime->start > t)
2408 if (tprev && ptime->start > tprev)
2409 tprev = ptime->start;
2412 * If end time given:
2413 * - previous sched event is out of window - we are done
2414 * - sample time is beyond window user cares about - reset it
2415 * to close out stats for time window interest
2418 if (tprev > ptime->end)
2425 if (!sched->idle_hist || thread->tid == 0) {
2426 timehist_update_runtime_stats(tr, t, tprev);
2428 if (sched->idle_hist) {
2429 struct idle_thread_runtime *itr = (void *)tr;
2430 struct thread_runtime *last_tr;
2432 BUG_ON(thread->tid != 0);
2434 if (itr->last_thread == NULL)
2437 /* add current idle time as last thread's runtime */
2438 last_tr = thread__get_runtime(itr->last_thread);
2439 if (last_tr == NULL)
2442 timehist_update_runtime_stats(last_tr, t, tprev);
2444 * reset the delta times of the last thread, as they are not
2445 * updated here and would otherwise show stale values the next
2446 * time. We only care about total run time and run stats.
2448 last_tr->dt_run = 0;
2449 last_tr->dt_wait = 0;
2450 last_tr->dt_delay = 0;
2453 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2455 itr->last_thread = NULL;
2459 if (!sched->summary_only)
2460 timehist_print_sample(sched, sample, &al, thread, t);
2464 /* time of this sched_switch event becomes last time task seen */
2465 tr->last_time = sample->time;
2467 /* sched out event for task so reset ready to run time */
2468 tr->ready_to_run = 0;
2471 perf_evsel__save_time(evsel, sample->time, sample->cpu);
2476 static int timehist_sched_switch_event(struct perf_tool *tool,
2477 union perf_event *event,
2478 struct perf_evsel *evsel,
2479 struct perf_sample *sample,
2480 struct machine *machine __maybe_unused)
2482 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2485 static int process_lost(struct perf_tool *tool __maybe_unused,
2486 union perf_event *event,
2487 struct perf_sample *sample,
2488 struct machine *machine __maybe_unused)
2492 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2493 printf("%15s ", tstr);
2494 printf("lost %" PRIu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2500 static void print_thread_runtime(struct thread *t,
2501 struct thread_runtime *r)
2503 double mean = avg_stats(&r->run_stats);
2506 printf("%*s %5d %9" PRIu64 " ",
2507 comm_width, timehist_get_commstr(t), t->ppid,
2508 (u64) r->run_stats.n);
2510 print_sched_time(r->total_run_time, 8);
2511 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2512 print_sched_time(r->run_stats.min, 6);
2514 print_sched_time((u64) mean, 6);
2516 print_sched_time(r->run_stats.max, 6);
2518 printf("%5.2f", stddev);
2519 printf(" %5" PRIu64, r->migrations);
2523 struct total_run_stats {
2529 static int __show_thread_runtime(struct thread *t, void *priv)
2531 struct total_run_stats *stats = priv;
2532 struct thread_runtime *r;
2534 if (thread__is_filtered(t))
2537 r = thread__priv(t);
2538 if (r && r->run_stats.n) {
2539 stats->task_count++;
2540 stats->sched_count += r->run_stats.n;
2541 stats->total_run_time += r->total_run_time;
2542 print_thread_runtime(t, r);
2548 static int show_thread_runtime(struct thread *t, void *priv)
2553 return __show_thread_runtime(t, priv);
2556 static int show_deadthread_runtime(struct thread *t, void *priv)
2561 return __show_thread_runtime(t, priv);
2564 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2566 const char *sep = " <- ";
2567 struct callchain_list *chain;
2575 ret = callchain__fprintf_folded(fp, node->parent);
2578 list_for_each_entry(chain, &node->val, list) {
2579 if (chain->ip >= PERF_CONTEXT_MAX)
2581 if (chain->ms.sym && chain->ms.sym->ignore)
2583 ret += fprintf(fp, "%s%s", first ? "" : sep,
2584 callchain_list__sym_name(chain, bf, sizeof(bf),
2592 static size_t timehist_print_idlehist_callchain(struct rb_root *root)
2596 struct callchain_node *chain;
2597 struct rb_node *rb_node = rb_first(root);
2599 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
2600 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
2604 chain = rb_entry(rb_node, struct callchain_node, rb_node);
2605 rb_node = rb_next(rb_node);
2607 ret += fprintf(fp, " ");
2608 print_sched_time(chain->hit, 12);
2609 ret += 16; /* print_sched_time returns 2nd arg + 4 */
2610 ret += fprintf(fp, " %8d ", chain->count);
2611 ret += callchain__fprintf_folded(fp, chain);
2612 ret += fprintf(fp, "\n");
static void timehist_print_summary(struct perf_sched *sched,
				   struct perf_session *session)
{
	struct machine *m = &session->machines.host;
	struct total_run_stats totals;
	u64 task_count;
	struct thread *t;
	struct thread_runtime *r;
	int i;

	memset(&totals, 0, sizeof(totals));

	if (sched->idle_hist) {
		printf("\nIdle-time summary\n");
		printf("%*s  parent  sched-out  ", comm_width, "comm");
		printf("  idle-time   min-idle    avg-idle    max-idle  stddev  migrations\n");
	} else {
		printf("\nRuntime summary\n");
		printf("%*s  parent   sched-in  ", comm_width, "comm");
		printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
	}
	printf("%*s            (count)  ", comm_width, "");
	printf("     (msec)     (msec)      (msec)      (msec)       %%\n");
	printf("%.117s\n", graph_dotted_line);

	machine__for_each_thread(m, show_thread_runtime, &totals);
	task_count = totals.task_count;
	if (!task_count)
		printf("<no still running tasks>\n");

	printf("\nTerminated tasks:\n");
	machine__for_each_thread(m, show_deadthread_runtime, &totals);
	if (task_count == totals.task_count)
		printf("<no terminated tasks>\n");

	/* CPU idle stats not tracked when samples were skipped */
	if (sched->skipped_samples && !sched->idle_hist)
		return;

	printf("\nIdle stats:\n");
	for (i = 0; i < idle_max_cpu; ++i) {
		t = idle_threads[i];
		if (!t)
			continue;

		r = thread__priv(t);
		if (r && r->run_stats.n) {
			totals.sched_count += r->run_stats.n;
			printf("    CPU %2d idle for ", i);
			print_sched_time(r->total_run_time, 6);
			printf(" msec\n");
		} else
			printf("    CPU %2d idle entire time window\n", i);
	}

	if (sched->idle_hist && symbol_conf.use_callchain) {
		callchain_param.mode  = CHAIN_FOLDED;
		callchain_param.value = CCVAL_PERIOD;

		callchain_register_param(&callchain_param);

		printf("\nIdle stats by callchain:\n");
		for (i = 0; i < idle_max_cpu; ++i) {
			struct idle_thread_runtime *itr;

			t = idle_threads[i];
			if (!t)
				continue;

			itr = thread__priv(t);
			if (itr == NULL)
				continue;

			callchain_param.sort(&itr->sorted_root, &itr->callchain,
					     0, &callchain_param);

			printf("  CPU %2d:", i);
			print_sched_time(itr->tr.total_run_time, 6);
			printf(" msec\n");
			timehist_print_idlehist_callchain(&itr->sorted_root);
			printf("\n");
		}
	}

	printf("\n"
	       "    Total number of unique tasks: %" PRIu64 "\n"
	       "Total number of context switches: %" PRIu64 "\n"
	       "           Total run time (msec): ",
	       totals.task_count, totals.sched_count);

	print_sched_time(totals.total_run_time, 2);
	printf("\n");
}
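/*
 * Sample dispatch: each tracepoint evsel gets a handler attached via
 * perf_session__set_tracepoints_handlers() below, and process_sample
 * simply forwards to it. The cast through sched_handler assumes every
 * registered handler matches this signature.
 */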
typedef int (*sched_handler)(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_evsel *evsel,
			     struct perf_sample *sample,
			     struct machine *machine);
static int perf_timehist__process_sample(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct perf_evsel *evsel,
					 struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int err = 0;
	int this_cpu = sample->cpu;

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (evsel->handler != NULL) {
		sched_handler f = evsel->handler;

		err = f(tool, event, evsel, sample, machine);
	}

	return err;
}
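/*
 * Validate the recorded attributes: allocate per-evsel runtime state,
 * and quietly disable callchain display when the samples were recorded
 * without PERF_SAMPLE_CALLCHAIN (e.g. 'perf sched record' without -g).
 */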
static int timehist_check_attr(struct perf_sched *sched,
			       struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct evsel_runtime *er;

	list_for_each_entry(evsel, &evlist->entries, node) {
		er = perf_evsel__get_runtime(evsel);
		if (er == NULL) {
			pr_err("Failed to allocate memory for evsel runtime data\n");
			return -1;
		}

		if (sched->show_callchain &&
		    !(evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) {
			pr_info("Samples do not have callchains.\n");
			sched->show_callchain = 0;
			symbol_conf.use_callchain = 0;
		}
	}

	return 0;
}
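/*
 * Entry point for 'perf sched timehist': open perf.data, wire up the
 * tracepoint handlers, then stream events in timestamp order, printing
 * one line per scheduling change plus an optional summary at the end.
 */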
static int perf_sched__timehist(struct perf_sched *sched)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",       timehist_sched_switch_event, },
		{ "sched:sched_wakeup",	      timehist_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
	};
	const struct perf_evsel_str_handler migrate_handlers[] = {
		{ "sched:sched_migrate_task", timehist_migrate_task_event, },
	};
	struct perf_data_file file = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};

	struct perf_session *session;
	struct perf_evlist *evlist;
	int err = -1;

	/*
	 * event handlers for timehist option
	 */
	sched->tool.sample	 = perf_timehist__process_sample;
	sched->tool.mmap	 = perf_event__process_mmap;
	sched->tool.comm	 = perf_event__process_comm;
	sched->tool.exit	 = perf_event__process_exit;
	sched->tool.fork	 = perf_event__process_fork;
	sched->tool.lost	 = process_lost;
	sched->tool.attr	 = perf_event__process_attr;
	sched->tool.tracing_data = perf_event__process_tracing_data;
	sched->tool.build_id	 = perf_event__process_build_id;

	sched->tool.ordered_events = true;
	sched->tool.ordering_requires_timestamps = true;

	symbol_conf.use_callchain = sched->show_callchain;

	session = perf_session__new(&file, false, &sched->tool);
	if (session == NULL)
		return -ENOMEM;

	evlist = session->evlist;

	symbol__init(&session->header.env);

	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
		pr_err("Invalid time string\n");
		return -EINVAL;
	}

	if (timehist_check_attr(sched, evlist) != 0)
		goto out;

	setup_pager();

	/* setup per-evsel handlers */
	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out;

	/* sched_switch event at a minimum needs to exist */
	if (!perf_evlist__find_tracepoint_by_name(session->evlist,
						  "sched:sched_switch")) {
		pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
		goto out;
	}

	if (sched->show_migrations &&
	    perf_session__set_tracepoints_handlers(session, migrate_handlers))
		goto out;

	/* pre-allocate struct for per-CPU idle stats */
	sched->max_cpu = session->header.env.nr_cpus_online;
	if (sched->max_cpu == 0)
		sched->max_cpu = 4;
	if (init_idle_threads(sched->max_cpu))
		goto out;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (sched->summary_only)
		sched->summary = sched->summary_only;

	if (!sched->summary_only)
		timehist_header(sched);

	err = perf_session__process_events(session);
	if (err) {
		pr_err("Failed to process events, error %d\n", err);
		goto out;
	}

	sched->nr_events      = evlist->stats.nr_events[0];
	sched->nr_lost_events = evlist->stats.total_lost;
	sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];

	if (sched->summary)
		timehist_print_summary(sched, session);

out:
	free_idle_threads();
	perf_session__delete(session);

	return err;
}
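/*
 * Report data-quality problems seen while processing: out-of-order
 * timestamps, lost events, and switch events whose "prev" task did not
 * match the task we thought was running (often a consequence of loss).
 */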
static void print_bad_events(struct perf_sched *sched)
{
	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
			sched->nr_unordered_timestamps, sched->nr_timestamps);
	}
	if (sched->nr_lost_events && sched->nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
	}
	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
			sched->nr_context_switch_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}
static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct work_atoms *this;
	const char *comm = thread__comm_str(data->thread), *this_comm;

	while (*new) {
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		this_comm = thread__comm_str(this->thread);
		cmp = strcmp(comm, this_comm);
		if (cmp > 0) {
			new = &((*new)->rb_left);
		} else if (cmp < 0) {
			new = &((*new)->rb_right);
		} else {
			this->num_merged++;
			this->total_runtime += data->total_runtime;
			this->nb_atoms += data->nb_atoms;
			this->total_lat += data->total_lat;
			list_splice(&data->work_list, &this->work_list);
			if (this->max_lat < data->max_lat) {
				this->max_lat = data->max_lat;
				this->max_lat_at = data->max_lat_at;
			}
			zfree(&data);
			return;
		}
	}

	data->num_merged = 1;
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
static void perf_sched__merge_lat(struct perf_sched *sched)
{
	struct work_atoms *data;
	struct rb_node *node;

	if (sched->skip_merge)
		return;

	while ((node = rb_first(&sched->atom_root))) {
		rb_erase(node, &sched->atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__merge_work_atoms(&sched->merged_atom_root, data);
	}
}
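/*
 * 'perf sched latency': read the recorded events into wakeup/switch
 * atoms, merge them per comm, sort by the --sort keys, and print the
 * latency table followed by any data-quality warnings.
 */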
static int perf_sched__lat(struct perf_sched *sched)
{
	struct rb_node *next;

	setup_pager();

	if (perf_sched__read_events(sched))
		return -1;

	perf_sched__merge_lat(sched);
	perf_sched__sort_lat(sched);

	printf("\n -----------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at       |\n");
	printf(" -----------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
		thread__zput(work_list->thread);
	}

	printf(" -----------------------------------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	return 0;
}
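/*
 * 'perf sched map' setup helpers: size the per-CPU state from the
 * configured processor count, and resolve the --cpus, --color-pids and
 * --color-cpus option strings into cpu/thread maps.
 */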
static int setup_map_cpus(struct perf_sched *sched)
{
	struct cpu_map *map;

	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	if (sched->map.comp) {
		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
		if (!sched->map.comp_cpus)
			return -1;
	}

	if (!sched->map.cpus_str)
		return 0;

	map = cpu_map__new(sched->map.cpus_str);
	if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
		return -1;
	}

	sched->map.cpus = map;
	return 0;
}
static int setup_color_pids(struct perf_sched *sched)
{
	struct thread_map *map;

	if (!sched->map.color_pids_str)
		return 0;

	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
	if (!map) {
		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
		return -1;
	}

	sched->map.color_pids = map;
	return 0;
}
static int setup_color_cpus(struct perf_sched *sched)
{
	struct cpu_map *map;

	if (!sched->map.color_cpus_str)
		return 0;

	map = cpu_map__new(sched->map.color_cpus_str);
	if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
		return -1;
	}

	sched->map.color_cpus = map;
	return 0;
}
static int perf_sched__map(struct perf_sched *sched)
{
	if (setup_map_cpus(sched))
		return -1;

	if (setup_color_pids(sched))
		return -1;

	if (setup_color_cpus(sched))
		return -1;

	setup_pager();
	if (perf_sched__read_events(sched))
		return -1;
	print_bad_events(sched);
	return 0;
}
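/*
 * 'perf sched replay': measure the spin/sleep measurement overheads,
 * rebuild the traced tasks as real threads, then re-execute the
 * recorded scheduling pattern --repeat times, timing each run.
 */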
static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	if (perf_sched__read_events(sched))
		return -1;

	printf("nr_run_events:        %ld\n", sched->nr_run_events);
	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized:  %ld\n",
			sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	return 0;
}
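/*
 * Parse the --sort key list (comma or space separated, e.g.
 * "-s max,switch") into sort_list; an unknown key aborts with a usage
 * message. "pid" is always added to cmp_pid, the key used to group
 * atoms per task before sorting.
 */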
static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			usage_with_options_msg(usage_msg, options,
					"Unknown --sort key: `%s'", tok);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}
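/*
 * 'perf sched record' is a thin wrapper: it prepends the fixed record
 * arguments and sched tracepoint events below to the user's arguments
 * and hands the result to cmd_record(), so 'perf sched record -- sleep 1'
 * becomes roughly 'perf record -a ... -e sched:sched_switch ... sleep 1'.
 */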
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_wakeup_new",
		"-e", "sched:sched_migrate_task",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
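/*
 * Top-level dispatcher for 'perf sched': build the per-subcommand
 * option tables and tracepoint handler ops, then branch on the
 * subcommand name (record/latency/map/replay/script/timehist).
 */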
int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		= perf_sched__process_tracepoint_sample,
			.comm		= perf_event__process_comm,
			.lost		= perf_event__process_lost,
			.fork		= perf_sched__process_fork_event,
			.ordered_events	= true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
		.show_callchain	      = 1,
		.max_stack	      = 5,
	};
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
	OPT_END()
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
		    "latency stats per pid instead of per comm"),
	OPT_PARENT(sched_options)
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_PARENT(sched_options)
	};
	const struct option map_options[] = {
	OPT_BOOLEAN(0, "compact", &sched.map.comp,
		    "map output in compact mode"),
	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
		   "highlight given pids in map"),
	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
		   "highlight given CPUs in map"),
	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
		   "display given CPUs in map"),
	OPT_PARENT(sched_options)
	};
	const struct option timehist_options[] = {
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
		    "Display call chains if present (default on)"),
	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
		     "Maximum number of functions to display backtrace."),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		   "Look for files with symbols relative to this directory"),
	OPT_BOOLEAN('s', "summary", &sched.summary_only,
		    "Show only a summary of scheduling, with statistics"),
	OPT_BOOLEAN('S', "with-summary", &sched.summary,
		    "Show all scheduling events and a summary with statistics"),
	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
	OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
	OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
	OPT_STRING(0, "time", &sched.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_PARENT(sched_options)
	};

	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const map_usage[] = {
		"perf sched map [<options>]",
		NULL
	};
	const char * const timehist_usage[] = {
		"perf sched timehist [<options>]",
		NULL
	};
	const char *const sched_subcommands[] = { "record", "latency", "map",
						  "replay", "script",
						  "timehist", NULL };
	const char *sched_usage[] = {
		NULL,
		NULL
	};
	struct trace_sched_handler lat_ops = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
		sched.curr_pid[i] = -1;

	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		if (argc) {
			argc = parse_options(argc, argv, map_options, map_usage, 0);
			if (argc)
				usage_with_options(map_usage, map_options);
		}
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (!strncmp(argv[0], "rep", 3)) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else if (!strcmp(argv[0], "timehist")) {
		if (argc) {
			argc = parse_options(argc, argv, timehist_options,
					     timehist_usage, 0);
			if (argc)
				usage_with_options(timehist_usage, timehist_options);
		}

		if (sched.show_wakeups && sched.summary_only) {
			pr_err(" Error: -s and -w are mutually exclusive.\n");
			parse_options_usage(timehist_usage, timehist_options, "s", true);
			parse_options_usage(NULL, timehist_options, "w", true);
			return -EINVAL;
		}

		return perf_sched__timehist(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}