 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
#include <traceevent/event-parse.h>
#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1
static unsigned int numcpus;
static u64 min_freq;	/* Lowest CPU frequency seen */
static u64 max_freq;	/* Highest CPU frequency seen */
static u64 turbo_frequency;
static u64 first_time, last_time;
static bool power_only;
struct sample_wrapper;
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *      This is because we want to track different programs separately, while
 *      exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw the final graph.
        struct per_pidcomm *all;
        struct per_pidcomm *current;
        struct per_pidcomm *next;
        struct cpu_sample *samples;
struct sample_wrapper {
        struct sample_wrapper *next;
        unsigned char data[0];
#define TYPE_RUNNING 1
#define TYPE_WAITING 2
#define TYPE_BLOCKED 3
        struct cpu_sample *next;
static struct per_pid *all_data;
        struct power_event *next;
        struct wake_event *next;
static struct power_event *power_events;
static struct wake_event *wake_events;
struct process_filter;
struct process_filter {
        struct process_filter *next;
static struct process_filter *process_filter;
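/*
 * Find the per_pid entry for @pid in the global all_data list,
 * allocating and linking a new entry at the head if none exists yet.
 */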
static struct per_pid *find_create_pid(int pid)
        struct per_pid *cursor = all_data;
        if (cursor->pid == pid)
        cursor = cursor->next;
        cursor = zalloc(sizeof(*cursor));
        assert(cursor != NULL);
        cursor->next = all_data;
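/*
 * Record the command name @comm for @pid: reuse an existing per_pidcomm
 * entry whose name matches, otherwise allocate a fresh one.
 */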
static void pid_set_comm(int pid, char *comm)
        struct per_pidcomm *c;
        p = find_create_pid(pid);
        if (c->comm && strcmp(c->comm, comm) == 0) {
        c->comm = strdup(comm);
        c = zalloc(sizeof(*c));
        c->comm = strdup(comm);
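/*
 * A fork: the child inherits the parent's current comm name, and its
 * start_time and state_since are stamped with the fork timestamp.
 */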
static void pid_fork(int pid, int ppid, u64 timestamp)
        struct per_pid *p, *pp;
        p = find_create_pid(pid);
        pp = find_create_pid(ppid);
        if (pp->current && pp->current->comm && !p->current)
                pid_set_comm(pid, pp->current->comm);
        p->start_time = timestamp;
        p->current->start_time = timestamp;
        p->current->state_since = timestamp;
static void pid_exit(int pid, u64 timestamp)
        p = find_create_pid(pid);
        p->end_time = timestamp;
        p->current->end_time = timestamp;
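/*
 * Attach a sample of @type covering [start, end] to @pid's current comm.
 * RUNNING samples also feed the per-comm and per-pid total_time accounting,
 * and the earliest start time seen is propagated to the comm and the pid.
 */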
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
        struct per_pidcomm *c;
        struct cpu_sample *sample;
        p = find_create_pid(pid);
        c = zalloc(sizeof(*c));
        sample = zalloc(sizeof(*sample));
        assert(sample != NULL);
        sample->start_time = start;
        sample->end_time = end;
        sample->next = c->samples;
        if (sample->type == TYPE_RUNNING && end > start && start > 0) {
                c->total_time += (end - start);
                p->total_time += (end - start);
        if (c->start_time == 0 || c->start_time > start)
                c->start_time = start;
        if (p->start_time == 0 || p->start_time > start)
                p->start_time = start;
#define MAX_CPUS 4096
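/*
 * Per-CPU bookkeeping for the C-state / P-state interval currently in
 * progress: when it started and which idle state / frequency it is in.
 */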
static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
static int process_comm_event(struct perf_tool *tool __maybe_unused,
                union perf_event *event,
                struct perf_sample *sample __maybe_unused,
                struct machine *machine __maybe_unused)
        pid_set_comm(event->comm.tid, event->comm.comm);
static int process_fork_event(struct perf_tool *tool __maybe_unused,
                union perf_event *event,
                struct perf_sample *sample __maybe_unused,
                struct machine *machine __maybe_unused)
        pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
static int process_exit_event(struct perf_tool *tool __maybe_unused,
                union perf_event *event,
                struct perf_sample *sample __maybe_unused,
                struct machine *machine __maybe_unused)
        pid_exit(event->fork.pid, event->fork.time);
        unsigned char preempt_count;
#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
struct power_entry_old {
        struct trace_entry te;
struct power_processor_entry {
        struct trace_entry te;
#define TASK_COMM_LEN 16
struct wakeup_entry {
        struct trace_entry te;
        char comm[TASK_COMM_LEN];
struct sched_switch {
        struct trace_entry te;
        char prev_comm[TASK_COMM_LEN];
        long prev_state; /* Arjan weeps. */
        char next_comm[TASK_COMM_LEN];
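/* A CPU entered an idle (C) state: remember the entry time and the state. */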
static void c_state_start(int cpu, u64 timestamp, int state)
        cpus_cstate_start_times[cpu] = timestamp;
        cpus_cstate_state[cpu] = state;
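/* The CPU left its idle state: emit a power_event covering the completed interval. */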
static void c_state_end(int cpu, u64 timestamp)
        struct power_event *pwr = zalloc(sizeof(*pwr));
        pwr->state = cpus_cstate_state[cpu];
        pwr->start_time = cpus_cstate_start_times[cpu];
        pwr->end_time = timestamp;
        pwr->next = power_events;
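/*
 * The CPU changed frequency: close the open P-state interval as a
 * power_event, open a new one, and track the lowest/highest/turbo
 * frequencies seen along the way.
 */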
static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
        struct power_event *pwr;
        if (new_freq > 8000000) /* detect invalid data */
        pwr = zalloc(sizeof(*pwr));
        pwr->state = cpus_pstate_state[cpu];
        pwr->start_time = cpus_pstate_start_times[cpu];
        pwr->end_time = timestamp;
        pwr->next = power_events;
        if (!pwr->start_time)
                pwr->start_time = first_time;
        cpus_pstate_state[cpu] = new_freq;
        cpus_pstate_start_times[cpu] = timestamp;
        if ((u64)new_freq > max_freq)
        if (new_freq < min_freq || min_freq == 0)
        if (new_freq == max_freq - 1000)
                turbo_frequency = max_freq;
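/*
 * A task was woken up: record a wake_event and, if the wakee was blocked,
 * close that sample and mark the task as waiting for a CPU.
 */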
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
        struct wakeup_entry *wake = (void *)te;
        struct wake_event *we = zalloc(sizeof(*we));
        we->time = timestamp;
        if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
        we->wakee = wake->pid;
        we->next = wake_events;
        p = find_create_pid(we->wakee);
        if (p && p->current && p->current->state == TYPE_NONE) {
                p->current->state_since = timestamp;
                p->current->state = TYPE_WAITING;
        if (p && p->current && p->current->state == TYPE_BLOCKED) {
                pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
                p->current->state_since = timestamp;
                p->current->state = TYPE_WAITING;
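/*
 * A context switch: close the RUNNING sample of the previous task, open a
 * RUNNING interval for the next task, and mark the previous task as
 * BLOCKED or WAITING depending on the scheduler state it left with.
 */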
static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
        struct per_pid *p = NULL, *prev_p;
        struct sched_switch *sw = (void *)te;
        prev_p = find_create_pid(sw->prev_pid);
        p = find_create_pid(sw->next_pid);
        if (prev_p->current && prev_p->current->state != TYPE_NONE)
                pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
        if (p && p->current) {
                if (p->current->state != TYPE_NONE)
                        pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);
                p->current->state_since = timestamp;
                p->current->state = TYPE_RUNNING;
        if (prev_p->current) {
                prev_p->current->state = TYPE_NONE;
                prev_p->current->state_since = timestamp;
                if (sw->prev_state & 2)
                        prev_p->current->state = TYPE_BLOCKED;
                if (sw->prev_state == 0)
                        prev_p->current->state = TYPE_WAITING;
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
                struct perf_sample *sample);
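/*
 * Generic sample handler: track the overall trace time window and the
 * number of CPUs seen, then dispatch to the per-tracepoint handler that
 * was attached to the evsel.
 */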
static int process_sample_event(struct perf_tool *tool __maybe_unused,
                union perf_event *event __maybe_unused,
                struct perf_sample *sample,
                struct perf_evsel *evsel,
                struct machine *machine __maybe_unused)
        if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
                if (!first_time || first_time > sample->time)
                        first_time = sample->time;
                if (last_time < sample->time)
                        last_time = sample->time;
        if (sample->cpu > numcpus)
                numcpus = sample->cpu;
        if (evsel->handler.func != NULL) {
                tracepoint_handler f = evsel->handler.func;
                return f(evsel, sample);
process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
                struct perf_sample *sample)
        struct power_processor_entry *ppe = sample->raw_data;
        if (ppe->state == (u32) PWR_EVENT_EXIT)
                c_state_end(ppe->cpu_id, sample->time);
        c_state_start(ppe->cpu_id, sample->time, ppe->state);
process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
                struct perf_sample *sample)
        struct power_processor_entry *ppe = sample->raw_data;
        p_state_change(ppe->cpu_id, sample->time, ppe->state);
process_sample_sched_wakeup(struct perf_evsel *evsel __maybe_unused,
                struct perf_sample *sample)
        struct trace_entry *te = sample->raw_data;
        sched_wakeup(sample->cpu, sample->time, sample->pid, te);
process_sample_sched_switch(struct perf_evsel *evsel __maybe_unused,
                struct perf_sample *sample)
        struct trace_entry *te = sample->raw_data;
        sched_switch(sample->cpu, sample->time, te);
#ifdef SUPPORT_OLD_POWER_EVENTS
process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
                struct perf_sample *sample)
        struct power_entry_old *peo = sample->raw_data;
        c_state_start(peo->cpu_id, sample->time, peo->value);
process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
                struct perf_sample *sample)
        c_state_end(sample->cpu, sample->time);
process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
                struct perf_sample *sample)
        struct power_entry_old *peo = sample->raw_data;
        p_state_change(peo->cpu_id, sample->time, peo->value);
#endif /* SUPPORT_OLD_POWER_EVENTS */
 * After the last sample we need to wrap up the current C/P state
 * and close it out for each CPU.
static void end_sample_processing(void)
        struct power_event *pwr;
        for (cpu = 0; cpu <= numcpus; cpu++) {
                pwr = zalloc(sizeof(*pwr));
                pwr->state = cpus_cstate_state[cpu];
                pwr->start_time = cpus_cstate_start_times[cpu];
                pwr->end_time = last_time;
                pwr->next = power_events;
                pwr = zalloc(sizeof(*pwr));
                pwr->state = cpus_pstate_state[cpu];
                pwr->start_time = cpus_pstate_start_times[cpu];
                pwr->end_time = last_time;
                pwr->next = power_events;
                if (!pwr->start_time)
                        pwr->start_time = first_time;
                pwr->state = min_freq;
 * Sort the pid data structure
static void sort_pids(void)
        struct per_pid *new_list, *p, *cursor, *prev;
        /* sort by ppid first, then by pid, lowest to highest */
        if (new_list == NULL) {
        if (cursor->ppid > p->ppid ||
            (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
                /* must insert before */
                p->next = prev->next;
        cursor = cursor->next;
static void draw_c_p_states(void)
        struct power_event *pwr;
         * two pass drawing so that the P state bars are on top of the C state blocks
        if (pwr->type == CSTATE)
                svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
        if (pwr->type == PSTATE) {
                pwr->state = min_freq;
                svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
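/*
 * For every wakeup, locate the chart rows (Y columns) of the waker and the
 * wakee and draw the corresponding wakeup/interrupt line between them.
 */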
static void draw_wakeups(void)
        struct wake_event *we;
        struct per_pidcomm *c;
        int from = 0, to = 0;
        char *task_from = NULL, *task_to = NULL;
        /* locate the column of the waker and wakee */
        if (p->pid == we->waker || p->pid == we->wakee) {
                if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
                        if (p->pid == we->waker && !from) {
                                task_from = strdup(c->comm);
                        if (p->pid == we->wakee && !to) {
                                task_to = strdup(c->comm);
                if (p->pid == we->waker && !from) {
                        task_from = strdup(c->comm);
                if (p->pid == we->wakee && !to) {
                        task_to = strdup(c->comm);
        task_from = malloc(40);
        sprintf(task_from, "[%i]", we->waker);
        task_to = malloc(40);
        sprintf(task_to, "[%i]", we->wakee);
                svg_interrupt(we->time, to);
        else if (from && to && abs(from - to) == 1)
                svg_wakeline(we->time, from, to);
                svg_partial_wakeline(we->time, from, task_from, to, task_to);
static void draw_cpu_usage(void)
        struct per_pidcomm *c;
        struct cpu_sample *sample;
        if (sample->type == TYPE_RUNNING)
                svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);
        sample = sample->next;
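/*
 * Draw one row per displayed comm: a "process" box for its lifetime plus
 * RUNNING/BLOCKED/WAITING sub-boxes for every sample, labelled with the
 * comm, the pid and the accumulated CPU time.
 */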
static void draw_process_bars(void)
        struct per_pidcomm *c;
        struct cpu_sample *sample;
        svg_box(Y, c->start_time, c->end_time, "process");
        if (sample->type == TYPE_RUNNING)
                svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
        if (sample->type == TYPE_BLOCKED)
                svg_box(Y, sample->start_time, sample->end_time, "blocked");
        if (sample->type == TYPE_WAITING)
                svg_waiting(Y, sample->start_time, sample->end_time);
        sample = sample->next;
        if (c->total_time > 5000000000) /* 5 seconds */
                sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
        sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);
        svg_text(Y, c->start_time, comm);
static void add_process_filter(const char *string)
        int pid = strtoull(string, NULL, 10);
        struct process_filter *filt = malloc(sizeof(*filt));
        filt->name = strdup(string);
        filt->next = process_filter;
        process_filter = filt;
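/* Return whether @p/@c matches one of the --process filters, by pid or by comm name. */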
static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
        struct process_filter *filt;
        filt = process_filter;
        if (filt->pid && p->pid == filt->pid)
        if (strcmp(filt->name, c->comm) == 0)
static int determine_display_tasks_filtered(void)
        struct per_pidcomm *c;
        if (p->start_time == 1)
                p->start_time = first_time;
        /* no exit marker, task kept running to the end */
        if (p->end_time == 0)
                p->end_time = last_time;
        if (c->start_time == 1)
                c->start_time = first_time;
        if (passes_filter(p, c)) {
        if (c->end_time == 0)
                c->end_time = last_time;
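/*
 * Decide which pids/comms get a row in the chart: everything that matches a
 * process filter, or (without filters) every task that accumulated at least
 * @threshold ns of CPU time, unless --power-only was given.
 */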
static int determine_display_tasks(u64 threshold)
        struct per_pidcomm *c;
        if (process_filter)
                return determine_display_tasks_filtered();
        if (p->start_time == 1)
                p->start_time = first_time;
        /* no exit marker, task kept running to the end */
        if (p->end_time == 0)
                p->end_time = last_time;
        if (p->total_time >= threshold && !power_only)
        if (c->start_time == 1)
                c->start_time = first_time;
        if (c->total_time >= threshold && !power_only) {
        if (c->end_time == 0)
                c->end_time = last_time;
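/* Default visibility threshold: 10 ms of accumulated CPU time, in nanoseconds. */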
#define TIME_THRESH 10000000
static void write_svg_file(const char *filename)
        count = determine_display_tasks(TIME_THRESH);
        /* We'd like to show at least 15 tasks; be less picky if we have fewer */
        count = determine_display_tasks(TIME_THRESH / 10);
        open_svg(filename, numcpus, count, first_time, last_time);
        for (i = 0; i < numcpus; i++)
                svg_cpu_box(i, max_freq, turbo_frequency);
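/*
 * Read the recorded events from the perf data file, run them through the
 * handlers above and then render the result as an SVG file.
 */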
static int __cmd_timechart(const char *output_name)
        struct perf_tool perf_timechart = {
                .comm = process_comm_event,
                .fork = process_fork_event,
                .exit = process_exit_event,
                .sample = process_sample_event,
                .ordered_samples = true,
        const struct perf_evsel_str_handler power_tracepoints[] = {
                { "power:cpu_idle", process_sample_cpu_idle },
                { "power:cpu_frequency", process_sample_cpu_frequency },
                { "sched:sched_wakeup", process_sample_sched_wakeup },
                { "sched:sched_switch", process_sample_sched_switch },
#ifdef SUPPORT_OLD_POWER_EVENTS
                { "power:power_start", process_sample_power_start },
                { "power:power_end", process_sample_power_end },
                { "power:power_frequency", process_sample_power_frequency },
        struct perf_session *session = perf_session__new(input_name, O_RDONLY,
                        0, false, &perf_timechart);
        if (!perf_session__has_traces(session, "timechart record"))
        if (perf_session__set_tracepoints_handlers(session,
                        power_tracepoints)) {
                pr_err("Initializing session tracepoint handlers failed\n");
        ret = perf_session__process_events(session, &perf_timechart);
        end_sample_processing();
        write_svg_file(output_name);
        pr_info("Written %2.1f seconds of trace to %s.\n",
                (last_time - first_time) / 1000000000.0, output_name);
        perf_session__delete(session);
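/*
 * 'perf timechart record': forward to 'perf record' with the power and
 * scheduler tracepoints appended, falling back to the old power:power_*
 * events on kernels that do not have power:cpu_idle.
 */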
static int __cmd_record(int argc, const char **argv)
#ifdef SUPPORT_OLD_POWER_EVENTS
        const char * const record_old_args[] = {
                "record", "-a", "-R", "-c", "1",
                "-e", "power:power_start",
                "-e", "power:power_end",
                "-e", "power:power_frequency",
                "-e", "sched:sched_wakeup",
                "-e", "sched:sched_switch",
        const char * const record_new_args[] = {
                "record", "-a", "-R", "-c", "1",
                "-e", "power:cpu_frequency",
                "-e", "power:cpu_idle",
                "-e", "sched:sched_wakeup",
                "-e", "sched:sched_switch",
        unsigned int rec_argc, i, j;
        const char **rec_argv;
        const char * const *record_args = record_new_args;
        unsigned int record_elems = ARRAY_SIZE(record_new_args);
#ifdef SUPPORT_OLD_POWER_EVENTS
        if (!is_valid_tracepoint("power:cpu_idle") &&
            is_valid_tracepoint("power:power_start")) {
                use_old_power_events = 1;
                record_args = record_old_args;
                record_elems = ARRAY_SIZE(record_old_args);
        rec_argc = record_elems + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        if (rec_argv == NULL)
        for (i = 0; i < record_elems; i++)
                rec_argv[i] = strdup(record_args[i]);
        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];
        return cmd_record(i, rec_argv, NULL);
parse_process(const struct option *opt __maybe_unused, const char *arg,
                int __maybe_unused unset)
        add_process_filter(arg);
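/*
 * Entry point for 'perf timechart': parse the options, then either record a
 * new trace or render an existing one into the output SVG.
 */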
int cmd_timechart(int argc, const char **argv,
                const char *prefix __maybe_unused)
        const char *output_name = "output.svg";
        const struct option options[] = {
                OPT_STRING('i', "input", &input_name, "file", "input file name"),
                OPT_STRING('o', "output", &output_name, "file", "output file name"),
                OPT_INTEGER('w', "width", &svg_page_width, "page width"),
                OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"),
                OPT_CALLBACK('p', "process", NULL, "process",
                        "process selector. Pass a pid or process name.",
                OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
                        "Look for files with symbols relative to this directory"),
        const char * const timechart_usage[] = {
                "perf timechart [<options>] {record}",
        argc = parse_options(argc, argv, options, timechart_usage,
                        PARSE_OPT_STOP_AT_NON_OPTION);
        if (argc && !strncmp(argv[0], "rec", 3))
                return __cmd_record(argc, argv);
        usage_with_options(timechart_usage, options);
        return __cmd_timechart(output_name);