// SPDX-License-Identifier: GPL-2.0
/*
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "util/config.h"
#include "util/annotate.h"
#include "util/color.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include "util/symbol.h"
#include "util/map_symbol.h"
#include "util/mem-events.h"
#include "util/branch.h"
#include "util/callchain.h"
#include "util/values.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evswitch.h"
#include "util/header.h"
#include "util/session.h"
#include "util/srcline.h"
#include "util/tool.h"
#include <subcmd/parse-options.h>
#include <subcmd/exec-cmd.h>
#include "util/parse-events.h"
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
#include "util/data.h"
#include "arch/common.h"
#include "util/time-utils.h"
#include "util/auxtrace.h"
#include "util/units.h"
#include "util/util.h" // perf_tip()
#include "ui/progress.h"
#include "util/block-info.h"
#include <linux/ctype.h>
#include <linux/bitmap.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <linux/mman.h>
	struct perf_tool tool;
	struct perf_session *session;
	struct evswitch evswitch;
	bool use_tui, use_gtk, use_stdio;
	bool inverted_callchain;
	bool nonany_branch_mode;
	struct perf_read_values show_threads_values;
	struct annotation_options annotation_opts;
	const char *pretty_printing_style;
	const char *symbol_filter_str;
	struct perf_time_interval *ptime_range;
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
	struct branch_type_stat brtype_stat;
	bool total_cycles_mode;
	struct block_report *block_reports;
	int nr_block_reports;
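
/* Apply "report." settings from perfconfig to the report and to the global sort/callchain state. */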
static int report__config(const char *var, const char *value, void *cb)
	struct report *rep = cb;

	if (!strcmp(var, "report.group")) {
		symbol_conf.event_group = perf_config_bool(var, value);
	if (!strcmp(var, "report.percent-limit")) {
		double pcnt = strtof(value, NULL);

		rep->min_percent = pcnt;
		callchain_param.min_percent = pcnt;
	if (!strcmp(var, "report.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
	if (!strcmp(var, "report.queue-size"))
		return perf_config_u64(&rep->queue_size, var, value);
	if (!strcmp(var, "report.sort_order")) {
		default_sort_order = strdup(value);
	if (!strcmp(var, "report.skip-empty")) {
		rep->skip_empty = perf_config_bool(var, value);
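
/*
 * Per-sample hook invoked while hist entries are added: feed the resolved
 * addresses into the annotation sample counters, picking the source
 * (branch, memory or plain IP) that matches the current sort mode.
 */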
static int hist_iter__report_callback(struct hist_entry_iter *iter,
				      struct addr_location *al, bool single,
	struct report *rep = arg;
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct branch_info *bi;

	if (!ui__has_annotation() && !rep->symbol_ipc)

	if (sort__mode == SORT_MODE__BRANCH) {
		bi = he->branch_info;
		err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);

		err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
	} else if (rep->mem_mode) {
		err = addr_map_symbol__inc_samples(&mi->daddr, sample, evsel);
		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
	} else if (symbol_conf.cumulate_callchain) {
		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
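
/*
 * Branch-mode variant of the callback above; it additionally accumulates
 * per-branch-type statistics before updating the annotation counters.
 */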
static int hist_iter__branch_callback(struct hist_entry_iter *iter,
				      struct addr_location *al __maybe_unused,
				      bool single __maybe_unused,
	struct hist_entry *he = iter->he;
	struct report *rep = arg;
	struct branch_info *bi = he->branch_info;
	struct perf_sample *sample = iter->sample;
	struct evsel *evsel = iter->evsel;

	branch_type_count(&rep->brtype_stat, &bi->flags,
			  bi->from.addr, bi->to.addr);

	if (!ui__has_annotation() && !rep->symbol_ipc)

	err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);

	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
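
/* Honour an explicit --group request by forcing a single group leader on the evlist. */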
static void setup_forced_leader(struct report *report,
				struct evlist *evlist)
	if (report->group_set)
		evlist__force_leader(evlist);
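
/*
 * Handle PERF_RECORD_HEADER_FEATURE events; the HEADER_LAST_FEATURE end
 * marker tells us that all features have been received.
 */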
static int process_feature_event(struct perf_session *session,
				 union perf_event *event)
	struct report *rep = container_of(session->tool, struct report, tool);

	if (event->feat.feat_id < HEADER_LAST_FEATURE)
		return perf_event__process_feature(session, event);

	if (event->feat.feat_id != HEADER_LAST_FEATURE) {
		pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
		       event->feat.feat_id);
	} else if (rep->header_only) {

	/*
	 * (feat_id = HEADER_LAST_FEATURE) is the end marker which
	 * means all features are received, now we can force the
	 * group leader.
	 */
	setup_forced_leader(rep, session->evlist);
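
/*
 * Main PERF_RECORD_SAMPLE handler: apply the time, CPU and event filters,
 * resolve the sample location and add it to the histograms using the
 * iterator ops that match the current sort mode.
 */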
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
	struct report *rep = container_of(tool, struct report, tool);
	struct addr_location al;
	struct hist_entry_iter iter = {
		.hide_unresolved = symbol_conf.hide_unresolved,
		.add_entry_cb = hist_iter__report_callback,

	if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,

	if (evswitch__discard(&rep->evswitch, evsel))

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_debug("problem processing %d event, skipping it.\n",

	al.thread->lbr_stitch_enable = true;

	if (symbol_conf.hide_unresolved && al.sym == NULL)

	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))

	if (sort__mode == SORT_MODE__BRANCH) {
		/*
		 * A non-synthesized event might not have a branch stack if
		 * branch stacks have been synthesized (using itrace options).
		 */
		if (!sample->branch_stack)

		iter.add_entry_cb = hist_iter__branch_callback;
		iter.ops = &hist_iter_branch;
	} else if (rep->mem_mode) {
		iter.ops = &hist_iter_mem;
	} else if (symbol_conf.cumulate_callchain) {
		iter.ops = &hist_iter_cumulative;
		iter.ops = &hist_iter_normal;

	al.map->dso->hit = 1;

	if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
		hist__account_cycles(sample->branch_stack, &al, sample,
				     rep->nonany_branch_mode,

	ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
		pr_debug("problem adding hist entry, skipping event\n");

	addr_location__put(&al);
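
/* Collect per-thread counter values (-T/--threads) from PERF_RECORD_READ events. */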
static int process_read_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
	struct report *rep = container_of(tool, struct report, tool);

	if (rep->show_threads) {
		const char *name = evsel__name(evsel);
		int err = perf_read_values_add_value(&rep->show_threads_values,
						     event->read.pid, event->read.tid,
/* For pipe mode, sample_type is not currently set */
static int report__setup_sample_type(struct report *rep)
	struct perf_session *session = rep->session;
	u64 sample_type = evlist__combined_sample_type(session->evlist);
	bool is_pipe = perf_data__is_pipe(session->data);

	if (session->itrace_synth_opts->callchain ||
	    session->itrace_synth_opts->add_callchain ||
	    perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
	    !session->itrace_synth_opts->set))
		sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (session->itrace_synth_opts->last_branch ||
	    session->itrace_synth_opts->add_last_branch)
		sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
		if (perf_hpp_list.parent) {
			ui__error("Selected --sort parent, but no "
				  "callchain data. Did you call "
				  "'perf record' without -g?\n");
		if (symbol_conf.use_callchain &&
		    !symbol_conf.show_branchflag_count) {
			ui__error("Selected -g or --branch-history.\n"
				  "But no callchain or branch data.\n"
				  "Did you call 'perf record' without -g or -b?\n");
	} else if (!callchain_param.enabled &&
		   callchain_param.mode != CHAIN_NONE &&
		   !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			ui__error("Can't register callchain params.\n");

	if (symbol_conf.cumulate_callchain) {
		/* Silently ignore if callchain is missing */
		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
			symbol_conf.cumulate_callchain = false;
			perf_hpp__cancel_cumulate();

	if (sort__mode == SORT_MODE__BRANCH) {
		    !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
			ui__error("Selected -b but no branch data. "
				  "Did you call perf record without -b?\n");
	if (sort__mode == SORT_MODE__MEMORY) {
		/*
		 * FIXUP: prior to kernel 5.18, Arm SPE did not set the
		 * PERF_SAMPLE_DATA_SRC bit in the sample type. For backward
		 * compatibility, set the bit if it's an old perf data file.
		 */
		evlist__for_each_entry(session->evlist, evsel) {
			if (strstr(evsel->name, "arm_spe") &&
			    !(sample_type & PERF_SAMPLE_DATA_SRC)) {
				evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
				sample_type |= PERF_SAMPLE_DATA_SRC;

		if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
			ui__error("Selected --mem-mode but no mem data. "
				  "Did you call perf record without -d?\n");
	callchain_param_setup(sample_type);

	if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
		ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
			    "Please apply --call-graph lbr when recording.\n");
		rep->stitch_lbr = false;

	/* ??? handle more cases than just ANY? */
	if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
		rep->nonany_branch_mode = true;

#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT)
	if (dwarf_callchain_users) {
		ui__warning("Please install libunwind or libdw "
			    "development packages during the perf build.\n");

static void sig_handler(int sig __maybe_unused)
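
/*
 * Print the "# Samples: ... of event '...'" header line for one hists,
 * folding in group members and noting time slices and the socket filter.
 */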
static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
					      const char *evname, FILE *fp)
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	size_t size = sizeof(buf);
	int socked_id = hists->socket_filter;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;

	if (evsel__is_group_event(evsel)) {
		evsel__group_desc(evsel, buf, size);

		for_each_group_member(pos, evsel) {
			const struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;

	nr_samples = convert_unit(nr_samples, &unit);
	ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
	if (evname != NULL) {
		ret += fprintf(fp, " of event%s '%s'",
			       evsel->core.nr_members > 1 ? "s" : "", evname);

	ret += fprintf(fp, " (time slices: %s)", rep->time_str);

	if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
		ret += fprintf(fp, ", show reference callgraph");

	ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
	ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);

	ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);

	ret += fprintf(fp, "\n# Processor Socket: %d", socked_id);

	return ret + fprintf(fp, "\n#\n");
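
/* TUI browsing of the per-event block reports built for --total-cycles. */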
static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep)
	evlist__for_each_entry(evlist, pos) {
		ret = report__browse_block_hists(&rep->block_reports[i++].hist,
						 rep->min_percent, pos,
						 &rep->session->header.env,
						 &rep->annotation_opts);
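
/* stdio output: print the sample-count header and histogram of every event. */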
static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help)
	fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
		evlist->stats.total_lost_samples);

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		const char *evname = evsel__name(pos);

		if (symbol_conf.event_group && !evsel__is_group_leader(pos))

		if (rep->skip_empty && !hists->stats.nr_samples)

		hists__fprintf_nr_sample_events(hists, rep, evname, stdout);

		if (rep->total_cycles_mode) {
			report__browse_block_hists(&rep->block_reports[i++].hist,
						   rep->min_percent, pos,

		hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
			       !(symbol_conf.use_callchain ||
				 symbol_conf.show_branchflag_count));
		fprintf(stdout, "\n\n");

	fprintf(stdout, "#\n# (%s)\n#\n", help);

	if (rep->show_threads) {
		bool style = !strcmp(rep->pretty_printing_style, "raw");
		perf_read_values_display(stdout, &rep->show_threads_values,
		perf_read_values_destroy(&rep->show_threads_values);

	if (sort__mode == SORT_MODE__BRANCH)
		branch_type_stat_display(stdout, &rep->brtype_stat);
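
/*
 * Warn when kernel address maps were restricted (kptr_restrict) at record
 * time, in which case kernel samples cannot be resolved reliably.
 */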
static void report__warn_kptr_restrict(const struct report *rep)
	struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
	struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;

	if (evlist__exclude_kernel(rep->session->evlist))

	if (kernel_map == NULL ||
	    (kernel_map->dso->hit &&
	     (kernel_kmap->ref_reloc_sym == NULL ||
	      kernel_kmap->ref_reloc_sym->addr == 0))) {
			"As no suitable kallsyms nor vmlinux was found, kernel samples\n"
			"can't be resolved.";

		if (kernel_map && map__has_symbols(kernel_map)) {
			desc = "If some relocation was applied (e.g. "
			       "kexec) symbols may be misresolved.";

			"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
			"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
			"Samples in kernel modules can't be resolved as well.\n\n",

static int report__gtk_browse_hists(struct report *rep, const char *help)
	int (*hist_browser)(struct evlist *evlist, const char *help,
			    struct hist_browser_timer *timer, float min_pcnt);

	hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists");

	if (hist_browser == NULL) {
		ui__error("GTK browser not found!\n");

	return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
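
/* Dispatch to the TUI, GTK or stdio browser, with a usage tip loaded via perf_tip(). */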
static int report__browse_hists(struct report *rep)
	struct perf_session *session = rep->session;
	struct evlist *evlist = session->evlist;
	char *help = NULL, *path = NULL;

	path = system_path(TIPDIR);
	if (perf_tip(&help, path) || help == NULL) {
		/* fallback for people who don't install perf ;-) */
		path = system_path(DOCDIR);
		if (perf_tip(&help, path) || help == NULL)
			help = strdup("Cannot load tips.txt file, please install perf!");

	switch (use_browser) {
		if (rep->total_cycles_mode) {
			ret = evlist__tui_block_hists_browse(evlist, rep);

		ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
					       &session->header.env, true, &rep->annotation_opts);
		/*
		 * Usually "ret" is the last pressed key, and we only
		 * care if the key notifies us to switch data file.
		 */
		if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD)

		ret = report__gtk_browse_hists(rep, help);

		ret = evlist__tty_browse_hists(evlist, rep, help);
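
/*
 * Collapse (merge) the hist entries of every event and, with --group,
 * match and link member hists to their group leader.
 */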
static int report__collapse_hists(struct report *rep)
	struct ui_progress prog;

	ui_progress__init(&prog, rep->nr_entries, "Merging related events...");

	evlist__for_each_entry(rep->session->evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (pos->core.idx == 0)
			hists->symbol_filter_str = rep->symbol_filter_str;

		hists->socket_filter = rep->socket_filter;

		ret = hists__collapse_resort(hists, &prog);

		/* Non-group events are considered as leader */
		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(evsel__leader(pos));

			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);

	ui_progress__finish();
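
/* Output-resort callback: lazily annotate symbols when IPC columns were requested. */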
static int hists__resort_cb(struct hist_entry *he, void *arg)
	struct report *rep = arg;
	struct symbol *sym = he->ms.sym;

	if (rep->symbol_ipc && sym && !sym->annotate2) {
		struct evsel *evsel = hists_to_evsel(he->hists);

		symbol__annotate2(&he->ms, evsel,
				  &annotation__default_options, NULL);
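
/* Sort the hist entries of every event for output. */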
static void report__output_resort(struct report *rep)
	struct ui_progress prog;

	ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");

	evlist__for_each_entry(rep->session->evlist, pos) {
		evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);

	ui_progress__finish();

static int count_sample_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
	struct hists *hists = evsel__hists(evsel);

	hists__inc_nr_events(hists);

static int process_attr(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct evlist **pevlist);

static void stats_setup(struct report *rep)
	memset(&rep->tool, 0, sizeof(rep->tool));
	rep->tool.attr = process_attr;
	rep->tool.sample = count_sample_event;
	rep->tool.no_warn = true;

static int stats_print(struct report *rep)
	struct perf_session *session = rep->session;

	perf_session__fprintf_nr_events(session, stdout, rep->skip_empty);
	evlist__fprintf_nr_events(session->evlist, stdout, rep->skip_empty);

static void tasks_setup(struct report *rep)
	memset(&rep->tool, 0, sizeof(rep->tool));
	rep->tool.ordered_events = true;
	if (rep->mmaps_mode) {
		rep->tool.mmap = perf_event__process_mmap;
		rep->tool.mmap2 = perf_event__process_mmap2;
	rep->tool.attr = process_attr;
	rep->tool.comm = perf_event__process_comm;
	rep->tool.exit = perf_event__process_exit;
	rep->tool.fork = perf_event__process_fork;
	rep->tool.no_warn = true;

	struct thread *thread;
	struct list_head list;
	struct list_head children;
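
/*
 * Link a task into its parent's children list, walking up the parent chain
 * until an already processed task (or a task with no parent) is found.
 */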
static struct task *tasks_list(struct task *task, struct machine *machine)
	struct thread *parent_thread, *thread = task->thread;
	struct task *parent_task;

	/* Already listed. */
	if (!list_empty(&task->list))

	/* Last one in the chain. */
	if (thread->ppid == -1)

	parent_thread = machine__find_thread(machine, -1, thread->ppid);
		return ERR_PTR(-ENOENT);

	parent_task = thread__priv(parent_thread);
	list_add_tail(&task->list, &parent_task->children);
	return tasks_list(parent_task, machine);

static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
	maps__for_each_entry(maps, map) {
		printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
				   indent, "", map->start, map->end,
				   map->prot & PROT_READ ? 'r' : '-',
				   map->prot & PROT_WRITE ? 'w' : '-',
				   map->prot & PROT_EXEC ? 'x' : '-',
				   map->flags & MAP_SHARED ? 's' : 'p',
				   map->dso->id.ino, map->dso->name);

static void task__print_level(struct task *task, FILE *fp, int level)
	struct thread *thread = task->thread;

	int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
				  thread->pid_, thread->tid, thread->ppid,

	fprintf(fp, "%s\n", thread__comm_str(thread));

	maps__fprintf_task(thread->maps, comm_indent, fp);

	if (!list_empty(&task->children)) {
		list_for_each_entry(child, &task->children, list)
			task__print_level(child, fp, level + 1);
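
/* --tasks/--mmaps: dump the recorded threads as an indented parent/child tree. */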
static int tasks_print(struct report *rep, FILE *fp)
	struct perf_session *session = rep->session;
	struct machine *machine = &session->machines.host;
	struct task *tasks, *task;
	unsigned int nr = 0, itask = 0, i;

	/*
	 * No locking needed while accessing machine->threads,
	 * because --tasks is a single threaded command.
	 */

	/* Count all the threads. */
	for (i = 0; i < THREADS__TABLE_SIZE; i++)
		nr += machine->threads[i].nr;

	tasks = malloc(sizeof(*tasks) * nr);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		for (nd = rb_first_cached(&threads->entries); nd;
			task = tasks + itask++;

			task->thread = rb_entry(nd, struct thread, rb_node);
			INIT_LIST_HEAD(&task->children);
			INIT_LIST_HEAD(&task->list);
			thread__set_priv(task->thread, task);

	/*
	 * Iterate every task down to the unprocessed parent
	 * and link them all into the task children lists. A task with no
	 * parent is added into 'list'.
	 */
	for (itask = 0; itask < nr; itask++) {
		task = tasks + itask;

		if (!list_empty(&task->list))

		task = tasks_list(task, machine);
			pr_err("Error: failed to process tasks\n");
			return PTR_ERR(task);

		list_add_tail(&task->list, &list);

	fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");

	list_for_each_entry(task, &list, list)
		task__print_level(task, fp, 0);
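
/*
 * Core of 'perf report': process all events in the perf.data file, collapse
 * and resort the histograms and hand them to the chosen browser.
 */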
static int __cmd_report(struct report *rep)
	struct perf_session *session = rep->session;
	struct perf_data *data = session->data;

	signal(SIGINT, sig_handler);

	ret = perf_session__cpu_bitmap(session, rep->cpu_list,
			ui__error("failed to set cpu bitmap\n");

		session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;

	if (rep->show_threads) {
		ret = perf_read_values_init(&rep->show_threads_values);

	ret = report__setup_sample_type(rep);
		/* report__setup_sample_type() already showed error message */

	ret = perf_session__process_events(session);
		ui__error("failed to process sample\n");

	evlist__check_mem_load_aux(session->evlist);

		return stats_print(rep);

		return tasks_print(rep, stdout);

	report__warn_kptr_restrict(rep);

	evlist__for_each_entry(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (use_browser == 0) {
			perf_session__fprintf(session, stdout);

			perf_session__fprintf_dsos(session, stdout);

			perf_session__fprintf_nr_events(session, stdout,
			evlist__fprintf_nr_events(session->evlist, stdout,

	ret = report__collapse_hists(rep);
		ui__error("failed to process hist entry\n");

	/*
	 * recalculate number of entries after collapsing since it
	 * might be changed during the collapse phase.
	 */
	rep->nr_entries = 0;
	evlist__for_each_entry(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (rep->nr_entries == 0) {
		ui__error("The %s data has no samples!\n", data->path);

	report__output_resort(rep);

	if (rep->total_cycles_mode) {
		int block_hpps[6] = {
			PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
			PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
			PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
			PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
			PERF_HPP_REPORT__BLOCK_RANGE,
			PERF_HPP_REPORT__BLOCK_DSO,

		rep->block_reports = block_info__create_report(session->evlist,
							       &rep->nr_block_reports);
		if (!rep->block_reports)

	return report__browse_hists(rep);

report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
	struct callchain_param *callchain = opt->value;

	callchain->enabled = !unset;

		symbol_conf.use_callchain = false;
		callchain->mode = CHAIN_NONE;

	return parse_callchain_report_opt(arg);
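
/* Parse the --time-quantum argument ("100ms", "1s", ...) into nanoseconds. */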
parse_time_quantum(const struct option *opt, const char *arg,
		   int unset __maybe_unused)
	unsigned long *time_q = opt->value;

	*time_q = strtoul(arg, &end, 0);

		pr_err("time quantum cannot be 0");

	end = skip_spaces(end);

	if (!strcmp(end, "s")) {
		*time_q *= NSEC_PER_SEC;

	if (!strcmp(end, "ms")) {
		*time_q *= NSEC_PER_MSEC;

	if (!strcmp(end, "us")) {
		*time_q *= NSEC_PER_USEC;

	if (!strcmp(end, "ns"))

	pr_err("Cannot parse time quantum `%s'\n", arg);

report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
				const char *arg, int unset __maybe_unused)
	int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);

		regerror(err, &ignore_callees_regex, buf, sizeof(buf));
		pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);

	have_ignore_callees = 1;

parse_branch_mode(const struct option *opt,
		  const char *str __maybe_unused, int unset)
	int *branch_mode = opt->value;

	*branch_mode = !unset;

parse_percent_limit(const struct option *opt, const char *str,
		    int unset __maybe_unused)
	struct report *rep = opt->value;
	double pcnt = strtof(str, NULL);

	rep->min_percent = pcnt;
	callchain_param.min_percent = pcnt;
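
/*
 * When attribute events arrive (e.g. in pipe mode), recompute the combined
 * sample type so callchain handling can be set up accordingly.
 */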
static int process_attr(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct evlist **pevlist)
	err = perf_event__process_attr(tool, event, pevlist);

	/*
	 * Check if we need to enable callchains based
	 * on events sample_type.
	 */
	sample_type = evlist__combined_sample_type(*pevlist);
	callchain_param_setup(sample_type);
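
/* Entry point for 'perf report'. */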
int cmd_report(int argc, const char **argv)
	struct perf_session *session;
	struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
	bool has_br_stack = false;
	int branch_mode = -1;
	bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
	static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
						    CALLCHAIN_REPORT_HELP
						    "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
	char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
	const char * const report_usage[] = {
		"perf report [<options>]",
	struct report report = {
			.sample = process_sample_event,
			.mmap = perf_event__process_mmap,
			.mmap2 = perf_event__process_mmap2,
			.comm = perf_event__process_comm,
			.namespaces = perf_event__process_namespaces,
			.cgroup = perf_event__process_cgroup,
			.exit = perf_event__process_exit,
			.fork = perf_event__process_fork,
			.lost = perf_event__process_lost,
			.read = process_read_event,
			.attr = process_attr,
			.tracing_data = perf_event__process_tracing_data,
			.build_id = perf_event__process_build_id,
			.id_index = perf_event__process_id_index,
			.auxtrace_info = perf_event__process_auxtrace_info,
			.auxtrace = perf_event__process_auxtrace,
			.event_update = perf_event__process_event_update,
			.feature = process_feature_event,
			.ordered_events = true,
			.ordering_requires_timestamps = true,
		.max_stack = PERF_MAX_STACK_DEPTH,
		.pretty_printing_style = "normal",
		.socket_filter = -1,
		.annotation_opts = annotation__default_options,
	char *sort_order_help = sort_help("sort by key(s):");
	char *field_order_help = sort_help("output field(s): overhead period sample ");
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
	OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
	OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
	OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_BOOLEAN('T', "threads", &report.show_threads,
		    "Show per-thread event counters"),
	OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
		   "pretty printing style key: normal raw"),
	OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
	OPT_BOOLEAN(0, "stdio", &report.use_stdio,
		    "Use the stdio interface"),
	OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
	OPT_BOOLEAN(0, "header-only", &report.header_only,
		    "Show only data header."),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
	OPT_STRING('F', "fields", &field_order, "key[,keys...]",
	OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
		    "Show sample percentage for different cpu modes"),
	OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
		    "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
	OPT_STRING('p', "parent", &parent_pattern, "regex",
		   "regex filter to identify parent, see: '--sort parent'"),
	OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
		    "Only display entries with parent-match"),
	OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
			     "print_type,threshold[,print_limit],order,sort_key[,branch],value",
			     report_callchain_help, &report_parse_callchain_opt,
			     callchain_default_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well. "
		    "Enabled by default, use --no-children to disable."),
	OPT_INTEGER(0, "max-stack", &report.max_stack,
		    "Set the maximum stack depth when parsing the callchain, "
		    "anything beyond the specified depth will be ignored. "
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
		    "alias for inverted call graph"),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		     "ignore callees of these functions in call graphs",
		     report_parse_ignore_callees_opt),
	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
		   "only consider symbols in these pids"),
	OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
		   "only consider symbols in these tids"),
	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
		   "only show symbols that (partially) match with this filter"),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "don't try to adjust column width, use these fixed values"),
	OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
		   "separator for columns, no spaces will be added between "
		   "columns '.' is reserved."),
	OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
		    "Only display entries resolved to a symbol"),
	OPT_CALLBACK(0, "symfs", NULL, "directory",
		     "Look for files with symbols relative to this directory",
		     symbol__config_symfs),
	OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
		   "list of cpus to profile"),
	OPT_BOOLEAN('I', "show-info", &report.show_full_info,
		    "Display extended information about perf.data file"),
	OPT_BOOLEAN(0, "source", &report.annotation_opts.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_STRING('M', "disassembler-style", &report.annotation_opts.disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING(0, "prefix", &report.annotation_opts.prefix, "prefix",
		   "Add prefix to source file path names in programs (with --prefix-strip)"),
	OPT_STRING(0, "prefix-strip", &report.annotation_opts.prefix_strip, "N",
		   "Strip first N entries of source file path name in programs (with --prefix)"),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
			"Show event group information together"),
	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
		    "Sort the output by the event at the index n in group. "
		    "If n is invalid, sort by the first event. "
		    "WARNING: should be used on grouped events."),
	OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
			   "use branch records for per branch histogram filling",
	OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
		    "add last branch records to call history"),
	OPT_STRING(0, "objdump", &report.annotation_opts.objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
		    "Disable symbol demangling"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
	OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
		    "Number of samples to save per histogram entry for individual browsing"),
	OPT_CALLBACK(0, "percent-limit", &report, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "how to display percentage of filtered entries", parse_filter_percentage),
	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
			    "Instruction Tracing options\n" ITRACE_HELP,
			    itrace_parse_synth_opts),
	OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
		    "Show full source file name path for source lines"),
	OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
		    "Show callgraph from reference event"),
	OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
		    "Enable LBR callgraph stitching approach"),
	OPT_INTEGER(0, "socket-filter", &report.socket_filter,
		    "only show processor socket that match with this filter"),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
	OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
			     "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
			     stdio__config_color, "always"),
	OPT_STRING(0, "time", &report.time_str, "str",
		   "Time span of interest (start,stop)"),
	OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
		    "Show inline function"),
	OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
		     "Set percent type local/global-period/hits",
		     annotate_parse_percent_type),
	OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
	OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
		     "Set time quantum for time sort key (default 100ms)",
		     parse_time_quantum),
	OPTS_EVSWITCH(&report.evswitch),
	OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
		    "Sort all blocks by 'Sampled Cycles%'"),
	OPT_BOOLEAN(0, "disable-order", &report.disable_order,
		    "Disable raw trace ordering"),
	OPT_BOOLEAN(0, "skip-empty", &report.skip_empty,
		    "Do not display empty (or dummy) events in the output"),
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
	int ret = hists__init();
	ret = perf_config(report__config, &report);

	argc = parse_options(argc, argv, options, report_usage, 0);
	/*
	 * Special case: if there's an argument left then assume that
	 * it's a symbol filter:
	 */
			usage_with_options(report_usage, options);

		report.symbol_filter_str = argv[0];

	if (annotate_check_args(&report.annotation_opts) < 0) {

	if (report.mmaps_mode)
		report.tasks_mode = true;

	if (dump_trace && report.disable_order)
		report.tool.ordered_events = false;

		perf_quiet_option();

	if (symbol_conf.vmlinux_name &&
	    access(symbol_conf.vmlinux_name, R_OK)) {
		pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);

	if (symbol_conf.kallsyms_name &&
	    access(symbol_conf.kallsyms_name, R_OK)) {
		pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);

	if (report.inverted_callchain)
		callchain_param.order = ORDER_CALLER;
	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

	if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
	    (int)itrace_synth_opts.callchain_sz > report.max_stack)
		report.max_stack = itrace_synth_opts.callchain_sz;

	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))

			input_name = "perf.data";

	data.path = input_name;
	data.force = symbol_conf.force;

	session = perf_session__new(&data, &report.tool);
	if (IS_ERR(session)) {
		ret = PTR_ERR(session);

	ret = evswitch__init(&report.evswitch, session->evlist, stderr);

	if (zstd_init(&(session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");

	if (report.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,

	session->itrace_synth_opts = &itrace_synth_opts;

	report.session = session;

	has_br_stack = perf_header__has_feat(&session->header,
					     HEADER_BRANCH_STACK);
	if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
		has_br_stack = false;

	setup_forced_leader(&report, session->evlist);

	if (symbol_conf.group_sort_idx && !session->evlist->core.nr_groups) {
		parse_options_usage(NULL, options, "group-sort-idx", 0);

	if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch)
		has_br_stack = true;

	if (has_br_stack && branch_call_mode)
		symbol_conf.show_branchflag_count = true;
	memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat));

	/*
	 * Branch mode is a tristate:
	 * -1 means default, so decide based on the file having branch data.
	 * 0/1 means the user chose a mode.
	 */
	if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
	    !branch_call_mode) {
		sort__mode = SORT_MODE__BRANCH;
		symbol_conf.cumulate_callchain = false;
	if (branch_call_mode) {
		callchain_param.key = CCKEY_ADDRESS;
		callchain_param.branch_callstack = true;
		symbol_conf.use_callchain = true;
		callchain_register_param(&callchain_param);
		if (sort_order == NULL)
			sort_order = "srcline,symbol,dso";

	if (report.mem_mode) {
		if (sort__mode == SORT_MODE__BRANCH) {
			pr_err("branch and mem mode incompatible\n");
		sort__mode = SORT_MODE__MEMORY;
		symbol_conf.cumulate_callchain = false;

	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.cumulate_callchain = false;

			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(report_usage, options, "F", 1);
			parse_options_usage(NULL, options, "hierarchy", 0);

		perf_hpp_list.need_collapse = true;

	if (report.use_stdio)
	else if (report.use_tui)
	else if (report.use_gtk)

	/* Force tty output for header output and per-thread stat. */
	if (report.header || report.header_only || report.show_threads)

	if (report.header || report.header_only)
		report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
	if (report.show_full_info)
		report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
	if (report.stats_mode || report.tasks_mode)

	if (report.stats_mode && report.tasks_mode) {
		pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");

	if (report.total_cycles_mode) {
		if (sort__mode != SORT_MODE__BRANCH)
			report.total_cycles_mode = false;

	if (strcmp(input_name, "-") != 0)
		setup_browser(true);
	if (sort_order && strstr(sort_order, "ipc")) {
		parse_options_usage(report_usage, options, "s", 1);

	if (sort_order && strstr(sort_order, "symbol")) {
		if (sort__mode == SORT_MODE__BRANCH) {
			snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
				 sort_order, "ipc_lbr");
			report.symbol_ipc = true;
			snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
				 sort_order, "ipc_null");

		sort_order = sort_tmp;

	if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) &&
	    (setup_sorting(session->evlist) < 0)) {
		parse_options_usage(report_usage, options, "s", 1);
		parse_options_usage(sort_order ? NULL : report_usage,
	if ((report.header || report.header_only) && !quiet) {
		perf_session__fprintf_info(session, stdout,
					   report.show_full_info);
		if (report.header_only) {
			/*
			 * we need to process the first few records,
			 * which contain PERF_RECORD_HEADER_FEATURE.
			 */
			perf_session__process_events(session);
	} else if (use_browser == 0 && !quiet &&
		   !report.stats_mode && !report.tasks_mode) {
		fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
	/*
	 * Only in the TUI browser we are doing integrated annotation,
	 * so don't allocate extra space that won't be used in the stdio
	 * implementation.
	 */
	if (ui__has_annotation() || report.symbol_ipc ||
	    report.total_cycles_mode) {
		ret = symbol__annotation_init();

		/*
		 * For searching by name on the "Browse map details",
		 * providing it only in verbose mode not to bloat too
		 * much struct symbol.
		 */
			/*
			 * XXX: Need to provide a less kludgy way to ask for
			 * more space per symbol, the u32 is for the index on
			 * See symbol__browser_index.
			 */
			symbol_conf.priv_size += sizeof(u32);
			symbol_conf.sort_by_name = true;

		annotation_config__init(&report.annotation_opts);
	if (symbol__init(&session->header.env) < 0)

	if (report.time_str) {
		ret = perf_time__parse_for_ranges(report.time_str, session,
						  &report.ptime_range,

			itrace_synth_opts__set_time_range(&itrace_synth_opts,

	if (session->tevent.pevent &&
	    tep_set_function_resolver(session->tevent.pevent,
				      machine__resolve_kernel_addr,
				      &session->machines.host) < 0) {
		pr_err("%s: failed to set libtraceevent function resolver\n",

	sort__setup_elide(stdout);

	ret = __cmd_report(&report);
	if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) {
		perf_session__delete(session);
		last_key = K_SWITCH_INPUT_DATA;

	if (report.ptime_range) {
		itrace_synth_opts__clear_time_range(&itrace_synth_opts);
		zfree(&report.ptime_range);

	if (report.block_reports) {
		block_info__free_report(report.block_reports,
					report.nr_block_reports);
		report.block_reports = NULL;

	zstd_fini(&(session->zstd_data));
	perf_session__delete(session);

	free(sort_order_help);
	free(field_order_help);