// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-stat.c
 *
 * Builtin stat command: give a precise summary of the performance
 * counters for any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 */
42
43 #include "builtin.h"
44 #include "util/cgroup.h"
45 #include <subcmd/parse-options.h>
46 #include "util/parse-events.h"
47 #include "util/pmu.h"
48 #include "util/event.h"
49 #include "util/evlist.h"
50 #include "util/evlist-hybrid.h"
51 #include "util/evsel.h"
52 #include "util/debug.h"
53 #include "util/color.h"
54 #include "util/stat.h"
55 #include "util/header.h"
56 #include "util/cpumap.h"
57 #include "util/thread_map.h"
58 #include "util/counts.h"
59 #include "util/topdown.h"
60 #include "util/session.h"
61 #include "util/tool.h"
62 #include "util/string2.h"
63 #include "util/metricgroup.h"
64 #include "util/synthetic-events.h"
65 #include "util/target.h"
66 #include "util/time-utils.h"
67 #include "util/top.h"
68 #include "util/affinity.h"
69 #include "util/pfm.h"
70 #include "util/bpf_counter.h"
71 #include "util/iostat.h"
72 #include "util/pmu-hybrid.h"
73 #include "util/util.h"
74 #include "asm/bug.h"
75
76 #include <linux/time64.h>
77 #include <linux/zalloc.h>
78 #include <api/fs/fs.h>
79 #include <errno.h>
80 #include <signal.h>
81 #include <stdlib.h>
82 #include <sys/prctl.h>
83 #include <inttypes.h>
84 #include <locale.h>
85 #include <math.h>
86 #include <sys/types.h>
87 #include <sys/stat.h>
88 #include <sys/wait.h>
89 #include <unistd.h>
90 #include <sys/time.h>
91 #include <sys/resource.h>
92 #include <linux/err.h>
93
94 #include <linux/ctype.h>
95 #include <perf/evlist.h>
96 #include <internal/threadmap.h>
97
98 #define DEFAULT_SEPARATOR       " "
99 #define FREEZE_ON_SMI_PATH      "devices/cpu/freeze_on_smi"
100
101 static void print_counters(struct timespec *ts, int argc, const char **argv);
102
103 static struct evlist    *evsel_list;
104 static bool all_counters_use_bpf = true;
105
106 static struct target target = {
107         .uid    = UINT_MAX,
108 };
109
110 #define METRIC_ONLY_LEN 20
111
112 static volatile sig_atomic_t    child_pid                       = -1;
113 static int                      detailed_run                    =  0;
114 static bool                     transaction_run;
115 static bool                     topdown_run                     = false;
116 static bool                     smi_cost                        = false;
117 static bool                     smi_reset                       = false;
118 static int                      big_num_opt                     =  -1;
119 static const char               *pre_cmd                        = NULL;
120 static const char               *post_cmd                       = NULL;
121 static bool                     sync_run                        = false;
122 static bool                     forever                         = false;
123 static bool                     force_metric_only               = false;
124 static struct timespec          ref_time;
125 static bool                     append_file;
126 static bool                     interval_count;
127 static const char               *output_name;
128 static int                      output_fd;
129 static char                     *metrics;
130
131 struct perf_stat {
132         bool                     record;
133         struct perf_data         data;
134         struct perf_session     *session;
135         u64                      bytes_written;
136         struct perf_tool         tool;
137         bool                     maps_allocated;
138         struct perf_cpu_map     *cpus;
139         struct perf_thread_map *threads;
140         enum aggr_mode           aggr_mode;
141 };
142
143 static struct perf_stat         perf_stat;
144 #define STAT_RECORD             perf_stat.record
145
146 static volatile sig_atomic_t done = 0;
147
148 static struct perf_stat_config stat_config = {
149         .aggr_mode              = AGGR_GLOBAL,
150         .scale                  = true,
151         .unit_width             = 4, /* strlen("unit") */
152         .run_count              = 1,
153         .metric_only_len        = METRIC_ONLY_LEN,
154         .walltime_nsecs_stats   = &walltime_nsecs_stats,
155         .ru_stats               = &ru_stats,
156         .big_num                = true,
157         .ctl_fd                 = -1,
158         .ctl_fd_ack             = -1,
159         .iostat_run             = false,
160 };
161
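/* Return true if the two evsels' CPU maps have identical contents (both unset also matches). */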
static bool cpus_map_matched(struct evsel *a, struct evsel *b)
{
        if (!a->core.cpus && !b->core.cpus)
                return true;

        if (!a->core.cpus || !b->core.cpus)
                return false;

        if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
                return false;

        for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
                if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
                    perf_cpu_map__cpu(b->core.cpus, i).cpu)
                        return false;
        }

        return true;
}

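/*
 * Check that every group member uses the same CPU map as its leader;
 * members with a mismatching map are warned about (once per leader)
 * and removed from the group.
 */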
static void evlist__check_cpu_maps(struct evlist *evlist)
{
        struct evsel *evsel, *warned_leader = NULL;

        if (evlist__has_hybrid(evlist))
                evlist__warn_hybrid_group(evlist);

        evlist__for_each_entry(evlist, evsel) {
                struct evsel *leader = evsel__leader(evsel);

                /* Check that leader matches cpus with each member. */
                if (leader == evsel)
                        continue;
                if (cpus_map_matched(leader, evsel))
                        continue;
                /* If there's a mismatch, disable the group and warn the user. */
                if (warned_leader != leader) {
                        char buf[200];

                        pr_warning("WARNING: grouped events cpus do not match.\n"
                                "Events with CPUs not matching the leader will "
                                "be removed from the group.\n");
                        evsel__group_desc(leader, buf, sizeof(buf));
                        pr_warning("  %s\n", buf);
                        warned_leader = leader;
                }
                if (verbose > 0) {
                        char buf[200];

                        cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
                        pr_warning("     %s: %s\n", leader->name, buf);
                        cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
                        pr_warning("     %s: %s\n", evsel->name, buf);
                }

                evsel__remove_from_group(evsel, leader);
        }
}

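/* Compute r = a - b for timespecs; assumes a >= b. */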
static inline void diff_timespec(struct timespec *r, struct timespec *a,
                                 struct timespec *b)
{
        r->tv_sec = a->tv_sec - b->tv_sec;
        if (a->tv_nsec < b->tv_nsec) {
                r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
                r->tv_sec--;
        } else {
                r->tv_nsec = a->tv_nsec - b->tv_nsec;
        }
}

static void perf_stat__reset_stats(void)
{
        evlist__reset_stats(evsel_list);
        perf_stat__reset_shadow_stats();
}

static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
                pr_err("failed to write perf data, error: %m\n");
                return -1;
        }

        perf_stat.bytes_written += event->header.size;
        return 0;
}

static int write_stat_round_event(u64 tm, u64 type)
{
        return perf_event__synthesize_stat_round(NULL, tm, type,
                                                 process_synthesized_event,
                                                 NULL);
}

#define WRITE_STAT_ROUND_EVENT(time, interval) \
        write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
                                   struct perf_counts_values *count)
{
        struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
        struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);

        return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
                                           process_synthesized_event, NULL);
}

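/*
 * Read one counter value: tool events (duration_time, user_time,
 * system_time) are synthesized from the tool's own bookkeeping, all
 * other events are read from the kernel.
 */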
static int read_single_counter(struct evsel *counter, int cpu_map_idx,
                               int thread, struct timespec *rs)
{
        switch (counter->tool_event) {
                case PERF_TOOL_DURATION_TIME: {
                        u64 val = rs->tv_nsec + rs->tv_sec * 1000000000ULL;
                        struct perf_counts_values *count =
                                perf_counts(counter->counts, cpu_map_idx, thread);
                        count->ena = count->run = val;
                        count->val = val;
                        return 0;
                }
                case PERF_TOOL_USER_TIME:
                case PERF_TOOL_SYSTEM_TIME: {
                        u64 val;
                        struct perf_counts_values *count =
                                perf_counts(counter->counts, cpu_map_idx, thread);
                        if (counter->tool_event == PERF_TOOL_USER_TIME)
                                val = ru_stats.ru_utime_usec_stat.mean;
                        else
                                val = ru_stats.ru_stime_usec_stat.mean;
                        count->ena = count->run = val;
                        count->val = val;
                        return 0;
                }
                default:
                case PERF_TOOL_NONE:
                        return evsel__read_counter(counter, cpu_map_idx, thread);
                case PERF_TOOL_MAX:
                        /* This should never be reached. */
                        return 0;
        }
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx)
{
        int nthreads = perf_thread_map__nr(evsel_list->core.threads);
        int thread;

        if (!counter->supported)
                return -ENOENT;

        for (thread = 0; thread < nthreads; thread++) {
                struct perf_counts_values *count;

                count = perf_counts(counter->counts, cpu_map_idx, thread);

                /*
                 * The leader's group read loads data into its group members
                 * (via evsel__read_counter()) and sets their count->loaded.
                 */
                if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
                    read_single_counter(counter, cpu_map_idx, thread, rs)) {
                        counter->counts->scaled = -1;
                        perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
                        perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
                        return -1;
                }

                perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);

                if (STAT_RECORD) {
                        if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
                                pr_err("failed to write stat event\n");
                                return -1;
                        }
                }

                if (verbose > 1) {
                        fprintf(stat_config.output,
                                "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
                                        evsel__name(counter),
                                        perf_cpu_map__cpu(evsel__cpus(counter),
                                                          cpu_map_idx).cpu,
                                        count->val, count->ena, count->run);
                }
        }

        return 0;
}

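/*
 * Read all non-BPF counters. Unless the target is per-thread or has no
 * CPUs, iterate CPU-first with affinity set so each read happens on the
 * CPU that owns the counter.
 */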
static int read_affinity_counters(struct timespec *rs)
{
        struct evlist_cpu_iterator evlist_cpu_itr;
        struct affinity saved_affinity, *affinity;

        if (all_counters_use_bpf)
                return 0;

        if (!target__has_cpu(&target) || target__has_per_thread(&target))
                affinity = NULL;
        else if (affinity__setup(&saved_affinity) < 0)
                return -1;
        else
                affinity = &saved_affinity;

        evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
                struct evsel *counter = evlist_cpu_itr.evsel;

                if (evsel__is_bpf(counter))
                        continue;

                if (!counter->err) {
                        counter->err = read_counter_cpu(counter, rs,
                                                        evlist_cpu_itr.cpu_map_idx);
                }
        }
        if (affinity)
                affinity__cleanup(&saved_affinity);

        return 0;
}

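/* Read the current value of every BPF-backed counter. */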
static int read_bpf_map_counters(void)
{
        struct evsel *counter;
        int err;

        evlist__for_each_entry(evsel_list, counter) {
                if (!evsel__is_bpf(counter))
                        continue;

                err = bpf_counter__read(counter);
                if (err)
                        return err;
        }
        return 0;
}

static int read_counters(struct timespec *rs)
{
        if (!stat_config.stop_read_counter) {
                if (read_bpf_map_counters() ||
                    read_affinity_counters(rs))
                        return -1;
        }
        return 0;
}

static void process_counters(void)
{
        struct evsel *counter;

        evlist__for_each_entry(evsel_list, counter) {
                if (counter->err)
                        pr_debug("failed to read counter %s\n", counter->name);
                if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
                        pr_warning("failed to process counter %s\n", counter->name);
                counter->err = 0;
        }

        perf_stat_merge_counters(&stat_config, evsel_list);
        perf_stat_process_percore(&stat_config, evsel_list);
}

static void process_interval(void)
{
        struct timespec ts, rs;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        diff_timespec(&rs, &ts, &ref_time);

        evlist__reset_aggr_stats(evsel_list);

        if (read_counters(&rs) == 0)
                process_counters();

        if (STAT_RECORD) {
                if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
                        pr_err("failed to write stat round event\n");
        }

        init_stats(&walltime_nsecs_stats);
        update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
        print_counters(&rs, 0, NULL);
}

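/* Process one interval expiry; returns true once the requested number of interval prints has been produced. */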
static bool handle_interval(unsigned int interval, int *times)
{
        if (interval) {
                process_interval();
                if (interval_count && !(--(*times)))
                        return true;
        }
        return false;
}

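/*
 * Enable the BPF-backed counters first, then the regular events, unless
 * the latter are set up to be enabled automatically on exec of the
 * workload.
 */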
static int enable_counters(void)
{
        struct evsel *evsel;
        int err;

        evlist__for_each_entry(evsel_list, evsel) {
                if (!evsel__is_bpf(evsel))
                        continue;

                err = bpf_counter__enable(evsel);
                if (err)
                        return err;
        }

        if (!target__enable_on_exec(&target)) {
                if (!all_counters_use_bpf)
                        evlist__enable(evsel_list);
        }
        return 0;
}

static void disable_counters(void)
{
        struct evsel *counter;

        /*
         * If we don't have a tracee (i.e. we attached to an existing task
         * or CPU), the counters may still be running. To get accurate
         * group ratios, we must stop the groups from counting before
         * reading their constituent counters.
         */
        if (!target__none(&target)) {
                evlist__for_each_entry(evsel_list, counter)
                        bpf_counter__disable(counter);
                if (!all_counters_use_bpf)
                        evlist__disable(evsel_list);
        }
}

static volatile sig_atomic_t workload_exec_errno;

/*
 * evlist__prepare_workload() sends a SIGUSR1 if the fork fails, since
 * we requested that by setting its want_signal parameter to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
                                        void *ucontext __maybe_unused)
{
        workload_exec_errno = info->si_value.sival_int;
}

static bool evsel__should_store_id(struct evsel *counter)
{
        return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}

static bool is_target_alive(struct target *_target,
                            struct perf_thread_map *threads)
{
        struct stat st;
        int i;

        if (!target__has_task(_target))
                return true;

        for (i = 0; i < threads->nr; i++) {
                char path[PATH_MAX];

                scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
                          threads->map[i].pid);

                if (!stat(path, &st))
                        return true;
        }

        return false;
}

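/*
 * Handle a command arriving on the control file descriptor; the enable
 * and disable commands also trigger an interval printout so the change
 * is visible in interval mode.
 */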
static void process_evlist(struct evlist *evlist, unsigned int interval)
{
        enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

        if (evlist__ctlfd_process(evlist, &cmd) > 0) {
                switch (cmd) {
                case EVLIST_CTL_CMD_ENABLE:
                        fallthrough;
                case EVLIST_CTL_CMD_DISABLE:
                        if (interval)
                                process_interval();
                        break;
                case EVLIST_CTL_CMD_SNAPSHOT:
                case EVLIST_CTL_CMD_ACK:
                case EVLIST_CTL_CMD_UNSUPPORTED:
                case EVLIST_CTL_CMD_EVLIST:
                case EVLIST_CTL_CMD_STOP:
                case EVLIST_CTL_CMD_PING:
                default:
                        break;
                }
        }
}

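/* Subtract the elapsed time from the remaining time to sleep, clamping at zero. */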
static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
                        int *time_to_sleep)
{
        int tts = *time_to_sleep;
        struct timespec time_diff;

        diff_timespec(&time_diff, time_stop, time_start);

        tts -= time_diff.tv_sec * MSEC_PER_SEC +
               time_diff.tv_nsec / NSEC_PER_MSEC;

        if (tts < 0)
                tts = 0;

        *time_to_sleep = tts;
}

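/*
 * Main wait loop: poll for control-fd commands while periodically
 * checking whether the workload (or attached target) is still alive and
 * whether an interval or timeout has expired.
 */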
static int dispatch_events(bool forks, int timeout, int interval, int *times)
{
        int child_exited = 0, status = 0;
        int time_to_sleep, sleep_time;
        struct timespec time_start, time_stop;

        if (interval)
                sleep_time = interval;
        else if (timeout)
                sleep_time = timeout;
        else
                sleep_time = 1000;

        time_to_sleep = sleep_time;

        while (!done) {
                if (forks)
                        child_exited = waitpid(child_pid, &status, WNOHANG);
                else
                        child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;

                if (child_exited)
                        break;

                clock_gettime(CLOCK_MONOTONIC, &time_start);
                if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
                        if (timeout || handle_interval(interval, times))
                                break;
                        time_to_sleep = sleep_time;
                } else { /* fd revent */
                        process_evlist(evsel_list, interval);
                        clock_gettime(CLOCK_MONOTONIC, &time_stop);
                        compute_tts(&time_start, &time_stop, &time_to_sleep);
                }
        }

        return status;
}

enum counter_recovery {
        COUNTER_SKIP,
        COUNTER_RETRY,
        COUNTER_FATAL,
};

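/* Decide how to recover from a failed attempt to open a counter, based on errno. */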
static enum counter_recovery stat_handle_error(struct evsel *counter)
{
        char msg[BUFSIZ];
        /*
         * PPC returns ENXIO for HW counters until 2.6.37
         * (behavior changed with commit b0a873e).
         */
        if (errno == EINVAL || errno == ENOSYS ||
            errno == ENOENT || errno == EOPNOTSUPP ||
            errno == ENXIO) {
                if (verbose > 0)
                        ui__warning("%s event is not supported by the kernel.\n",
                                    evsel__name(counter));
                counter->supported = false;
                /*
                 * errored is a sticky flag that means one of the counter's
                 * cpu event had a problem and needs to be reexamined.
                 */
                counter->errored = true;

                if ((evsel__leader(counter) != counter) ||
                    !(counter->core.leader->nr_members > 1))
                        return COUNTER_SKIP;
        } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
                if (verbose > 0)
                        ui__warning("%s\n", msg);
                return COUNTER_RETRY;
        } else if (target__has_per_thread(&target) &&
                   evsel_list->core.threads &&
                   evsel_list->core.threads->err_thread != -1) {
                /*
                 * For global --per-thread case, skip current
                 * error thread.
                 */
                if (!thread_map__remove(evsel_list->core.threads,
                                        evsel_list->core.threads->err_thread)) {
                        evsel_list->core.threads->err_thread = -1;
                        return COUNTER_RETRY;
                }
        } else if (counter->skippable) {
                if (verbose > 0)
                        ui__warning("skipping event %s that kernel failed to open.\n",
                                    evsel__name(counter));
                counter->supported = false;
                counter->errored = true;
                return COUNTER_SKIP;
        }

        evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
        ui__error("%s\n", msg);

        if (child_pid != -1)
                kill(child_pid, SIGTERM);
        return COUNTER_FATAL;
}

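/*
 * Run the workload once: open and enable the counters, wait for the
 * workload (or a signal/timeout), then read and process the final
 * counts.
 */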
static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
        int interval = stat_config.interval;
        int times = stat_config.times;
        int timeout = stat_config.timeout;
        char msg[BUFSIZ];
        unsigned long long t0, t1;
        struct evsel *counter;
        size_t l;
        int status = 0;
        const bool forks = (argc > 0);
        bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
        struct evlist_cpu_iterator evlist_cpu_itr;
        struct affinity saved_affinity, *affinity = NULL;
        int err;
        bool second_pass = false;

        if (forks) {
                if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
                        perror("failed to prepare workload");
                        return -1;
                }
                child_pid = evsel_list->workload.pid;
        }

        if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
                if (affinity__setup(&saved_affinity) < 0)
                        return -1;
                affinity = &saved_affinity;
        }

        evlist__for_each_entry(evsel_list, counter) {
                counter->reset_group = false;
                if (bpf_counter__load(counter, &target))
                        return -1;
                if (!(evsel__is_bperf(counter)))
                        all_counters_use_bpf = false;
        }

        evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
                counter = evlist_cpu_itr.evsel;

                /*
                 * bperf calls evsel__open_per_cpu() in bperf__load(), so
                 * no need to call it again here.
                 */
                if (target.use_bpf)
                        break;

                if (counter->reset_group || counter->errored)
                        continue;
                if (evsel__is_bperf(counter))
                        continue;
try_again:
                if (create_perf_stat_counter(counter, &stat_config, &target,
                                             evlist_cpu_itr.cpu_map_idx) < 0) {

                        /*
                         * A weak group failed. We cannot just undo this here
                         * because earlier CPUs might be in group mode, and the
                         * kernel doesn't support mixing group and non-group
                         * reads. Defer it to the second pass below.
                         * Don't close here because we're in the wrong affinity.
                         */
                        if ((errno == EINVAL || errno == EBADF) &&
                                evsel__leader(counter) != counter &&
                                counter->weak_group) {
                                evlist__reset_weak_group(evsel_list, counter, false);
                                assert(counter->reset_group);
                                second_pass = true;
                                continue;
                        }

                        switch (stat_handle_error(counter)) {
                        case COUNTER_FATAL:
                                return -1;
                        case COUNTER_RETRY:
                                goto try_again;
                        case COUNTER_SKIP:
                                continue;
                        default:
                                break;
                        }

                }
                counter->supported = true;
        }

        if (second_pass) {
                /*
                 * Now redo all the weak groups after closing them,
                 * and also close errored counters.
                 */

                /* First close errored or weak retry */
                evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
                        counter = evlist_cpu_itr.evsel;

                        if (!counter->reset_group && !counter->errored)
                                continue;

                        perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
                }
                /* Now reopen weak */
                evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
                        counter = evlist_cpu_itr.evsel;

                        if (!counter->reset_group)
                                continue;
try_again_reset:
                        pr_debug2("reopening weak %s\n", evsel__name(counter));
                        if (create_perf_stat_counter(counter, &stat_config, &target,
                                                     evlist_cpu_itr.cpu_map_idx) < 0) {

                                switch (stat_handle_error(counter)) {
                                case COUNTER_FATAL:
                                        return -1;
                                case COUNTER_RETRY:
                                        goto try_again_reset;
                                case COUNTER_SKIP:
                                        continue;
                                default:
                                        break;
                                }
                        }
                        counter->supported = true;
                }
        }
        affinity__cleanup(affinity);

        evlist__for_each_entry(evsel_list, counter) {
                if (!counter->supported) {
                        perf_evsel__free_fd(&counter->core);
                        continue;
                }

                l = strlen(counter->unit);
                if (l > stat_config.unit_width)
                        stat_config.unit_width = l;

                if (evsel__should_store_id(counter) &&
                    evsel__store_ids(counter, evsel_list))
                        return -1;
        }

        if (evlist__apply_filters(evsel_list, &counter)) {
                pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
                        counter->filter, evsel__name(counter), errno,
                        str_error_r(errno, msg, sizeof(msg)));
                return -1;
        }

        if (STAT_RECORD) {
                int fd = perf_data__fd(&perf_stat.data);

                if (is_pipe) {
                        err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
                } else {
                        err = perf_session__write_header(perf_stat.session, evsel_list,
                                                         fd, false);
                }

                if (err < 0)
                        return err;

                err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
                                                         process_synthesized_event, is_pipe);
                if (err < 0)
                        return err;
        }

        if (target.initial_delay) {
                pr_info(EVLIST_DISABLED_MSG);
        } else {
                err = enable_counters();
                if (err)
                        return -1;
        }

        /* Exec the command, if any */
        if (forks)
                evlist__start_workload(evsel_list);

        if (target.initial_delay > 0) {
                usleep(target.initial_delay * USEC_PER_MSEC);
                err = enable_counters();
                if (err)
                        return -1;

                pr_info(EVLIST_ENABLED_MSG);
        }

        t0 = rdclock();
        clock_gettime(CLOCK_MONOTONIC, &ref_time);

        if (forks) {
                if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
                        status = dispatch_events(forks, timeout, interval, &times);
                if (child_pid != -1) {
                        if (timeout)
                                kill(child_pid, SIGTERM);
                        wait4(child_pid, &status, 0, &stat_config.ru_data);
                }

                if (workload_exec_errno) {
                        const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
                        pr_err("Workload failed: %s\n", emsg);
                        return -1;
                }

                if (WIFSIGNALED(status))
                        psignal(WTERMSIG(status), argv[0]);
        } else {
                status = dispatch_events(forks, timeout, interval, &times);
        }

        disable_counters();

        t1 = rdclock();

        if (stat_config.walltime_run_table)
                stat_config.walltime_run[run_idx] = t1 - t0;

        if (interval && stat_config.summary) {
                stat_config.interval = 0;
                stat_config.stop_read_counter = true;
                init_stats(&walltime_nsecs_stats);
                update_stats(&walltime_nsecs_stats, t1 - t0);

                evlist__copy_prev_raw_counts(evsel_list);
                evlist__reset_prev_raw_counts(evsel_list);
                evlist__reset_aggr_stats(evsel_list);
        } else {
                update_stats(&walltime_nsecs_stats, t1 - t0);
                update_rusage_stats(&ru_stats, &stat_config.ru_data);
        }

        /*
         * Closing a group leader splits the group and, as we only disable
         * group leaders, results in the remaining events becoming enabled.
         * To avoid arbitrary skew, we must read all counters before
         * closing any group leader.
         */
        if (read_counters(&(struct timespec) { .tv_nsec = t1 - t0 }) == 0)
                process_counters();

        /*
         * We need to keep evsel_list alive, because it's processed
         * later; the evsel_list will be closed after that.
         */
        if (!STAT_RECORD)
                evlist__close(evsel_list);

        return WEXITSTATUS(status);
}

static int run_perf_stat(int argc, const char **argv, int run_idx)
{
        int ret;

        if (pre_cmd) {
                ret = system(pre_cmd);
                if (ret)
                        return ret;
        }

        if (sync_run)
                sync();

        ret = __run_perf_stat(argc, argv, run_idx);
        if (ret)
                return ret;

        if (post_cmd) {
                ret = system(post_cmd);
                if (ret)
                        return ret;
        }

        return ret;
}

static void print_counters(struct timespec *ts, int argc, const char **argv)
{
        /* Do not print anything if we record to the pipe. */
        if (STAT_RECORD && perf_stat.data.is_pipe)
                return;
        if (quiet)
                return;

        evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
}

static volatile sig_atomic_t signr = -1;

static void skip_signal(int signo)
{
        if ((child_pid == -1) || stat_config.interval)
                done = 1;

        signr = signo;
        /*
         * Render child_pid harmless: don't send SIGTERM to a random
         * process in case of a race condition with fast PID recycling.
         */
        child_pid = -1;
}

static void sig_atexit(void)
{
        sigset_t set, oset;

        /*
         * Avoid a race condition with the SIGCHLD handler in skip_signal(),
         * which modifies child_pid; the goal is to avoid sending SIGTERM
         * to a random process.
         */
        sigemptyset(&set);
        sigaddset(&set, SIGCHLD);
        sigprocmask(SIG_BLOCK, &set, &oset);

        if (child_pid != -1)
                kill(child_pid, SIGTERM);

        sigprocmask(SIG_SETMASK, &oset, NULL);

        if (signr == -1)
                return;

        signal(signr, SIG_DFL);
        kill(getpid(), signr);
}

void perf_stat__set_big_num(int set)
{
        stat_config.big_num = (set != 0);
}

void perf_stat__set_no_csv_summary(int set)
{
        stat_config.no_csv_summary = (set != 0);
}

static int stat__set_big_num(const struct option *opt __maybe_unused,
                             const char *s __maybe_unused, int unset)
{
        big_num_opt = unset ? 0 : 1;
        perf_stat__set_big_num(!unset);
        return 0;
}

static int enable_metric_only(const struct option *opt __maybe_unused,
                              const char *s __maybe_unused, int unset)
{
        force_metric_only = true;
        stat_config.metric_only = !unset;
        return 0;
}

static int append_metric_groups(const struct option *opt __maybe_unused,
                               const char *str,
                               int unset __maybe_unused)
{
        if (metrics) {
                char *tmp;

                if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
                        return -ENOMEM;
                free(metrics);
                metrics = tmp;
        } else {
                metrics = strdup(str);
                if (!metrics)
                        return -ENOMEM;
        }
        return 0;
}

static int parse_control_option(const struct option *opt,
                                const char *str,
                                int unset __maybe_unused)
{
        struct perf_stat_config *config = opt->value;

        return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
}

static int parse_stat_cgroups(const struct option *opt,
                              const char *str, int unset)
{
        if (stat_config.cgroup_list) {
                pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
                return -1;
        }

        return parse_cgroups(opt, str, unset);
}

static int parse_hybrid_type(const struct option *opt,
                             const char *str,
                             int unset __maybe_unused)
{
        struct evlist *evlist = *(struct evlist **)opt->value;

        if (!list_empty(&evlist->core.entries)) {
                fprintf(stderr, "Must define cputype before events/metrics\n");
                return -1;
        }

        evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str);
        if (!evlist->hybrid_pmu_name) {
                fprintf(stderr, "--cputype %s is not supported!\n", str);
                return -1;
        }

        return 0;
}

static struct option stat_options[] = {
        OPT_BOOLEAN('T', "transaction", &transaction_run,
                    "hardware transaction statistics"),
        OPT_CALLBACK('e', "event", &evsel_list, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events_option),
        OPT_CALLBACK(0, "filter", &evsel_list, "filter",
                     "event filter", parse_filter),
        OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
                    "child tasks do not inherit counters"),
        OPT_STRING('p', "pid", &target.pid, "pid",
                   "stat events on existing process id"),
        OPT_STRING('t', "tid", &target.tid, "tid",
                   "stat events on existing thread id"),
#ifdef HAVE_BPF_SKEL
        OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
                   "stat events on existing bpf program id"),
        OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
                    "use bpf program to count events"),
        OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
                   "path to perf_event_attr map"),
#endif
        OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN(0, "scale", &stat_config.scale,
                    "Use --no-scale to disable counter scaling for multiplexing"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_INTEGER('r', "repeat", &stat_config.run_count,
                    "repeat command and print average + stddev (max: 100, forever: 0)"),
        OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
                    "display details about each run (only with -r option)"),
        OPT_BOOLEAN('n', "null", &stat_config.null_run,
                    "null run - don't start any counters"),
        OPT_INCR('d', "detailed", &detailed_run,
                    "detailed run - start a lot of events"),
        OPT_BOOLEAN('S', "sync", &sync_run,
                    "call sync() before starting a run"),
        OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
                           "print large numbers with thousands\' separators",
                           stat__set_big_num),
        OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
                    "list of cpus to monitor in system-wide"),
        OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
                    "disable CPU count aggregation", AGGR_NONE),
        OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
        OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
                    "Merge identical named hybrid events"),
        OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
                   "print counts with custom separator"),
        OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
                   "print counts in JSON format"),
        OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
                     "monitor event in cgroup name only", parse_stat_cgroups),
        OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
                    "expand events for each cgroup"),
        OPT_STRING('o', "output", &output_name, "file", "output file name"),
        OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
        OPT_INTEGER(0, "log-fd", &output_fd,
                    "log output to fd, instead of stderr"),
        OPT_STRING(0, "pre", &pre_cmd, "command",
                        "command to run prior to the measured command"),
        OPT_STRING(0, "post", &post_cmd, "command",
                        "command to run after the measured command"),
        OPT_UINTEGER('I', "interval-print", &stat_config.interval,
                    "print counts at regular interval in ms "
                    "(overhead is possible for values <= 100ms)"),
        OPT_INTEGER(0, "interval-count", &stat_config.times,
                    "print counts for fixed number of times"),
        OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
                    "clear screen in between new interval"),
        OPT_UINTEGER(0, "timeout", &stat_config.timeout,
                    "stop workload and print counts after a timeout period in ms (>= 10ms)"),
        OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
                     "aggregate counts per processor socket", AGGR_SOCKET),
        OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
                     "aggregate counts per processor die", AGGR_DIE),
        OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
                     "aggregate counts per physical processor core", AGGR_CORE),
        OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
                     "aggregate counts per thread", AGGR_THREAD),
        OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
                     "aggregate counts per numa node", AGGR_NODE),
        OPT_INTEGER('D', "delay", &target.initial_delay,
                    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
        OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
                        "Only print computed metrics. No raw values", enable_metric_only),
        OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
                       "don't group metric events, impacts multiplexing"),
        OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
                       "don't try to share events between metrics in a group"),
        OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
                       "disable adding events for the metric threshold calculation"),
        OPT_BOOLEAN(0, "topdown", &topdown_run,
                        "measure top-down statistics"),
        OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
                        "Set the metrics level for the top-down statistics (0: max level)"),
        OPT_BOOLEAN(0, "smi-cost", &smi_cost,
                        "measure SMI cost"),
        OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
                     "monitor specified metrics or metric groups (separated by ,)",
                     append_metric_groups),
        OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
                         "Configure all used events to run in kernel space.",
                         PARSE_OPT_EXCLUSIVE),
        OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
                         "Configure all used events to run in user space.",
                         PARSE_OPT_EXCLUSIVE),
        OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
                    "Use with the 'percore' event qualifier to show each "
                    "hardware thread's event count as the sum over all "
                    "hardware threads of the same physical core"),
        OPT_BOOLEAN(0, "summary", &stat_config.summary,
                       "print summary for interval mode"),
        OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
                       "don't print 'summary' for CSV summary output"),
        OPT_BOOLEAN(0, "quiet", &quiet,
                        "don't print any output, messages or warnings (useful with record)"),
        OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
                     "Only enable events on CPUs of this type "
                     "on a hybrid platform (e.g. core or atom)",
                     parse_hybrid_type),
#ifdef HAVE_LIBPFM
        OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
                "libpfm4 event selector. use 'perf list' to list available events",
                parse_libpfm_events_option),
#endif
        OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
                     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
                     "\t\t\t  Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
                     "\t\t\t  Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
                      parse_control_option),
        OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
                            "measure I/O performance metrics provided by arch/platform",
                            iostat_parse),
        OPT_END()
};

static const char *const aggr_mode__string[] = {
        [AGGR_CORE] = "core",
        [AGGR_DIE] = "die",
        [AGGR_GLOBAL] = "global",
        [AGGR_NODE] = "node",
        [AGGR_NONE] = "none",
        [AGGR_SOCKET] = "socket",
        [AGGR_THREAD] = "thread",
        [AGGR_UNSET] = "unset",
};

static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
                                                struct perf_cpu cpu)
{
        return aggr_cpu_id__socket(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
                                             struct perf_cpu cpu)
{
        return aggr_cpu_id__die(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
                                              struct perf_cpu cpu)
{
        return aggr_cpu_id__core(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
                                              struct perf_cpu cpu)
{
        return aggr_cpu_id__node(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused,
                                                struct perf_cpu cpu)
{
        return aggr_cpu_id__global(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused,
                                             struct perf_cpu cpu)
{
        return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
}

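/* Look up the aggregation ID for a CPU, caching the result per CPU in config->cpus_aggr_map. */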
static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
                                              aggr_get_id_t get_id, struct perf_cpu cpu)
{
        struct aggr_cpu_id id;

        /* per-process mode - should use global aggr mode */
        if (cpu.cpu == -1)
                return get_id(config, cpu);

        if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
                config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);

        id = config->cpus_aggr_map->map[cpu.cpu];
        return id;
}

static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
                                                       struct perf_cpu cpu)
{
        return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
}

static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
                                                    struct perf_cpu cpu)
{
        return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
}

static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
                                                     struct perf_cpu cpu)
{
        return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
}

static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
                                                     struct perf_cpu cpu)
{
        return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
}

static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config,
                                                       struct perf_cpu cpu)
{
        return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
}

static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config,
                                                    struct perf_cpu cpu)
{
        return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
}

static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
{
        switch (aggr_mode) {
        case AGGR_SOCKET:
                return aggr_cpu_id__socket;
        case AGGR_DIE:
                return aggr_cpu_id__die;
        case AGGR_CORE:
                return aggr_cpu_id__core;
        case AGGR_NODE:
                return aggr_cpu_id__node;
        case AGGR_NONE:
                return aggr_cpu_id__cpu;
        case AGGR_GLOBAL:
                return aggr_cpu_id__global;
        case AGGR_THREAD:
        case AGGR_UNSET:
        case AGGR_MAX:
        default:
                return NULL;
        }
}

static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
{
        switch (aggr_mode) {
        case AGGR_SOCKET:
                return perf_stat__get_socket_cached;
        case AGGR_DIE:
                return perf_stat__get_die_cached;
        case AGGR_CORE:
                return perf_stat__get_core_cached;
        case AGGR_NODE:
                return perf_stat__get_node_cached;
        case AGGR_NONE:
                return perf_stat__get_cpu_cached;
        case AGGR_GLOBAL:
                return perf_stat__get_global_cached;
        case AGGR_THREAD:
        case AGGR_UNSET:
        case AGGR_MAX:
        default:
                return NULL;
        }
}

1391 static int perf_stat_init_aggr_mode(void)
1392 {
1393         int nr;
1394         aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);
1395
1396         if (get_id) {
1397                 bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
1398                 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
1399                                                          get_id, /*data=*/NULL, needs_sort);
1400                 if (!stat_config.aggr_map) {
1401                         pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
1402                         return -1;
1403                 }
1404                 stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
1405         }
1406
1407         if (stat_config.aggr_mode == AGGR_THREAD) {
1408                 nr = perf_thread_map__nr(evsel_list->core.threads);
1409                 stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
1410                 if (stat_config.aggr_map == NULL)
1411                         return -ENOMEM;
1412
1413                 for (int s = 0; s < nr; s++) {
1414                         struct aggr_cpu_id id = aggr_cpu_id__empty();
1415
1416                         id.thread_idx = s;
1417                         stat_config.aggr_map->map[s] = id;
1418                 }
1419                 return 0;
1420         }
1421
1422         /*
1423          * The evsel_list->cpus is the base we operate on;
1424          * the highest CPU number (plus one) gives the size of
1425          * the aggregation translation cpumap.
1426          */
1427         if (evsel_list->core.user_requested_cpus)
1428                 nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
1429         else
1430                 nr = 0;
1431         stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
1432         return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
1433 }
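
/*
 * Sizing example (illustrative): for `perf stat -A -C 0,15 ...` the highest
 * user-requested CPU is 15, so cpus_aggr_map gets 15 + 1 = 16 entries and
 * can be indexed directly by cpu.cpu in perf_stat__get_aggr() above.
 */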
1434
1435 static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
1436 {
1437         if (map) {
1438                 WARN_ONCE(refcount_read(&map->refcnt) != 0,
1439                           "cpu_aggr_map refcnt unbalanced\n");
1440                 free(map);
1441         }
1442 }
1443
1444 static void cpu_aggr_map__put(struct cpu_aggr_map *map)
1445 {
1446         if (map && refcount_dec_and_test(&map->refcnt))
1447                 cpu_aggr_map__delete(map);
1448 }
1449
1450 static void perf_stat__exit_aggr_mode(void)
1451 {
1452         cpu_aggr_map__put(stat_config.aggr_map);
1453         cpu_aggr_map__put(stat_config.cpus_aggr_map);
1454         stat_config.aggr_map = NULL;
1455         stat_config.cpus_aggr_map = NULL;
1456 }
1457
1458 static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
1459 {
1460         struct perf_env *env = data;
1461         struct aggr_cpu_id id = aggr_cpu_id__empty();
1462
1463         if (cpu.cpu != -1)
1464                 id.socket = env->cpu[cpu.cpu].socket_id;
1465
1466         return id;
1467 }
1468
1469 static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
1470 {
1471         struct perf_env *env = data;
1472         struct aggr_cpu_id id = aggr_cpu_id__empty();
1473
1474         if (cpu.cpu != -1) {
1475                 /*
1476                  * die_id is relative to socket, so start
1477                  * with the socket ID and then add die to
1478                  * make a unique ID.
1479                  */
1480                 id.socket = env->cpu[cpu.cpu].socket_id;
1481                 id.die = env->cpu[cpu.cpu].die_id;
1482         }
1483
1484         return id;
1485 }
1486
1487 static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
1488 {
1489         struct perf_env *env = data;
1490         struct aggr_cpu_id id = aggr_cpu_id__empty();
1491
1492         if (cpu.cpu != -1) {
1493                 /*
1494                  * core_id is relative to socket and die, but we
1495                  * need a global ID, so we set the socket, die
1496                  * and core IDs.
1497                  */
1498                 id.socket = env->cpu[cpu.cpu].socket_id;
1499                 id.die = env->cpu[cpu.cpu].die_id;
1500                 id.core = env->cpu[cpu.cpu].core_id;
1501         }
1502
1503         return id;
1504 }
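
/*
 * Illustrative example: core_id alone is ambiguous across packages, so the
 * composed ID keeps the whole topology path. On a two-socket machine, core 0
 * of each socket maps to distinct aggregation IDs:
 *
 *	{ .socket = 0, .die = 0, .core = 0 }
 *	{ .socket = 1, .die = 0, .core = 0 }
 */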
1505
1506 static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data)
1507 {
1508         struct perf_env *env = data;
1509         struct aggr_cpu_id id = aggr_cpu_id__empty();
1510
1511         if (cpu.cpu != -1) {
1512                 /*
1513                  * core_id is relative to socket and die, but we
1514                  * need a globally unique ID, so we set the socket,
1515                  * die and core IDs, plus the CPU itself.
1516                  */
1517                 id.socket = env->cpu[cpu.cpu].socket_id;
1518                 id.die = env->cpu[cpu.cpu].die_id;
1519                 id.core = env->cpu[cpu.cpu].core_id;
1520                 id.cpu = cpu;
1521         }
1522
1523         return id;
1524 }
1525
1526 static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
1527 {
1528         struct aggr_cpu_id id = aggr_cpu_id__empty();
1529
1530         id.node = perf_env__numa_node(data, cpu);
1531         return id;
1532 }
1533
1534 static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,
1535                                                            void *data __maybe_unused)
1536 {
1537         struct aggr_cpu_id id = aggr_cpu_id__empty();
1538
1539         /* It always aggregates to CPU 0. */
1540         id.cpu = (struct perf_cpu){ .cpu = 0 };
1541         return id;
1542 }
1543
1544 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
1545                                                      struct perf_cpu cpu)
1546 {
1547         return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1548 }
1549 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
1550                                                   struct perf_cpu cpu)
1551 {
1552         return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1553 }
1554
1555 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
1556                                                    struct perf_cpu cpu)
1557 {
1558         return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1559 }
1560
1561 static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
1562                                                   struct perf_cpu cpu)
1563 {
1564         return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1565 }
1566
1567 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
1568                                                    struct perf_cpu cpu)
1569 {
1570         return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1571 }
1572
1573 static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
1574                                                      struct perf_cpu cpu)
1575 {
1576         return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1577 }
1578
1579 static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
1580 {
1581         switch (aggr_mode) {
1582         case AGGR_SOCKET:
1583                 return perf_env__get_socket_aggr_by_cpu;
1584         case AGGR_DIE:
1585                 return perf_env__get_die_aggr_by_cpu;
1586         case AGGR_CORE:
1587                 return perf_env__get_core_aggr_by_cpu;
1588         case AGGR_NODE:
1589                 return perf_env__get_node_aggr_by_cpu;
1590         case AGGR_GLOBAL:
1591                 return perf_env__get_global_aggr_by_cpu;
1592         case AGGR_NONE:
1593                 return perf_env__get_cpu_aggr_by_cpu;
1594         case AGGR_THREAD:
1595         case AGGR_UNSET:
1596         case AGGR_MAX:
1597         default:
1598                 return NULL;
1599         }
1600 }
1601
1602 static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
1603 {
1604         switch (aggr_mode) {
1605         case AGGR_SOCKET:
1606                 return perf_stat__get_socket_file;
1607         case AGGR_DIE:
1608                 return perf_stat__get_die_file;
1609         case AGGR_CORE:
1610                 return perf_stat__get_core_file;
1611         case AGGR_NODE:
1612                 return perf_stat__get_node_file;
1613         case AGGR_GLOBAL:
1614                 return perf_stat__get_global_file;
1615         case AGGR_NONE:
1616                 return perf_stat__get_cpu_file;
1617         case AGGR_THREAD:
1618         case AGGR_UNSET:
1619         case AGGR_MAX:
1620         default:
1621                 return NULL;
1622         }
1623 }
1624
1625 static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
1626 {
1627         struct perf_env *env = &st->session->header.env;
1628         aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
1629         bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
1630
1631         if (stat_config.aggr_mode == AGGR_THREAD) {
1632                 int nr = perf_thread_map__nr(evsel_list->core.threads);
1633
1634                 stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
1635                 if (stat_config.aggr_map == NULL)
1636                         return -ENOMEM;
1637
1638                 for (int s = 0; s < nr; s++) {
1639                         struct aggr_cpu_id id = aggr_cpu_id__empty();
1640
1641                         id.thread_idx = s;
1642                         stat_config.aggr_map->map[s] = id;
1643                 }
1644                 return 0;
1645         }
1646
1647         if (!get_id)
1648                 return 0;
1649
1650         stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
1651                                                  get_id, env, needs_sort);
1652         if (!stat_config.aggr_map) {
1653                 pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
1654                 return -1;
1655         }
1656         stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
1657         return 0;
1658 }
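
/*
 * Usage sketch (illustrative): the *_file variants resolve topology from the
 * recorded header env instead of the live system, so e.g.:
 *
 *	$ perf stat record -a -- sleep 1
 *	$ perf stat report --per-core
 *
 * can aggregate a perf.data file on a machine other than the one it was
 * recorded on.
 */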
1659
1660 /*
1661  * Add default attributes if no attributes were specified, or
1662  * append extra attributes if -d/--detailed, -d -d or -d -d -d is used:
1663  */
1664 static int add_default_attributes(void)
1665 {
1666         struct perf_event_attr default_attrs0[] = {
1667
1668   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK              },
1669   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES        },
1670   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS          },
1671   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS             },
1672
1673   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES              },
1674 };
1675         struct perf_event_attr frontend_attrs[] = {
1676   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
1677 };
1678         struct perf_event_attr backend_attrs[] = {
1679   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND  },
1680 };
1681         struct perf_event_attr default_attrs1[] = {
1682   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS            },
1683   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS     },
1684   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES           },
1685
1686 };
1687
1688 /*
1689  * Detailed stats (-d), covering the L1 and last level data caches:
1690  */
1691         struct perf_event_attr detailed_attrs[] = {
1692
1693   { .type = PERF_TYPE_HW_CACHE,
1694     .config =
1695          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1696         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1697         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1698
1699   { .type = PERF_TYPE_HW_CACHE,
1700     .config =
1701          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1702         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1703         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1704
1705   { .type = PERF_TYPE_HW_CACHE,
1706     .config =
1707          PERF_COUNT_HW_CACHE_LL                 <<  0  |
1708         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1709         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1710
1711   { .type = PERF_TYPE_HW_CACHE,
1712     .config =
1713          PERF_COUNT_HW_CACHE_LL                 <<  0  |
1714         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1715         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1716 };
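
/*
 * Note: the .config values above use the PERF_TYPE_HW_CACHE encoding
 * documented in perf_event_open(2):
 *
 *	config = cache_id | (op_id << 8) | (result_id << 16)
 *
 * e.g. L1D read misses: PERF_COUNT_HW_CACHE_L1D (0) |
 * (PERF_COUNT_HW_CACHE_OP_READ (0) << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS (1) << 16) == 0x10000.
 */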
1717
1718 /*
1719  * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
1720  */
1721         struct perf_event_attr very_detailed_attrs[] = {
1722
1723   { .type = PERF_TYPE_HW_CACHE,
1724     .config =
1725          PERF_COUNT_HW_CACHE_L1I                <<  0  |
1726         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1727         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1728
1729   { .type = PERF_TYPE_HW_CACHE,
1730     .config =
1731          PERF_COUNT_HW_CACHE_L1I                <<  0  |
1732         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1733         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1734
1735   { .type = PERF_TYPE_HW_CACHE,
1736     .config =
1737          PERF_COUNT_HW_CACHE_DTLB               <<  0  |
1738         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1739         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1740
1741   { .type = PERF_TYPE_HW_CACHE,
1742     .config =
1743          PERF_COUNT_HW_CACHE_DTLB               <<  0  |
1744         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1745         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1746
1747   { .type = PERF_TYPE_HW_CACHE,
1748     .config =
1749          PERF_COUNT_HW_CACHE_ITLB               <<  0  |
1750         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1751         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1752
1753   { .type = PERF_TYPE_HW_CACHE,
1754     .config =
1755          PERF_COUNT_HW_CACHE_ITLB               <<  0  |
1756         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1757         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1758
1759 };
1760
1761 /*
1762  * Very, very detailed stats (-d -d -d), adding prefetch events:
1763  */
1764         struct perf_event_attr very_very_detailed_attrs[] = {
1765
1766   { .type = PERF_TYPE_HW_CACHE,
1767     .config =
1768          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1769         (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
1770         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1771
1772   { .type = PERF_TYPE_HW_CACHE,
1773     .config =
1774          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1775         (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
1776         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1777 };
1778
1779         struct perf_event_attr default_null_attrs[] = {};
1780
1781         /* Attrs are only set if no event was selected and this is not a null run: */
1782         if (stat_config.null_run)
1783                 return 0;
1784
1785         if (transaction_run) {
1786                 /* Handle -T as -M transaction. Once platform-specific metrics
1787                  * support has been added to the json files, all architectures
1788                  * will use this approach. To determine transaction support
1789                  * on an architecture, test for such a metric name.
1790                  */
1791                 if (!metricgroup__has_metric("transaction")) {
1792                         pr_err("Missing transaction metrics\n");
1793                         return -1;
1794                 }
1795                 return metricgroup__parse_groups(evsel_list, "transaction",
1796                                                 stat_config.metric_no_group,
1797                                                 stat_config.metric_no_merge,
1798                                                 stat_config.metric_no_threshold,
1799                                                 stat_config.user_requested_cpu_list,
1800                                                 stat_config.system_wide,
1801                                                 &stat_config.metric_events);
1802         }
1803
1804         if (smi_cost) {
1805                 int smi;
1806
1807                 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
1808                         pr_err("freeze_on_smi is not supported.\n");
1809                         return -1;
1810                 }
1811
1812                 if (!smi) {
1813                         if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
1814                                 fprintf(stderr, "Failed to set freeze_on_smi.\n");
1815                                 return -1;
1816                         }
1817                         smi_reset = true;
1818                 }
1819
1820                 if (!metricgroup__has_metric("smi")) {
1821                         pr_err("Missing smi metrics\n");
1822                         return -1;
1823                 }
1824
1825                 if (!force_metric_only)
1826                         stat_config.metric_only = true;
1827
1828                 return metricgroup__parse_groups(evsel_list, "smi",
1829                                                 stat_config.metric_no_group,
1830                                                 stat_config.metric_no_merge,
1831                                                 stat_config.metric_no_threshold,
1832                                                 stat_config.user_requested_cpu_list,
1833                                                 stat_config.system_wide,
1834                                                 &stat_config.metric_events);
1835         }
1836
1837         if (topdown_run) {
1838                 unsigned int max_level = metricgroups__topdown_max_level();
1839                 char str[] = "TopdownL1";
1840
1841                 if (!force_metric_only)
1842                         stat_config.metric_only = true;
1843
1844                 if (!max_level) {
1845                         pr_err("Topdown requested but the topdown metric groups aren't present.\n"
1846                                 "(See perf list; the metric groups have names like TopdownL1.)\n");
1847                         return -1;
1848                 }
1849                 if (stat_config.topdown_level > max_level) {
1850                         pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
1851                         return -1;
1852                 } else if (!stat_config.topdown_level)
1853                         stat_config.topdown_level = 1;
1854
1855                 if (!stat_config.interval && !stat_config.metric_only) {
1856                         fprintf(stat_config.output,
1857                                 "Topdown accuracy may decrease when measuring long periods.\n"
1858                                 "Please print the result regularly, e.g. -I1000\n");
1859                 }
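                /* Patch the level digit in place: "TopdownL1"[8] is the trailing '1'. */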
1860                 str[8] = stat_config.topdown_level + '0';
1861                 if (metricgroup__parse_groups(evsel_list, str,
1862                                                 /*metric_no_group=*/false,
1863                                                 /*metric_no_merge=*/false,
1864                                                 /*metric_no_threshold=*/true,
1865                                                 stat_config.user_requested_cpu_list,
1866                                                 stat_config.system_wide,
1867                                                 &stat_config.metric_events) < 0)
1868                         return -1;
1869         }
1870
1871         if (!stat_config.topdown_level)
1872                 stat_config.topdown_level = 1;
1873
1874         if (!evsel_list->core.nr_entries) {
1875                 /* No events so add defaults. */
1876                 if (target__has_cpu(&target))
1877                         default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
1878
1879                 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
1880                         return -1;
1881                 if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
1882                         if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
1883                                 return -1;
1884                 }
1885                 if (pmu_have_event("cpu", "stalled-cycles-backend")) {
1886                         if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
1887                                 return -1;
1888                 }
1889                 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
1890                         return -1;
1891                 /*
1892                  * Add TopdownL1 metrics if they exist. To minimize
1893                  * multiplexing, don't request threshold computation.
1894                  */
1895                 /*
1896                  * TODO: TopdownL1 is disabled on hybrid CPUs to avoid crashes
1897                  * caused by exposing latent bugs. This is fixed properly in:
1898                  * https://lore.kernel.org/lkml/bff481ba-e60a-763f-0aa0-3ee53302c480@linux.intel.com/
1899                  */
1900                 if (metricgroup__has_metric("TopdownL1") && !perf_pmu__has_hybrid()) {
1901                         struct evlist *metric_evlist = evlist__new();
1902                         struct evsel *metric_evsel;
1903
1904                         if (!metric_evlist)
1905                                 return -1;
1906
1907                         if (metricgroup__parse_groups(metric_evlist, "TopdownL1",
1908                                                         /*metric_no_group=*/false,
1909                                                         /*metric_no_merge=*/false,
1910                                                         /*metric_no_threshold=*/true,
1911                                                         stat_config.user_requested_cpu_list,
1912                                                         stat_config.system_wide,
1913                                                         &stat_config.metric_events) < 0)
1914                                 return -1;
1915
1916                         evlist__for_each_entry(metric_evlist, metric_evsel) {
1917                                 metric_evsel->skippable = true;
1918                         }
1919                         evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
1920                         evlist__delete(metric_evlist);
1921                 }
1922
1923                 /* Platform specific attrs */
1924                 if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
1925                         return -1;
1926         }
1927
1928         /* Detailed events get appended to the event list: */
1929
1930         if (detailed_run < 1)
1931                 return 0;
1932
1933         /* Append detailed run extra attributes: */
1934         if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
1935                 return -1;
1936
1937         if (detailed_run < 2)
1938                 return 0;
1939
1940         /* Append very detailed run extra attributes: */
1941         if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
1942                 return -1;
1943
1944         if (detailed_run < 3)
1945                 return 0;
1946
1947         /* Append very, very detailed run extra attributes: */
1948         return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
1949 }
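
/*
 * Illustrative summary of the fall-through above (assuming the PMU exposes
 * the relevant events/metrics):
 *
 *	perf stat          -> default_attrs0/1, stall cycles and TopdownL1 when present
 *	perf stat -d       -> + detailed_attrs (L1D/LLC read accesses and misses)
 *	perf stat -d -d    -> + very_detailed_attrs (L1I/dTLB/iTLB reads)
 *	perf stat -d -d -d -> + very_very_detailed_attrs (L1D prefetches)
 */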
1950
1951 static const char * const stat_record_usage[] = {
1952         "perf stat record [<options>]",
1953         NULL,
1954 };
1955
1956 static void init_features(struct perf_session *session)
1957 {
1958         int feat;
1959
1960         for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1961                 perf_header__set_feat(&session->header, feat);
1962
1963         perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
1964         perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1965         perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1966         perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1967         perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1968 }
1969
1970 static int __cmd_record(int argc, const char **argv)
1971 {
1972         struct perf_session *session;
1973         struct perf_data *data = &perf_stat.data;
1974
1975         argc = parse_options(argc, argv, stat_options, stat_record_usage,
1976                              PARSE_OPT_STOP_AT_NON_OPTION);
1977
1978         if (output_name)
1979                 data->path = output_name;
1980
1981         if (stat_config.run_count != 1 || forever) {
1982                 pr_err("Cannot use -r option with perf stat record.\n");
1983                 return -1;
1984         }
1985
1986         session = perf_session__new(data, NULL);
1987         if (IS_ERR(session)) {
1988                 pr_err("Perf session creation failed\n");
1989                 return PTR_ERR(session);
1990         }
1991
1992         init_features(session);
1993
1994         session->evlist   = evsel_list;
1995         perf_stat.session = session;
1996         perf_stat.record  = true;
1997         return argc;
1998 }
1999
2000 static int process_stat_round_event(struct perf_session *session,
2001                                     union perf_event *event)
2002 {
2003         struct perf_record_stat_round *stat_round = &event->stat_round;
2004         struct timespec tsh, *ts = NULL;
2005         const char **argv = session->header.env.cmdline_argv;
2006         int argc = session->header.env.nr_cmdline;
2007
2008         process_counters();
2009
2010         if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
2011                 update_stats(&walltime_nsecs_stats, stat_round->time);
2012
2013         if (stat_config.interval && stat_round->time) {
2014                 tsh.tv_sec  = stat_round->time / NSEC_PER_SEC;
2015                 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
2016                 ts = &tsh;
2017         }
2018
2019         print_counters(ts, argc, argv);
2020         return 0;
2021 }
2022
2023 static
2024 int process_stat_config_event(struct perf_session *session,
2025                               union perf_event *event)
2026 {
2027         struct perf_tool *tool = session->tool;
2028         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
2029
2030         perf_event__read_stat_config(&stat_config, &event->stat_config);
2031
2032         if (perf_cpu_map__empty(st->cpus)) {
2033                 if (st->aggr_mode != AGGR_UNSET)
2034                         pr_warning("warning: processing task data, aggregation mode not set\n");
2035         } else if (st->aggr_mode != AGGR_UNSET) {
2036                 stat_config.aggr_mode = st->aggr_mode;
2037         }
2038
2039         if (perf_stat.data.is_pipe)
2040                 perf_stat_init_aggr_mode();
2041         else
2042                 perf_stat_init_aggr_mode_file(st);
2043
2044         if (stat_config.aggr_map) {
2045                 int nr_aggr = stat_config.aggr_map->nr;
2046
2047                 if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) {
2048                         pr_err("cannot allocate aggr counts\n");
2049                         return -1;
2050                 }
2051         }
2052         return 0;
2053 }
2054
2055 static int set_maps(struct perf_stat *st)
2056 {
2057         if (!st->cpus || !st->threads)
2058                 return 0;
2059
2060         if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
2061                 return -EINVAL;
2062
2063         perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
2064
2065         if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
2066                 return -ENOMEM;
2067
2068         st->maps_allocated = true;
2069         return 0;
2070 }
2071
2072 static
2073 int process_thread_map_event(struct perf_session *session,
2074                              union perf_event *event)
2075 {
2076         struct perf_tool *tool = session->tool;
2077         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
2078
2079         if (st->threads) {
2080                 pr_warning("Extra thread map event, ignoring.\n");
2081                 return 0;
2082         }
2083
2084         st->threads = thread_map__new_event(&event->thread_map);
2085         if (!st->threads)
2086                 return -ENOMEM;
2087
2088         return set_maps(st);
2089 }
2090
2091 static
2092 int process_cpu_map_event(struct perf_session *session,
2093                           union perf_event *event)
2094 {
2095         struct perf_tool *tool = session->tool;
2096         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
2097         struct perf_cpu_map *cpus;
2098
2099         if (st->cpus) {
2100                 pr_warning("Extra cpu map event, ignoring.\n");
2101                 return 0;
2102         }
2103
2104         cpus = cpu_map__new_data(&event->cpu_map.data);
2105         if (!cpus)
2106                 return -ENOMEM;
2107
2108         st->cpus = cpus;
2109         return set_maps(st);
2110 }
2111
2112 static const char * const stat_report_usage[] = {
2113         "perf stat report [<options>]",
2114         NULL,
2115 };
2116
2117 static struct perf_stat perf_stat = {
2118         .tool = {
2119                 .attr           = perf_event__process_attr,
2120                 .event_update   = perf_event__process_event_update,
2121                 .thread_map     = process_thread_map_event,
2122                 .cpu_map        = process_cpu_map_event,
2123                 .stat_config    = process_stat_config_event,
2124                 .stat           = perf_event__process_stat_event,
2125                 .stat_round     = process_stat_round_event,
2126         },
2127         .aggr_mode = AGGR_UNSET,
2128 };
2129
2130 static int __cmd_report(int argc, const char **argv)
2131 {
2132         struct perf_session *session;
2133         const struct option options[] = {
2134         OPT_STRING('i', "input", &input_name, "file", "input file name"),
2135         OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
2136                      "aggregate counts per processor socket", AGGR_SOCKET),
2137         OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
2138                      "aggregate counts per processor die", AGGR_DIE),
2139         OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
2140                      "aggregate counts per physical processor core", AGGR_CORE),
2141         OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
2142                      "aggregate counts per numa node", AGGR_NODE),
2143         OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
2144                      "disable CPU count aggregation", AGGR_NONE),
2145         OPT_END()
2146         };
2147         struct stat st;
2148         int ret;
2149
2150         argc = parse_options(argc, argv, options, stat_report_usage, 0);
2151
2152         if (!input_name || !strlen(input_name)) {
2153                 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
2154                         input_name = "-";
2155                 else
2156                         input_name = "perf.data";
2157         }
2158
2159         perf_stat.data.path = input_name;
2160         perf_stat.data.mode = PERF_DATA_MODE_READ;
2161
2162         session = perf_session__new(&perf_stat.data, &perf_stat.tool);
2163         if (IS_ERR(session))
2164                 return PTR_ERR(session);
2165
2166         perf_stat.session  = session;
2167         stat_config.output = stderr;
2168         evsel_list         = session->evlist;
2169
2170         ret = perf_session__process_events(session);
2171         if (ret)
2172                 return ret;
2173
2174         perf_session__delete(session);
2175         return 0;
2176 }
2177
2178 static void setup_system_wide(int forks)
2179 {
2180         /*
2181          * Make system wide (-a) the default target if
2182          * no target was specified and one of the following
2183          * conditions is met:
2184          *
2185          *   - there's no workload specified
2186          *   - there is a workload specified but all requested
2187          *     events are system-wide events
2188          */
2189         if (!target__none(&target))
2190                 return;
2191
2192         if (!forks)
2193                 target.system_wide = true;
2194         else {
2195                 struct evsel *counter;
2196
2197                 evlist__for_each_entry(evsel_list, counter) {
2198                         if (!counter->core.requires_cpu &&
2199                             !evsel__name_is(counter, "duration_time")) {
2200                                 return;
2201                         }
2202                 }
2203
2204                 if (evsel_list->core.nr_entries)
2205                         target.system_wide = true;
2206         }
2207 }
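
/*
 * Example (illustrative): uncore events count per CPU rather than per thread,
 * so a command like:
 *
 *	$ perf stat -e uncore_imc/cas_count_read/ -- sleep 1
 *
 * falls through to target.system_wide = true above, while a plain
 * `perf stat -- sleep 1` keeps the default per-task target.
 */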
2208
2209 int cmd_stat(int argc, const char **argv)
2210 {
2211         const char * const stat_usage[] = {
2212                 "perf stat [<options>] [<command>]",
2213                 NULL
2214         };
2215         int status = -EINVAL, run_idx, err;
2216         const char *mode;
2217         FILE *output = stderr;
2218         unsigned int interval, timeout;
2219         const char * const stat_subcommands[] = { "record", "report" };
2220         char errbuf[BUFSIZ];
2221
2222         setlocale(LC_ALL, "");
2223
2224         evsel_list = evlist__new();
2225         if (evsel_list == NULL)
2226                 return -ENOMEM;
2227
2228         parse_events__shrink_config_terms();
2229
2230         /* String-parsing callback-based options would segfault when negated */
2231         set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
2232         set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
2233         set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
2234
2235         argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
2236                                         (const char **) stat_usage,
2237                                         PARSE_OPT_STOP_AT_NON_OPTION);
2238
2239         if (stat_config.csv_sep) {
2240                 stat_config.csv_output = true;
2241                 if (!strcmp(stat_config.csv_sep, "\\t"))
2242                         stat_config.csv_sep = "\t";
2243         } else
2244                 stat_config.csv_sep = DEFAULT_SEPARATOR;
2245
2246         if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2247                 argc = __cmd_record(argc, argv);
2248                 if (argc < 0)
2249                         return -1;
2250         } else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
2251                 return __cmd_report(argc, argv);
2252
2253         interval = stat_config.interval;
2254         timeout = stat_config.timeout;
2255
2256         /*
2257          * For the record command, -o has already been taken care of.
2258          */
2259         if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
2260                 output = NULL;
2261
2262         if (output_name && output_fd) {
2263                 fprintf(stderr, "cannot use both --output and --log-fd\n");
2264                 parse_options_usage(stat_usage, stat_options, "o", 1);
2265                 parse_options_usage(NULL, stat_options, "log-fd", 0);
2266                 goto out;
2267         }
2268
2269         if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
2270                 fprintf(stderr, "--metric-only is not supported with --per-thread\n");
2271                 goto out;
2272         }
2273
2274         if (stat_config.metric_only && stat_config.run_count > 1) {
2275                 fprintf(stderr, "--metric-only is not supported with -r\n");
2276                 goto out;
2277         }
2278
2279         if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
2280                 fprintf(stderr, "--table is only supported with -r\n");
2281                 parse_options_usage(stat_usage, stat_options, "r", 1);
2282                 parse_options_usage(NULL, stat_options, "table", 0);
2283                 goto out;
2284         }
2285
2286         if (output_fd < 0) {
2287                 fprintf(stderr, "argument to --log-fd must be > 0\n");
2288                 parse_options_usage(stat_usage, stat_options, "log-fd", 0);
2289                 goto out;
2290         }
2291
2292         if (!output && !quiet) {
2293                 struct timespec tm;
2294                 mode = append_file ? "a" : "w";
2295
2296                 output = fopen(output_name, mode);
2297                 if (!output) {
2298                         perror("failed to create output file");
2299                         return -1;
2300                 }
2301                 if (!stat_config.json_output) {
2302                         clock_gettime(CLOCK_REALTIME, &tm);
2303                         fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
2304                 }
2305         } else if (output_fd > 0) {
2306                 mode = append_file ? "a" : "w";
2307                 output = fdopen(output_fd, mode);
2308                 if (!output) {
2309                         perror("Failed opening logfd");
2310                         return -errno;
2311                 }
2312         }
2313
2314         if (stat_config.interval_clear && !isatty(fileno(output))) {
2315                 fprintf(stderr, "--interval-clear only works when output goes to a terminal\n");
2316                 parse_options_usage(stat_usage, stat_options, "o", 1);
2317                 parse_options_usage(NULL, stat_options, "log-fd", 0);
2318                 parse_options_usage(NULL, stat_options, "interval-clear", 0);
2319                 return -1;
2320         }
2321
2322         stat_config.output = output;
2323
2324         /*
2325          * let the spreadsheet do the pretty-printing
2326          */
2327         if (stat_config.csv_output) {
2328                 /* User explicitly passed -B? */
2329                 if (big_num_opt == 1) {
2330                         fprintf(stderr, "-B option not supported with -x\n");
2331                         parse_options_usage(stat_usage, stat_options, "B", 1);
2332                         parse_options_usage(NULL, stat_options, "x", 1);
2333                         goto out;
2334                 } else /* Nope, so disable big number formatting */
2335                         stat_config.big_num = false;
2336         } else if (big_num_opt == 0) /* User passed --no-big-num */
2337                 stat_config.big_num = false;
2338
2339         err = target__validate(&target);
2340         if (err) {
2341                 target__strerror(&target, err, errbuf, BUFSIZ);
2342                 pr_warning("%s\n", errbuf);
2343         }
2344
2345         setup_system_wide(argc);
2346
2347         /*
2348          * Display user/system times only for a single
2349          * run and when a tracee is specified.
2350          */
2351         if ((stat_config.run_count == 1) && target__none(&target))
2352                 stat_config.ru_display = true;
2353
2354         if (stat_config.run_count < 0) {
2355                 pr_err("Run count must be a positive number\n");
2356                 parse_options_usage(stat_usage, stat_options, "r", 1);
2357                 goto out;
2358         } else if (stat_config.run_count == 0) {
2359                 forever = true;
2360                 stat_config.run_count = 1;
2361         }
2362
2363         if (stat_config.walltime_run_table) {
2364                 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
2365                 if (!stat_config.walltime_run) {
2366                         pr_err("failed to set up the -r option\n");
2367                         goto out;
2368                 }
2369         }
2370
2371         if ((stat_config.aggr_mode == AGGR_THREAD) &&
2372                 !target__has_task(&target)) {
2373                 if (!target.system_wide || target.cpu_list) {
2374                 fprintf(stderr, "The --per-thread option is only "
2375                         "available when monitoring via the -p, -t or -a "
2376                         "options, or with --per-thread alone.\n");
2377                         parse_options_usage(NULL, stat_options, "p", 1);
2378                         parse_options_usage(NULL, stat_options, "t", 1);
2379                         goto out;
2380                 }
2381         }
2382
2383         /*
2384          * no_aggr and cgroup are for system-wide only;
2385          * --per-thread is aggregated per thread, so we don't mix it with CPU mode.
2386          */
2387         if (((stat_config.aggr_mode != AGGR_GLOBAL &&
2388               stat_config.aggr_mode != AGGR_THREAD) ||
2389              (nr_cgroups || stat_config.cgroup_list)) &&
2390             !target__has_cpu(&target)) {
2391                 fprintf(stderr, "both cgroup and no-aggregation "
2392                         "modes are only available in system-wide mode\n");
2393
2394                 parse_options_usage(stat_usage, stat_options, "G", 1);
2395                 parse_options_usage(NULL, stat_options, "A", 1);
2396                 parse_options_usage(NULL, stat_options, "a", 1);
2397                 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
2398                 goto out;
2399         }
2400
2401         if (stat_config.iostat_run) {
2402                 status = iostat_prepare(evsel_list, &stat_config);
2403                 if (status)
2404                         goto out;
2405                 if (iostat_mode == IOSTAT_LIST) {
2406                         iostat_list(evsel_list, &stat_config);
2407                         goto out;
2408                 } else if (verbose > 0)
2409                         iostat_list(evsel_list, &stat_config);
2410                 if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
2411                         target.system_wide = true;
2412         }
2413
2414         if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
2415                 target.per_thread = true;
2416
2417         stat_config.system_wide = target.system_wide;
2418         if (target.cpu_list) {
2419                 stat_config.user_requested_cpu_list = strdup(target.cpu_list);
2420                 if (!stat_config.user_requested_cpu_list) {
2421                         status = -ENOMEM;
2422                         goto out;
2423                 }
2424         }
2425
2426         /*
2427          * Metric parsing needs to be delayed as metrics may optimize events
2428          * knowing the target is system-wide.
2429          */
2430         if (metrics) {
2431                 metricgroup__parse_groups(evsel_list, metrics,
2432                                         stat_config.metric_no_group,
2433                                         stat_config.metric_no_merge,
2434                                         stat_config.metric_no_threshold,
2435                                         stat_config.user_requested_cpu_list,
2436                                         stat_config.system_wide,
2437                                         &stat_config.metric_events);
2438                 zfree(&metrics);
2439         }
2440
2441         if (add_default_attributes())
2442                 goto out;
2443
2444         if (stat_config.cgroup_list) {
2445                 if (nr_cgroups > 0) {
2446                         pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
2447                         parse_options_usage(stat_usage, stat_options, "G", 1);
2448                         parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
2449                         goto out;
2450                 }
2451
2452                 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
2453                                           &stat_config.metric_events, true) < 0) {
2454                         parse_options_usage(stat_usage, stat_options,
2455                                             "for-each-cgroup", 0);
2456                         goto out;
2457                 }
2458         }
2459
2460         if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
2461                 pr_err("failed to use cpu list %s\n", target.cpu_list);
2462                 goto out;
2463         }
2464
2465         target.hybrid = perf_pmu__has_hybrid();
2466         if (evlist__create_maps(evsel_list, &target) < 0) {
2467                 if (target__has_task(&target)) {
2468                         pr_err("Problems finding the threads to monitor\n");
2469                         parse_options_usage(stat_usage, stat_options, "p", 1);
2470                         parse_options_usage(NULL, stat_options, "t", 1);
2471                 } else if (target__has_cpu(&target)) {
2472                         perror("failed to parse CPUs map");
2473                         parse_options_usage(stat_usage, stat_options, "C", 1);
2474                         parse_options_usage(NULL, stat_options, "a", 1);
2475                 }
2476                 goto out;
2477         }
2478
2479         evlist__check_cpu_maps(evsel_list);
2480
2481         /*
2482          * Initialize thread_map with comm names,
2483          * so we can print them in the output.
2484          */
2485         if (stat_config.aggr_mode == AGGR_THREAD) {
2486                 thread_map__read_comms(evsel_list->core.threads);
2487         }
2488
2489         if (stat_config.aggr_mode == AGGR_NODE)
2490                 cpu__setup_cpunode_map();
2491
2492         if (stat_config.times && interval)
2493                 interval_count = true;
2494         else if (stat_config.times && !interval) {
2495                 pr_err("The interval-count option must be used together with "
2496                                 "interval-print.\n");
2497                 parse_options_usage(stat_usage, stat_options, "interval-count", 0);
2498                 parse_options_usage(stat_usage, stat_options, "I", 1);
2499                 goto out;
2500         }
2501
2502         if (timeout && timeout < 100) {
2503                 if (timeout < 10) {
2504                         pr_err("timeout must be >= 10ms.\n");
2505                         parse_options_usage(stat_usage, stat_options, "timeout", 0);
2506                         goto out;
2507                 } else
2508                         pr_warning("timeout < 100ms. "
2509                                    "The overhead percentage could be high in some cases. "
2510                                    "Please proceed with caution.\n");
2511         }
2512         if (timeout && interval) {
2513                 pr_err("timeout option is not supported with interval-print.\n");
2514                 parse_options_usage(stat_usage, stat_options, "timeout", 0);
2515                 parse_options_usage(stat_usage, stat_options, "I", 1);
2516                 goto out;
2517         }
2518
2519         if (perf_stat_init_aggr_mode())
2520                 goto out;
2521
2522         if (evlist__alloc_stats(&stat_config, evsel_list, interval))
2523                 goto out;
2524
2525         /*
2526          * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
2527          * and avoids confusing messages from older tools.
2528          *
2529          * However, for pipe sessions we need to keep it zero,
2530          * because script's perf_evsel__check_attr is triggered
2531          * by attr->sample_type != 0, and we can't run it on
2532          * stat sessions.
2533          */
2534         stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);
2535
2536         /*
2537          * We don't want to block the signals - that would cause
2538          * child tasks to inherit that and Ctrl-C would not work.
2539          * What we want is for Ctrl-C to work in the exec()-ed
2540          * task, but to be ignored by perf stat itself:
2541          */
2542         atexit(sig_atexit);
2543         if (!forever)
2544                 signal(SIGINT,  skip_signal);
2545         signal(SIGCHLD, skip_signal);
2546         signal(SIGALRM, skip_signal);
2547         signal(SIGABRT, skip_signal);
2548
2549         if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
2550                 goto out;
2551
2552         /* Enable ignoring missing threads when -p option is defined. */
2553         evlist__first(evsel_list)->ignore_missing_thread = target.pid;
2554         status = 0;
2555         for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
2556                 if (stat_config.run_count != 1 && verbose > 0)
2557                         fprintf(output, "[ perf stat: executing run #%d ... ]\n",
2558                                 run_idx + 1);
2559
2560                 if (run_idx != 0)
2561                         evlist__reset_prev_raw_counts(evsel_list);
2562
2563                 status = run_perf_stat(argc, argv, run_idx);
2564                 if (forever && status != -1 && !interval) {
2565                         print_counters(NULL, argc, argv);
2566                         perf_stat__reset_stats();
2567                 }
2568         }
2569
2570         if (!forever && status != -1 && (!interval || stat_config.summary))
2571                 print_counters(NULL, argc, argv);
2572
2573         evlist__finalize_ctlfd(evsel_list);
2574
2575         if (STAT_RECORD) {
2576                 /*
2577                  * We synthesize the kernel mmap record just so that older tools
2578                  * don't emit warnings about not being able to resolve symbols
2579                  * due to /proc/sys/kernel/kptr_restrict settings and instead provide
2580                  * a saner message about no samples being in the perf.data file.
2581                  *
2582                  * This also serves to suppress a warning about f_header.data.size == 0
2583                  * in header.c at the moment 'perf stat record' gets introduced, which
2584                  * is not really needed once we start adding the stat-specific PERF_RECORD_
2585                  * records, but the need to suppress the kptr_restrict messages in older
2586                  * tools remains. -acme
2587                  */
2588                 int fd = perf_data__fd(&perf_stat.data);
2589
2590                 err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
2591                                                          process_synthesized_event,
2592                                                          &perf_stat.session->machines.host);
2593                 if (err) {
2594                         pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
2595                                    "older tools may produce warnings about this file.\n");
2596                 }
2597
2598                 if (!interval) {
2599                         if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
2600                                 pr_err("failed to write stat round event\n");
2601                 }
2602
2603                 if (!perf_stat.data.is_pipe) {
2604                         perf_stat.session->header.data_size += perf_stat.bytes_written;
2605                         perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2606                 }
2607
2608                 evlist__close(evsel_list);
2609                 perf_session__delete(perf_stat.session);
2610         }
2611
2612         perf_stat__exit_aggr_mode();
2613         evlist__free_stats(evsel_list);
2614 out:
2615         if (stat_config.iostat_run)
2616                 iostat_release(evsel_list);
2617
2618         zfree(&stat_config.walltime_run);
2619         zfree(&stat_config.user_requested_cpu_list);
2620
2621         if (smi_cost && smi_reset)
2622                 sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
2623
2624         evlist__delete(evsel_list);
2625
2626         metricgroup__rblist_exit(&stat_config.metric_events);
2627         evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
2628
2629         return status;
2630 }