tools/perf/builtin-stat.c
1 /*
2  * builtin-stat.c
3  *
4  * Builtin stat command: Give a precise performance counters summary
5  * overview about any workload, CPU or specific PID.
6  *
7  * Sample output:
8
9    $ perf stat ./hackbench 10
10
11   Time: 0.118
12
13   Performance counter stats for './hackbench 10':
14
15        1708.761321 task-clock                #   11.037 CPUs utilized
16             41,190 context-switches          #    0.024 M/sec
17              6,735 CPU-migrations            #    0.004 M/sec
18             17,318 page-faults               #    0.010 M/sec
19      5,205,202,243 cycles                    #    3.046 GHz
20      3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
21      1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
22      2,603,501,247 instructions              #    0.50  insns per cycle
23                                              #    1.48  stalled cycles per insn
24        484,357,498 branches                  #  283.455 M/sec
25          6,388,934 branch-misses             #    1.32% of all branches
26
27         0.154822978  seconds time elapsed
28
29  *
30  * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
31  *
32  * Improvements and fixes by:
33  *
34  *   Arjan van de Ven <arjan@linux.intel.com>
35  *   Yanmin Zhang <yanmin.zhang@intel.com>
36  *   Wu Fengguang <fengguang.wu@intel.com>
37  *   Mike Galbraith <efault@gmx.de>
38  *   Paul Mackerras <paulus@samba.org>
39  *   Jaswinder Singh Rajput <jaswinder@kernel.org>
40  *
41  * Released under the GPL v2. (and only v2, not any later version)
42  */
43
44 #include "perf.h"
45 #include "builtin.h"
46 #include "util/cgroup.h"
47 #include "util/util.h"
48 #include <subcmd/parse-options.h>
49 #include "util/parse-events.h"
50 #include "util/pmu.h"
51 #include "util/event.h"
52 #include "util/evlist.h"
53 #include "util/evsel.h"
54 #include "util/debug.h"
55 #include "util/color.h"
56 #include "util/stat.h"
57 #include "util/header.h"
58 #include "util/cpumap.h"
59 #include "util/thread.h"
60 #include "util/thread_map.h"
61 #include "util/counts.h"
62 #include "util/group.h"
63 #include "util/session.h"
64 #include "util/tool.h"
65 #include "util/string2.h"
66 #include "util/metricgroup.h"
67 #include "util/top.h"
68 #include "asm/bug.h"
69
70 #include <linux/time64.h>
71 #include <api/fs/fs.h>
72 #include <errno.h>
73 #include <signal.h>
74 #include <stdlib.h>
75 #include <sys/prctl.h>
76 #include <inttypes.h>
77 #include <locale.h>
78 #include <math.h>
79 #include <sys/types.h>
80 #include <sys/stat.h>
81 #include <sys/wait.h>
82 #include <unistd.h>
83 #include <sys/time.h>
84 #include <sys/resource.h>
85
86 #include "sane_ctype.h"
87
88 #define DEFAULT_SEPARATOR       " "
89 #define FREEZE_ON_SMI_PATH      "devices/cpu/freeze_on_smi"
90
91 static void print_counters(struct timespec *ts, int argc, const char **argv);
92
93 /* Default events used for perf stat -T */
94 static const char *transaction_attrs = {
95         "task-clock,"
96         "{"
97         "instructions,"
98         "cycles,"
99         "cpu/cycles-t/,"
100         "cpu/tx-start/,"
101         "cpu/el-start/,"
102         "cpu/cycles-ct/"
103         "}"
104 };
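/*
 * This string is handed to parse_events() below, so it uses the same
 * event-list syntax as -e on the command line: roughly task-clock plus
 * one group holding instructions, cycles and the cpu/.../ transaction
 * events.
 */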
105
106 /* More limited version when the CPU does not have all events. */
107 static const char *transaction_limited_attrs = {
108         "task-clock,"
109         "{"
110         "instructions,"
111         "cycles,"
112         "cpu/cycles-t/,"
113         "cpu/tx-start/"
114         "}"
115 };
116
117 static const char *topdown_attrs[] = {
118         "topdown-total-slots",
119         "topdown-slots-retired",
120         "topdown-recovery-bubbles",
121         "topdown-fetch-bubbles",
122         "topdown-slots-issued",
123         NULL,
124 };
125
126 static const char *smi_cost_attrs = {
127         "{"
128         "msr/aperf/,"
129         "msr/smi/,"
130         "cycles"
131         "}"
132 };
133
134 static struct perf_evlist       *evsel_list;
135
136 static struct target target = {
137         .uid    = UINT_MAX,
138 };
139
140 #define METRIC_ONLY_LEN 20
141
142 static volatile pid_t           child_pid                       = -1;
143 static int                      detailed_run                    =  0;
144 static bool                     transaction_run;
145 static bool                     topdown_run                     = false;
146 static bool                     smi_cost                        = false;
147 static bool                     smi_reset                       = false;
148 static int                      big_num_opt                     =  -1;
149 static bool                     group                           = false;
150 static const char               *pre_cmd                        = NULL;
151 static const char               *post_cmd                       = NULL;
152 static bool                     sync_run                        = false;
153 static bool                     forever                         = false;
154 static bool                     force_metric_only               = false;
155 static struct timespec          ref_time;
156 static bool                     append_file;
157 static bool                     interval_count;
158 static const char               *output_name;
159 static int                      output_fd;
160
161 struct perf_stat {
162         bool                     record;
163         struct perf_data         data;
164         struct perf_session     *session;
165         u64                      bytes_written;
166         struct perf_tool         tool;
167         bool                     maps_allocated;
168         struct cpu_map          *cpus;
169         struct thread_map       *threads;
170         enum aggr_mode           aggr_mode;
171 };
172
173 static struct perf_stat         perf_stat;
174 #define STAT_RECORD             perf_stat.record
175
176 static volatile int done = 0;
177
178 static struct perf_stat_config stat_config = {
179         .aggr_mode              = AGGR_GLOBAL,
180         .scale                  = true,
181         .unit_width             = 4, /* strlen("unit") */
182         .run_count              = 1,
183         .metric_only_len        = METRIC_ONLY_LEN,
184         .walltime_nsecs_stats   = &walltime_nsecs_stats,
185         .big_num                = true,
186 };
187
188 static inline void diff_timespec(struct timespec *r, struct timespec *a,
189                                  struct timespec *b)
190 {
191         r->tv_sec = a->tv_sec - b->tv_sec;
192         if (a->tv_nsec < b->tv_nsec) {
193                 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
194                 r->tv_sec--;
195         } else {
196                 r->tv_nsec = a->tv_nsec - b->tv_nsec;
197         }
198 }
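/*
 * For example, diff_timespec() with a = { 2, 100 } and b = { 1, 500 }
 * borrows from the seconds field and yields
 * r = { .tv_sec = 0, .tv_nsec = NSEC_PER_SEC - 400 }.
 */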
199
200 static void perf_stat__reset_stats(void)
201 {
202         int i;
203
204         perf_evlist__reset_stats(evsel_list);
205         perf_stat__reset_shadow_stats();
206
207         for (i = 0; i < stat_config.stats_num; i++)
208                 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
209 }
210
211 static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
212                                      union perf_event *event,
213                                      struct perf_sample *sample __maybe_unused,
214                                      struct machine *machine __maybe_unused)
215 {
216         if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
217                 pr_err("failed to write perf data, error: %m\n");
218                 return -1;
219         }
220
221         perf_stat.bytes_written += event->header.size;
222         return 0;
223 }
224
225 static int write_stat_round_event(u64 tm, u64 type)
226 {
227         return perf_event__synthesize_stat_round(NULL, tm, type,
228                                                  process_synthesized_event,
229                                                  NULL);
230 }
231
232 #define WRITE_STAT_ROUND_EVENT(time, interval) \
233         write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
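/*
 * E.g. the WRITE_STAT_ROUND_EVENT(..., INTERVAL) call in process_interval()
 * below expands to write_stat_round_event(time, PERF_STAT_ROUND_TYPE__INTERVAL).
 */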
234
235 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
236
237 static int
238 perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
239                              struct perf_counts_values *count)
240 {
241         struct perf_sample_id *sid = SID(counter, cpu, thread);
242
243         return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
244                                            process_synthesized_event, NULL);
245 }
246
247 /*
248  * Read out the results of a single counter:
249  * do not aggregate counts across CPUs in system-wide mode
250  */
251 static int read_counter(struct perf_evsel *counter)
252 {
253         int nthreads = thread_map__nr(evsel_list->threads);
254         int ncpus, cpu, thread;
255
256         if (target__has_cpu(&target) && !target__has_per_thread(&target))
257                 ncpus = perf_evsel__nr_cpus(counter);
258         else
259                 ncpus = 1;
260
261         if (!counter->supported)
262                 return -ENOENT;
263
264         if (counter->system_wide)
265                 nthreads = 1;
266
267         for (thread = 0; thread < nthreads; thread++) {
268                 for (cpu = 0; cpu < ncpus; cpu++) {
269                         struct perf_counts_values *count;
270
271                         count = perf_counts(counter->counts, cpu, thread);
272
273                         /*
274                          * The leader's group read loads data into its group members
275                          * (via perf_evsel__read_counter) and sets their count->loaded.
276                          */
277                         if (!count->loaded &&
278                             perf_evsel__read_counter(counter, cpu, thread)) {
279                                 counter->counts->scaled = -1;
280                                 perf_counts(counter->counts, cpu, thread)->ena = 0;
281                                 perf_counts(counter->counts, cpu, thread)->run = 0;
282                                 return -1;
283                         }
284
285                         count->loaded = false;
286
287                         if (STAT_RECORD) {
288                                 if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
289                                         pr_err("failed to write stat event\n");
290                                         return -1;
291                                 }
292                         }
293
294                         if (verbose > 1) {
295                                 fprintf(stat_config.output,
296                                         "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
297                                                 perf_evsel__name(counter),
298                                                 cpu,
299                                                 count->val, count->ena, count->run);
300                         }
301                 }
302         }
303
304         return 0;
305 }
306
307 static void read_counters(void)
308 {
309         struct perf_evsel *counter;
310         int ret;
311
312         evlist__for_each_entry(evsel_list, counter) {
313                 ret = read_counter(counter);
314                 if (ret)
315                         pr_debug("failed to read counter %s\n", counter->name);
316
317                 if (ret == 0 && perf_stat_process_counter(&stat_config, counter))
318                         pr_warning("failed to process counter %s\n", counter->name);
319         }
320 }
321
322 static void process_interval(void)
323 {
324         struct timespec ts, rs;
325
326         read_counters();
327
328         clock_gettime(CLOCK_MONOTONIC, &ts);
329         diff_timespec(&rs, &ts, &ref_time);
330
331         if (STAT_RECORD) {
332                 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
333                         pr_err("failed to write stat round event\n");
334         }
335
336         init_stats(&walltime_nsecs_stats);
337         update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
338         print_counters(&rs, 0, NULL);
339 }
340
341 static void enable_counters(void)
342 {
343         if (stat_config.initial_delay)
344                 usleep(stat_config.initial_delay * USEC_PER_MSEC);
345
346         /*
347          * We need to enable counters only if:
348          * - we don't have tracee (attaching to task or cpu)
349          * - we have initial delay configured
350          */
351         if (!target__none(&target) || stat_config.initial_delay)
352                 perf_evlist__enable(evsel_list);
353 }
354
355 static void disable_counters(void)
356 {
357         /*
358          * If we don't have tracee (attaching to task or cpu), counters may
359          * still be running. To get accurate group ratios, we must stop groups
360          * from counting before reading their constituent counters.
361          */
362         if (!target__none(&target))
363                 perf_evlist__disable(evsel_list);
364 }
365
366 static volatile int workload_exec_errno;
367
368 /*
369  * perf_evlist__prepare_workload will send a SIGUSR1
370  * if the fork fails, since we asked for it by setting its
371  * want_signal to true.
372  */
373 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
374                                         void *ucontext __maybe_unused)
375 {
376         workload_exec_errno = info->si_value.sival_int;
377 }
378
379 static bool perf_evsel__should_store_id(struct perf_evsel *counter)
380 {
381         return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
382 }
383
384 static bool is_target_alive(struct target *_target,
385                             struct thread_map *threads)
386 {
387         struct stat st;
388         int i;
389
390         if (!target__has_task(_target))
391                 return true;
392
393         for (i = 0; i < threads->nr; i++) {
394                 char path[PATH_MAX];
395
396                 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
397                           threads->map[i].pid);
398
399                 if (!stat(path, &st))
400                         return true;
401         }
402
403         return false;
404 }
405
406 static int __run_perf_stat(int argc, const char **argv, int run_idx)
407 {
408         int interval = stat_config.interval;
409         int times = stat_config.times;
410         int timeout = stat_config.timeout;
411         char msg[BUFSIZ];
412         unsigned long long t0, t1;
413         struct perf_evsel *counter;
414         struct timespec ts;
415         size_t l;
416         int status = 0;
417         const bool forks = (argc > 0);
418         bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
419
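        /*
         * stat_config.interval and stat_config.timeout are given in
         * milliseconds, so e.g. -I 2500 yields ts = { .tv_sec = 2,
         * .tv_nsec = 500 * NSEC_PER_MSEC } for the nanosleep() calls below.
         */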
420         if (interval) {
421                 ts.tv_sec  = interval / USEC_PER_MSEC;
422                 ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
423         } else if (timeout) {
424                 ts.tv_sec  = timeout / USEC_PER_MSEC;
425                 ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
426         } else {
427                 ts.tv_sec  = 1;
428                 ts.tv_nsec = 0;
429         }
430
431         if (forks) {
432                 if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
433                                                   workload_exec_failed_signal) < 0) {
434                         perror("failed to prepare workload");
435                         return -1;
436                 }
437                 child_pid = evsel_list->workload.pid;
438         }
439
440         if (group)
441                 perf_evlist__set_leader(evsel_list);
442
443         evlist__for_each_entry(evsel_list, counter) {
444 try_again:
445                 if (create_perf_stat_counter(counter, &stat_config, &target) < 0) {
446
447                         /* Weak group failed. Reset the group. */
448                         if ((errno == EINVAL || errno == EBADF) &&
449                             counter->leader != counter &&
450                             counter->weak_group) {
451                                 counter = perf_evlist__reset_weak_group(evsel_list, counter);
452                                 goto try_again;
453                         }
454
455                         /*
456                          * PPC returns ENXIO for HW counters until 2.6.37
457                          * (behavior changed with commit b0a873e).
458                          */
459                         if (errno == EINVAL || errno == ENOSYS ||
460                             errno == ENOENT || errno == EOPNOTSUPP ||
461                             errno == ENXIO) {
462                                 if (verbose > 0)
463                                         ui__warning("%s event is not supported by the kernel.\n",
464                                                     perf_evsel__name(counter));
465                                 counter->supported = false;
466
467                                 if ((counter->leader != counter) ||
468                                     !(counter->leader->nr_members > 1))
469                                         continue;
470                         } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
471                                 if (verbose > 0)
472                                         ui__warning("%s\n", msg);
473                                 goto try_again;
474                         } else if (target__has_per_thread(&target) &&
475                                    evsel_list->threads &&
476                                    evsel_list->threads->err_thread != -1) {
477                                 /*
478                                  * For global --per-thread case, skip current
479                                  * error thread.
480                                  */
481                                 if (!thread_map__remove(evsel_list->threads,
482                                                         evsel_list->threads->err_thread)) {
483                                         evsel_list->threads->err_thread = -1;
484                                         goto try_again;
485                                 }
486                         }
487
488                         perf_evsel__open_strerror(counter, &target,
489                                                   errno, msg, sizeof(msg));
490                         ui__error("%s\n", msg);
491
492                         if (child_pid != -1)
493                                 kill(child_pid, SIGTERM);
494
495                         return -1;
496                 }
497                 counter->supported = true;
498
499                 l = strlen(counter->unit);
500                 if (l > stat_config.unit_width)
501                         stat_config.unit_width = l;
502
503                 if (perf_evsel__should_store_id(counter) &&
504                     perf_evsel__store_ids(counter, evsel_list))
505                         return -1;
506         }
507
508         if (perf_evlist__apply_filters(evsel_list, &counter)) {
509                 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
510                         counter->filter, perf_evsel__name(counter), errno,
511                         str_error_r(errno, msg, sizeof(msg)));
512                 return -1;
513         }
514
515         if (STAT_RECORD) {
516                 int err, fd = perf_data__fd(&perf_stat.data);
517
518                 if (is_pipe) {
519                         err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
520                 } else {
521                         err = perf_session__write_header(perf_stat.session, evsel_list,
522                                                          fd, false);
523                 }
524
525                 if (err < 0)
526                         return err;
527
528                 err = perf_stat_synthesize_config(&stat_config, NULL, evsel_list,
529                                                   process_synthesized_event, is_pipe);
530                 if (err < 0)
531                         return err;
532         }
533
534         /*
535          * Enable counters and exec the command:
536          */
537         t0 = rdclock();
538         clock_gettime(CLOCK_MONOTONIC, &ref_time);
539
540         if (forks) {
541                 perf_evlist__start_workload(evsel_list);
542                 enable_counters();
543
544                 if (interval || timeout) {
545                         while (!waitpid(child_pid, &status, WNOHANG)) {
546                                 nanosleep(&ts, NULL);
547                                 if (timeout)
548                                         break;
549                                 process_interval();
550                                 if (interval_count && !(--times))
551                                         break;
552                         }
553                 }
554                 if (child_pid != -1)
555                         wait4(child_pid, &status, 0, &stat_config.ru_data);
556
557                 if (workload_exec_errno) {
558                         const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
559                         pr_err("Workload failed: %s\n", emsg);
560                         return -1;
561                 }
562
563                 if (WIFSIGNALED(status))
564                         psignal(WTERMSIG(status), argv[0]);
565         } else {
566                 enable_counters();
567                 while (!done) {
568                         nanosleep(&ts, NULL);
569                         if (!is_target_alive(&target, evsel_list->threads))
570                                 break;
571                         if (timeout)
572                                 break;
573                         if (interval) {
574                                 process_interval();
575                                 if (interval_count && !(--times))
576                                         break;
577                         }
578                 }
579         }
580
581         disable_counters();
582
583         t1 = rdclock();
584
585         if (stat_config.walltime_run_table)
586                 stat_config.walltime_run[run_idx] = t1 - t0;
587
588         update_stats(&walltime_nsecs_stats, t1 - t0);
589
590         /*
591          * Closing a group leader splits the group, and as we only disable
592          * group leaders, results in remaining events becoming enabled. To
593          * avoid arbitrary skew, we must read all counters before closing any
594          * group leaders.
595          */
596         read_counters();
597         perf_evlist__close(evsel_list);
598
599         return WEXITSTATUS(status);
600 }
601
602 static int run_perf_stat(int argc, const char **argv, int run_idx)
603 {
604         int ret;
605
606         if (pre_cmd) {
607                 ret = system(pre_cmd);
608                 if (ret)
609                         return ret;
610         }
611
612         if (sync_run)
613                 sync();
614
615         ret = __run_perf_stat(argc, argv, run_idx);
616         if (ret)
617                 return ret;
618
619         if (post_cmd) {
620                 ret = system(post_cmd);
621                 if (ret)
622                         return ret;
623         }
624
625         return ret;
626 }
627
628 static void print_counters(struct timespec *ts, int argc, const char **argv)
629 {
630         /* Do not print anything if we record to the pipe. */
631         if (STAT_RECORD && perf_stat.data.is_pipe)
632                 return;
633
634         perf_evlist__print_counters(evsel_list, &stat_config, &target,
635                                     ts, argc, argv);
636 }
637
638 static volatile int signr = -1;
639
640 static void skip_signal(int signo)
641 {
642         if ((child_pid == -1) || stat_config.interval)
643                 done = 1;
644
645         signr = signo;
646         /*
647          * Render child_pid harmless so that we won't
648          * send SIGTERM to a random process in case of
649          * a race condition combined with fast PID
650          * recycling.
651          */
652         child_pid = -1;
653 }
654
655 static void sig_atexit(void)
656 {
657         sigset_t set, oset;
658
659         /*
660          * Avoid a race condition with the SIGCHLD handler
661          * in skip_signal(), which modifies child_pid; the
662          * goal is to avoid sending SIGTERM to a random
663          * process.
664          */
665         sigemptyset(&set);
666         sigaddset(&set, SIGCHLD);
667         sigprocmask(SIG_BLOCK, &set, &oset);
668
669         if (child_pid != -1)
670                 kill(child_pid, SIGTERM);
671
672         sigprocmask(SIG_SETMASK, &oset, NULL);
673
674         if (signr == -1)
675                 return;
676
677         signal(signr, SIG_DFL);
678         kill(getpid(), signr);
679 }
680
681 static int stat__set_big_num(const struct option *opt __maybe_unused,
682                              const char *s __maybe_unused, int unset)
683 {
684         big_num_opt = unset ? 0 : 1;
685         return 0;
686 }
687
688 static int enable_metric_only(const struct option *opt __maybe_unused,
689                               const char *s __maybe_unused, int unset)
690 {
691         force_metric_only = true;
692         stat_config.metric_only = !unset;
693         return 0;
694 }
695
696 static int parse_metric_groups(const struct option *opt,
697                                const char *str,
698                                int unset __maybe_unused)
699 {
700         return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
701 }
702
703 static struct option stat_options[] = {
704         OPT_BOOLEAN('T', "transaction", &transaction_run,
705                     "hardware transaction statistics"),
706         OPT_CALLBACK('e', "event", &evsel_list, "event",
707                      "event selector. use 'perf list' to list available events",
708                      parse_events_option),
709         OPT_CALLBACK(0, "filter", &evsel_list, "filter",
710                      "event filter", parse_filter),
711         OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
712                     "child tasks do not inherit counters"),
713         OPT_STRING('p', "pid", &target.pid, "pid",
714                    "stat events on existing process id"),
715         OPT_STRING('t', "tid", &target.tid, "tid",
716                    "stat events on existing thread id"),
717         OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
718                     "system-wide collection from all CPUs"),
719         OPT_BOOLEAN('g', "group", &group,
720                     "put the counters into a counter group"),
721         OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
722         OPT_INCR('v', "verbose", &verbose,
723                     "be more verbose (show counter open errors, etc)"),
724         OPT_INTEGER('r', "repeat", &stat_config.run_count,
725                     "repeat command and print average + stddev (max: 100, forever: 0)"),
726         OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
727                     "display details about each run (only with -r option)"),
728         OPT_BOOLEAN('n', "null", &stat_config.null_run,
729                     "null run - don't start any counters"),
730         OPT_INCR('d', "detailed", &detailed_run,
731                     "detailed run - start a lot of events"),
732         OPT_BOOLEAN('S', "sync", &sync_run,
733                     "call sync() before starting a run"),
734         OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
735                            "print large numbers with thousands\' separators",
736                            stat__set_big_num),
737         OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
738                     "list of cpus to monitor in system-wide"),
739         OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
740                     "disable CPU count aggregation", AGGR_NONE),
741         OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
742         OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
743                    "print counts with custom separator"),
744         OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
745                      "monitor event in cgroup name only", parse_cgroups),
746         OPT_STRING('o', "output", &output_name, "file", "output file name"),
747         OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
748         OPT_INTEGER(0, "log-fd", &output_fd,
749                     "log output to fd, instead of stderr"),
750         OPT_STRING(0, "pre", &pre_cmd, "command",
751                         "command to run prior to the measured command"),
752         OPT_STRING(0, "post", &post_cmd, "command",
753                         "command to run after the measured command"),
754         OPT_UINTEGER('I', "interval-print", &stat_config.interval,
755                     "print counts at regular interval in ms "
756                     "(overhead is possible for values <= 100ms)"),
757         OPT_INTEGER(0, "interval-count", &stat_config.times,
758                     "print counts for fixed number of times"),
759         OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
760                     "clear screen between intervals"),
761         OPT_UINTEGER(0, "timeout", &stat_config.timeout,
762                     "stop workload and print counts after a timeout period in ms (>= 10ms)"),
763         OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
764                      "aggregate counts per processor socket", AGGR_SOCKET),
765         OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
766                      "aggregate counts per physical processor core", AGGR_CORE),
767         OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
768                      "aggregate counts per thread", AGGR_THREAD),
769         OPT_UINTEGER('D', "delay", &stat_config.initial_delay,
770                      "ms to wait before starting measurement after program start"),
771         OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
772                         "Only print computed metrics. No raw values", enable_metric_only),
773         OPT_BOOLEAN(0, "topdown", &topdown_run,
774                         "measure topdown level 1 statistics"),
775         OPT_BOOLEAN(0, "smi-cost", &smi_cost,
776                         "measure SMI cost"),
777         OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
778                      "monitor specified metrics or metric groups (separated by ,)",
779                      parse_metric_groups),
780         OPT_END()
781 };
782
783 static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
784                                  struct cpu_map *map, int cpu)
785 {
786         return cpu_map__get_socket(map, cpu, NULL);
787 }
788
789 static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
790                                struct cpu_map *map, int cpu)
791 {
792         return cpu_map__get_core(map, cpu, NULL);
793 }
794
795 static int cpu_map__get_max(struct cpu_map *map)
796 {
797         int i, max = -1;
798
799         for (i = 0; i < map->nr; i++) {
800                 if (map->map[i] > max)
801                         max = map->map[i];
802         }
803
804         return max;
805 }
806
807 static int perf_stat__get_aggr(struct perf_stat_config *config,
808                                aggr_get_id_t get_id, struct cpu_map *map, int idx)
809 {
810         int cpu;
811
812         if (idx >= map->nr)
813                 return -1;
814
815         cpu = map->map[idx];
816
817         if (config->cpus_aggr_map->map[cpu] == -1)
818                 config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);
819
820         return config->cpus_aggr_map->map[cpu];
821 }
822
823 static int perf_stat__get_socket_cached(struct perf_stat_config *config,
824                                         struct cpu_map *map, int idx)
825 {
826         return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
827 }
828
829 static int perf_stat__get_core_cached(struct perf_stat_config *config,
830                                       struct cpu_map *map, int idx)
831 {
832         return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
833 }
834
835 static int perf_stat_init_aggr_mode(void)
836 {
837         int nr;
838
839         switch (stat_config.aggr_mode) {
840         case AGGR_SOCKET:
841                 if (cpu_map__build_socket_map(evsel_list->cpus, &stat_config.aggr_map)) {
842                         perror("cannot build socket map");
843                         return -1;
844                 }
845                 stat_config.aggr_get_id = perf_stat__get_socket_cached;
846                 break;
847         case AGGR_CORE:
848                 if (cpu_map__build_core_map(evsel_list->cpus, &stat_config.aggr_map)) {
849                         perror("cannot build core map");
850                         return -1;
851                 }
852                 stat_config.aggr_get_id = perf_stat__get_core_cached;
853                 break;
854         case AGGR_NONE:
855         case AGGR_GLOBAL:
856         case AGGR_THREAD:
857         case AGGR_UNSET:
858         default:
859                 break;
860         }
861
862         /*
863          * The evsel_list->cpus is the base we operate on;
864          * the highest cpu number determines the size of the
865          * aggregation translation cpumap.
866          */
867         nr = cpu_map__get_max(evsel_list->cpus);
868         stat_config.cpus_aggr_map = cpu_map__empty_new(nr + 1);
869         return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
870 }
871
872 static void perf_stat__exit_aggr_mode(void)
873 {
874         cpu_map__put(stat_config.aggr_map);
875         cpu_map__put(stat_config.cpus_aggr_map);
876         stat_config.aggr_map = NULL;
877         stat_config.cpus_aggr_map = NULL;
878 }
879
880 static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
881 {
882         int cpu;
883
884         if (idx >= map->nr)
885                 return -1;
886
887         cpu = map->map[idx];
888
889         if (cpu >= env->nr_cpus_avail)
890                 return -1;
891
892         return cpu;
893 }
894
895 static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
896 {
897         struct perf_env *env = data;
898         int cpu = perf_env__get_cpu(env, map, idx);
899
900         return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
901 }
902
903 static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
904 {
905         struct perf_env *env = data;
906         int core = -1, cpu = perf_env__get_cpu(env, map, idx);
907
908         if (cpu != -1) {
909                 int socket_id = env->cpu[cpu].socket_id;
910
911                 /*
912                  * Encode the socket in the upper 16 bits:
913                  * core_id is relative to the socket, and we
914                  * need a global id, so we combine the socket
915                  * and core ids.
916                  */
917                 core = (socket_id << 16) | (env->cpu[cpu].core_id & 0xffff);
918         }
919
920         return core;
921 }
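/*
 * E.g. a CPU with socket_id 1 and core_id 2 gets the aggregate id
 * (1 << 16) | 2 == 0x10002, which stays unique across sockets.
 */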
922
923 static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus,
924                                       struct cpu_map **sockp)
925 {
926         return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
927 }
928
929 static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
930                                     struct cpu_map **corep)
931 {
932         return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
933 }
934
935 static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
936                                       struct cpu_map *map, int idx)
937 {
938         return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
939 }
940
941 static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
942                                     struct cpu_map *map, int idx)
943 {
944         return perf_env__get_core(map, idx, &perf_stat.session->header.env);
945 }
946
947 static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
948 {
949         struct perf_env *env = &st->session->header.env;
950
951         switch (stat_config.aggr_mode) {
952         case AGGR_SOCKET:
953                 if (perf_env__build_socket_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
954                         perror("cannot build socket map");
955                         return -1;
956                 }
957                 stat_config.aggr_get_id = perf_stat__get_socket_file;
958                 break;
959         case AGGR_CORE:
960                 if (perf_env__build_core_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
961                         perror("cannot build core map");
962                         return -1;
963                 }
964                 stat_config.aggr_get_id = perf_stat__get_core_file;
965                 break;
966         case AGGR_NONE:
967         case AGGR_GLOBAL:
968         case AGGR_THREAD:
969         case AGGR_UNSET:
970         default:
971                 break;
972         }
973
974         return 0;
975 }
976
977 static int topdown_filter_events(const char **attr, char **str, bool use_group)
978 {
979         int off = 0;
980         int i;
981         int len = 0;
982         char *s;
983
984         for (i = 0; attr[i]; i++) {
985                 if (pmu_have_event("cpu", attr[i])) {
986                         len += strlen(attr[i]) + 1;
987                         attr[i - off] = attr[i];
988                 } else
989                         off++;
990         }
991         attr[i - off] = NULL;
992
993         *str = malloc(len + 1 + 2);
994         if (!*str)
995                 return -1;
996         s = *str;
997         if (i - off == 0) {
998                 *s = 0;
999                 return 0;
1000         }
1001         if (use_group)
1002                 *s++ = '{';
1003         for (i = 0; attr[i]; i++) {
1004                 strcpy(s, attr[i]);
1005                 s += strlen(s);
1006                 *s++ = ',';
1007         }
1008         if (use_group) {
1009                 s[-1] = '}';
1010                 *s = 0;
1011         } else
1012                 s[-1] = 0;
1013         return 0;
1014 }
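/*
 * E.g. if only topdown-total-slots and topdown-slots-retired are present on
 * the cpu PMU and use_group is true, *str ends up as
 * "{topdown-total-slots,topdown-slots-retired}".
 */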
1015
1016 __weak bool arch_topdown_check_group(bool *warn)
1017 {
1018         *warn = false;
1019         return false;
1020 }
1021
1022 __weak void arch_topdown_group_warn(void)
1023 {
1024 }
1025
1026 /*
1027  * Add default attributes, if there were no attributes specified or
1028  * if -d/--detailed, -d -d or -d -d -d is used:
1029  */
1030 static int add_default_attributes(void)
1031 {
1032         int err;
1033         struct perf_event_attr default_attrs0[] = {
1034
1035   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK              },
1036   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES        },
1037   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS          },
1038   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS             },
1039
1040   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES              },
1041 };
1042         struct perf_event_attr frontend_attrs[] = {
1043   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
1044 };
1045         struct perf_event_attr backend_attrs[] = {
1046   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND  },
1047 };
1048         struct perf_event_attr default_attrs1[] = {
1049   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS            },
1050   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS     },
1051   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES           },
1052
1053 };
1054
1055 /*
1056  * Detailed stats (-d), covering the L1 and last level data caches:
1057  */
1058         struct perf_event_attr detailed_attrs[] = {
1059
1060   { .type = PERF_TYPE_HW_CACHE,
1061     .config =
1062          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1063         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1064         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1065
1066   { .type = PERF_TYPE_HW_CACHE,
1067     .config =
1068          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1069         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1070         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1071
1072   { .type = PERF_TYPE_HW_CACHE,
1073     .config =
1074          PERF_COUNT_HW_CACHE_LL                 <<  0  |
1075         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1076         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1077
1078   { .type = PERF_TYPE_HW_CACHE,
1079     .config =
1080          PERF_COUNT_HW_CACHE_LL                 <<  0  |
1081         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1082         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1083 };
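/*
 * The PERF_TYPE_HW_CACHE .config values above and below follow the generic
 * cache event encoding from the perf_event ABI: cache id in bits 0-7,
 * operation in bits 8-15, result in bits 16-23; e.g. L1D read misses are
 * L1D | (OP_READ << 8) | (RESULT_MISS << 16).
 */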
1084
1085 /*
1086  * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
1087  */
1088         struct perf_event_attr very_detailed_attrs[] = {
1089
1090   { .type = PERF_TYPE_HW_CACHE,
1091     .config =
1092          PERF_COUNT_HW_CACHE_L1I                <<  0  |
1093         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1094         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1095
1096   { .type = PERF_TYPE_HW_CACHE,
1097     .config =
1098          PERF_COUNT_HW_CACHE_L1I                <<  0  |
1099         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1100         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1101
1102   { .type = PERF_TYPE_HW_CACHE,
1103     .config =
1104          PERF_COUNT_HW_CACHE_DTLB               <<  0  |
1105         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1106         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1107
1108   { .type = PERF_TYPE_HW_CACHE,
1109     .config =
1110          PERF_COUNT_HW_CACHE_DTLB               <<  0  |
1111         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1112         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1113
1114   { .type = PERF_TYPE_HW_CACHE,
1115     .config =
1116          PERF_COUNT_HW_CACHE_ITLB               <<  0  |
1117         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1118         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1119
1120   { .type = PERF_TYPE_HW_CACHE,
1121     .config =
1122          PERF_COUNT_HW_CACHE_ITLB               <<  0  |
1123         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1124         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1125
1126 };
1127
1128 /*
1129  * Very, very detailed stats (-d -d -d), adding prefetch events:
1130  */
1131         struct perf_event_attr very_very_detailed_attrs[] = {
1132
1133   { .type = PERF_TYPE_HW_CACHE,
1134     .config =
1135          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1136         (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
1137         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1138
1139   { .type = PERF_TYPE_HW_CACHE,
1140     .config =
1141          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1142         (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
1143         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1144 };
1145         struct parse_events_error errinfo;
1146
1147         /* Set attrs if no event is selected and !null_run: */
1148         if (stat_config.null_run)
1149                 return 0;
1150
1151         if (transaction_run) {
1152                 /* Handle -T as -M transaction. Once platform specific metrics
1153                  * support has been added to the json files, all architectures
1154                  * will use this approach. To determine transaction support
1155                  * on an architecture, test for such a metric name.
1156                  */
1157                 if (metricgroup__has_metric("transaction")) {
1158                         struct option opt = { .value = &evsel_list };
1159
1160                         return metricgroup__parse_groups(&opt, "transaction",
1161                                                          &stat_config.metric_events);
1162                 }
1163
1164                 if (pmu_have_event("cpu", "cycles-ct") &&
1165                     pmu_have_event("cpu", "el-start"))
1166                         err = parse_events(evsel_list, transaction_attrs,
1167                                            &errinfo);
1168                 else
1169                         err = parse_events(evsel_list,
1170                                            transaction_limited_attrs,
1171                                            &errinfo);
1172                 if (err) {
1173                         fprintf(stderr, "Cannot set up transaction events\n");
1174                         parse_events_print_error(&errinfo, transaction_attrs);
1175                         return -1;
1176                 }
1177                 return 0;
1178         }
1179
1180         if (smi_cost) {
1181                 int smi;
1182
1183                 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
1184                         fprintf(stderr, "freeze_on_smi is not supported.\n");
1185                         return -1;
1186                 }
1187
1188                 if (!smi) {
1189                         if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
1190                                 fprintf(stderr, "Failed to set freeze_on_smi.\n");
1191                                 return -1;
1192                         }
1193                         smi_reset = true;
1194                 }
1195
1196                 if (pmu_have_event("msr", "aperf") &&
1197                     pmu_have_event("msr", "smi")) {
1198                         if (!force_metric_only)
1199                                 stat_config.metric_only = true;
1200                         err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
1201                 } else {
1202                         fprintf(stderr, "To measure SMI cost, it needs "
1203                                 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
1204                         parse_events_print_error(&errinfo, smi_cost_attrs);
1205                         return -1;
1206                 }
1207                 if (err) {
1208                         fprintf(stderr, "Cannot set up SMI cost events\n");
1209                         return -1;
1210                 }
1211                 return 0;
1212         }
1213
1214         if (topdown_run) {
1215                 char *str = NULL;
1216                 bool warn = false;
1217
1218                 if (stat_config.aggr_mode != AGGR_GLOBAL &&
1219                     stat_config.aggr_mode != AGGR_CORE) {
1220                         pr_err("top down event configuration requires --per-core mode\n");
1221                         return -1;
1222                 }
1223                 stat_config.aggr_mode = AGGR_CORE;
1224                 if (nr_cgroups || !target__has_cpu(&target)) {
1225                         pr_err("top down event configuration requires system-wide mode (-a)\n");
1226                         return -1;
1227                 }
1228
1229                 if (!force_metric_only)
1230                         stat_config.metric_only = true;
1231                 if (topdown_filter_events(topdown_attrs, &str,
1232                                 arch_topdown_check_group(&warn)) < 0) {
1233                         pr_err("Out of memory\n");
1234                         return -1;
1235                 }
1236                 if (topdown_attrs[0] && str) {
1237                         if (warn)
1238                                 arch_topdown_group_warn();
1239                         err = parse_events(evsel_list, str, &errinfo);
1240                         if (err) {
1241                                 fprintf(stderr,
1242                                         "Cannot set up top down events %s: %d\n",
1243                                         str, err);
1244                                 free(str);
1245                                 parse_events_print_error(&errinfo, str);
1246                                 return -1;
1247                         }
1248                 } else {
1249                         fprintf(stderr, "System does not support topdown\n");
1250                         return -1;
1251                 }
1252                 free(str);
1253         }
1254
1255         if (!evsel_list->nr_entries) {
1256                 if (target__has_cpu(&target))
1257                         default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
1258
1259                 if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
1260                         return -1;
1261                 if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
1262                         if (perf_evlist__add_default_attrs(evsel_list,
1263                                                 frontend_attrs) < 0)
1264                                 return -1;
1265                 }
1266                 if (pmu_have_event("cpu", "stalled-cycles-backend")) {
1267                         if (perf_evlist__add_default_attrs(evsel_list,
1268                                                 backend_attrs) < 0)
1269                                 return -1;
1270                 }
1271                 if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
1272                         return -1;
1273         }
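        /*
         * So a bare "perf stat <cmd>" ends up counting task-clock,
         * context-switches, cpu-migrations, page-faults, cycles,
         * stalled-cycles-frontend/backend (where the PMU exposes them),
         * instructions, branches and branch-misses, matching the sample
         * output in the header comment above.
         */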
1274
1275         /* Detailed events get appended to the event list: */
1276
1277         if (detailed_run <  1)
1278                 return 0;
1279
1280         /* Append detailed run extra attributes: */
1281         if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
1282                 return -1;
1283
1284         if (detailed_run < 2)
1285                 return 0;
1286
1287         /* Append very detailed run extra attributes: */
1288         if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
1289                 return -1;
1290
1291         if (detailed_run < 3)
1292                 return 0;
1293
1294         /* Append very, very detailed run extra attributes: */
1295         return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
1296 }
1297
1298 static const char * const stat_record_usage[] = {
1299         "perf stat record [<options>]",
1300         NULL,
1301 };
1302
1303 static void init_features(struct perf_session *session)
1304 {
1305         int feat;
1306
1307         for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1308                 perf_header__set_feat(&session->header, feat);
1309
1310         perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1311         perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1312         perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1313         perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1314 }
1315
1316 static int __cmd_record(int argc, const char **argv)
1317 {
1318         struct perf_session *session;
1319         struct perf_data *data = &perf_stat.data;
1320
1321         argc = parse_options(argc, argv, stat_options, stat_record_usage,
1322                              PARSE_OPT_STOP_AT_NON_OPTION);
1323
1324         if (output_name)
1325                 data->file.path = output_name;
1326
1327         if (stat_config.run_count != 1 || forever) {
1328                 pr_err("Cannot use -r option with perf stat record.\n");
1329                 return -1;
1330         }
1331
1332         session = perf_session__new(data, false, NULL);
1333         if (session == NULL) {
1334                 pr_err("Perf session creation failed.\n");
1335                 return -1;
1336         }
1337
1338         init_features(session);
1339
1340         session->evlist   = evsel_list;
1341         perf_stat.session = session;
1342         perf_stat.record  = true;
1343         return argc;
1344 }
1345
1346 static int process_stat_round_event(struct perf_session *session,
1347                                     union perf_event *event)
1348 {
1349         struct stat_round_event *stat_round = &event->stat_round;
1350         struct perf_evsel *counter;
1351         struct timespec tsh, *ts = NULL;
1352         const char **argv = session->header.env.cmdline_argv;
1353         int argc = session->header.env.nr_cmdline;
1354
1355         evlist__for_each_entry(evsel_list, counter)
1356                 perf_stat_process_counter(&stat_config, counter);
1357
1358         if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
1359                 update_stats(&walltime_nsecs_stats, stat_round->time);
1360
1361         if (stat_config.interval && stat_round->time) {
1362                 tsh.tv_sec  = stat_round->time / NSEC_PER_SEC;
1363                 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
1364                 ts = &tsh;
1365         }
1366
1367         print_counters(ts, argc, argv);
1368         return 0;
1369 }
1370
1371 static
1372 int process_stat_config_event(struct perf_session *session,
1373                               union perf_event *event)
1374 {
1375         struct perf_tool *tool = session->tool;
1376         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1377
1378         perf_event__read_stat_config(&stat_config, &event->stat_config);
1379
1380         if (cpu_map__empty(st->cpus)) {
1381                 if (st->aggr_mode != AGGR_UNSET)
1382                         pr_warning("warning: processing task data, aggregation mode not set\n");
1383                 return 0;
1384         }
1385
1386         if (st->aggr_mode != AGGR_UNSET)
1387                 stat_config.aggr_mode = st->aggr_mode;
1388
1389         if (perf_stat.data.is_pipe)
1390                 perf_stat_init_aggr_mode();
1391         else
1392                 perf_stat_init_aggr_mode_file(st);
1393
1394         return 0;
1395 }
1396
1397 static int set_maps(struct perf_stat *st)
1398 {
1399         if (!st->cpus || !st->threads)
1400                 return 0;
1401
1402         if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
1403                 return -EINVAL;
1404
1405         perf_evlist__set_maps(evsel_list, st->cpus, st->threads);
1406
1407         if (perf_evlist__alloc_stats(evsel_list, true))
1408                 return -ENOMEM;
1409
1410         st->maps_allocated = true;
1411         return 0;
1412 }
1413
1414 static
1415 int process_thread_map_event(struct perf_session *session,
1416                              union perf_event *event)
1417 {
1418         struct perf_tool *tool = session->tool;
1419         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1420
1421         if (st->threads) {
1422                 pr_warning("Extra thread map event, ignoring.\n");
1423                 return 0;
1424         }
1425
1426         st->threads = thread_map__new_event(&event->thread_map);
1427         if (!st->threads)
1428                 return -ENOMEM;
1429
1430         return set_maps(st);
1431 }
1432
1433 static
1434 int process_cpu_map_event(struct perf_session *session,
1435                           union perf_event *event)
1436 {
1437         struct perf_tool *tool = session->tool;
1438         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1439         struct cpu_map *cpus;
1440
1441         if (st->cpus) {
1442                 pr_warning("Extra cpu map event, ignoring.\n");
1443                 return 0;
1444         }
1445
1446         cpus = cpu_map__new_data(&event->cpu_map.data);
1447         if (!cpus)
1448                 return -ENOMEM;
1449
1450         st->cpus = cpus;
1451         return set_maps(st);
1452 }
1453
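/*
 * Allocate and initialize one runtime_stat per monitored thread, so shadow
 * metrics can be kept per thread for --per-thread output.
 */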
1454 static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
1455 {
1456         int i;
1457
1458         config->stats = calloc(nthreads, sizeof(struct runtime_stat));
1459         if (!config->stats)
1460                 return -1;
1461
1462         config->stats_num = nthreads;
1463
1464         for (i = 0; i < nthreads; i++)
1465                 runtime_stat__init(&config->stats[i]);
1466
1467         return 0;
1468 }
1469
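/* Tear down and free the per-thread runtime_stat array, if it was allocated. */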
1470 static void runtime_stat_delete(struct perf_stat_config *config)
1471 {
1472         int i;
1473
1474         if (!config->stats)
1475                 return;
1476
1477         for (i = 0; i < config->stats_num; i++)
1478                 runtime_stat__exit(&config->stats[i]);
1479
1480         free(config->stats);
1481 }
1482
1483 static const char * const stat_report_usage[] = {
1484         "perf stat report [<options>]",
1485         NULL,
1486 };
1487
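/*
 * Tool callbacks used when reading back a 'perf stat record' session; the
 * aggregation mode stays AGGR_UNSET until overridden by the report options
 * or derived from the recorded stat config.
 */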
1488 static struct perf_stat perf_stat = {
1489         .tool = {
1490                 .attr           = perf_event__process_attr,
1491                 .event_update   = perf_event__process_event_update,
1492                 .thread_map     = process_thread_map_event,
1493                 .cpu_map        = process_cpu_map_event,
1494                 .stat_config    = process_stat_config_event,
1495                 .stat           = perf_event__process_stat_event,
1496                 .stat_round     = process_stat_round_event,
1497         },
1498         .aggr_mode = AGGR_UNSET,
1499 };
1500
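/*
 * 'perf stat report': pick the input file (perf.data, or stdin when it is a
 * pipe), open a session wired to the perf_stat tool callbacks above, and
 * replay the recorded events, which ends with the counters being printed.
 */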
1501 static int __cmd_report(int argc, const char **argv)
1502 {
1503         struct perf_session *session;
1504         const struct option options[] = {
1505         OPT_STRING('i', "input", &input_name, "file", "input file name"),
1506         OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
1507                      "aggregate counts per processor socket", AGGR_SOCKET),
1508         OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
1509                      "aggregate counts per physical processor core", AGGR_CORE),
1510         OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
1511                      "disable CPU count aggregation", AGGR_NONE),
1512         OPT_END()
1513         };
1514         struct stat st;
1515         int ret;
1516
1517         argc = parse_options(argc, argv, options, stat_report_usage, 0);
1518
1519         if (!input_name || !strlen(input_name)) {
1520                 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
1521                         input_name = "-";
1522                 else
1523                         input_name = "perf.data";
1524         }
1525
1526         perf_stat.data.file.path = input_name;
1527         perf_stat.data.mode      = PERF_DATA_MODE_READ;
1528
1529         session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
1530         if (session == NULL)
1531                 return -1;
1532
1533         perf_stat.session  = session;
1534         stat_config.output = stderr;
1535         evsel_list         = session->evlist;
1536
1537         ret = perf_session__process_events(session);
1538         if (ret)
1539                 return ret;
1540
1541         perf_session__delete(session);
1542         return 0;
1543 }
1544
1545 static void setup_system_wide(int forks)
1546 {
1547         /*
1548          * Make system wide (-a) the default target if
1549          * no target was specified and one of the following
1550          * conditions is met:
1551          *
1552          *   - there's no workload specified
1553          *   - there is a workload specified but all requested
1554          *     events are system wide events
1555          */
1556         if (!target__none(&target))
1557                 return;
1558
1559         if (!forks)
1560                 target.system_wide = true;
1561         else {
1562                 struct perf_evsel *counter;
1563
1564                 evlist__for_each_entry(evsel_list, counter) {
1565                         if (!counter->system_wide)
1566                                 return;
1567                 }
1568
1569                 if (evsel_list->nr_entries)
1570                         target.system_wide = true;
1571         }
1572 }
1573
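/*
 * Main entry point for 'perf stat': dispatch to the record/report
 * subcommands, validate option combinations, set up output, events, target
 * and aggregation, then run the workload the requested number of times and
 * print the counters.
 */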
1574 int cmd_stat(int argc, const char **argv)
1575 {
1576         const char * const stat_usage[] = {
1577                 "perf stat [<options>] [<command>]",
1578                 NULL
1579         };
1580         int status = -EINVAL, run_idx;
1581         const char *mode;
1582         FILE *output = stderr;
1583         unsigned int interval, timeout;
1584         const char * const stat_subcommands[] = { "record", "report" };
1585
1586         setlocale(LC_ALL, "");
1587
1588         evsel_list = perf_evlist__new();
1589         if (evsel_list == NULL)
1590                 return -ENOMEM;
1591
1592         parse_events__shrink_config_terms();
1593
1594         /* String-parsing callback-based options would segfault when negated */
1595         set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
1596         set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
1597         set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
1598
1599         argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
1600                                         (const char **) stat_usage,
1601                                         PARSE_OPT_STOP_AT_NON_OPTION);
1602         perf_stat__collect_metric_expr(evsel_list);
1603         perf_stat__init_shadow_stats();
1604
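        /*
         * A field separator (-x) switches to CSV output; a literal "\t"
         * given on the command line is translated into a real tab here.
         */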
1605         if (stat_config.csv_sep) {
1606                 stat_config.csv_output = true;
1607                 if (!strcmp(stat_config.csv_sep, "\\t"))
1608                         stat_config.csv_sep = "\t";
1609         } else
1610                 stat_config.csv_sep = DEFAULT_SEPARATOR;
1611
1612         if (argc && !strncmp(argv[0], "rec", 3)) {
1613                 argc = __cmd_record(argc, argv);
1614                 if (argc < 0)
1615                         return -1;
1616         } else if (argc && !strncmp(argv[0], "rep", 3))
1617                 return __cmd_report(argc, argv);
1618
1619         interval = stat_config.interval;
1620         timeout = stat_config.timeout;
1621
1622         /*
1623          * For the record command, -o is already taken care of.
1624          */
1625         if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
1626                 output = NULL;
1627
1628         if (output_name && output_fd) {
1629                 fprintf(stderr, "cannot use both --output and --log-fd\n");
1630                 parse_options_usage(stat_usage, stat_options, "o", 1);
1631                 parse_options_usage(NULL, stat_options, "log-fd", 0);
1632                 goto out;
1633         }
1634
1635         if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
1636                 fprintf(stderr, "--metric-only is not supported with --per-thread\n");
1637                 goto out;
1638         }
1639
1640         if (stat_config.metric_only && stat_config.run_count > 1) {
1641                 fprintf(stderr, "--metric-only is not supported with -r\n");
1642                 goto out;
1643         }
1644
1645         if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
1646                 fprintf(stderr, "--table is only supported with -r\n");
1647                 parse_options_usage(stat_usage, stat_options, "r", 1);
1648                 parse_options_usage(NULL, stat_options, "table", 0);
1649                 goto out;
1650         }
1651
1652         if (output_fd < 0) {
1653                 fprintf(stderr, "argument to --log-fd must be > 0\n");
1654                 parse_options_usage(stat_usage, stat_options, "log-fd", 0);
1655                 goto out;
1656         }
1657
1658         if (!output) {
1659                 struct timespec tm;
1660                 mode = append_file ? "a" : "w";
1661
1662                 output = fopen(output_name, mode);
1663                 if (!output) {
1664                         perror("failed to create output file");
1665                         return -1;
1666                 }
1667                 clock_gettime(CLOCK_REALTIME, &tm);
1668                 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
1669         } else if (output_fd > 0) {
1670                 mode = append_file ? "a" : "w";
1671                 output = fdopen(output_fd, mode);
1672                 if (!output) {
1673                         perror("Failed opening logfd");
1674                         return -errno;
1675                 }
1676         }
1677
1678         stat_config.output = output;
1679
1680         /*
1681          * let the spreadsheet do the pretty-printing
1682          */
1683         if (stat_config.csv_output) {
1684                 /* User explicitly passed -B? */
1685                 if (big_num_opt == 1) {
1686                         fprintf(stderr, "-B option not supported with -x\n");
1687                         parse_options_usage(stat_usage, stat_options, "B", 1);
1688                         parse_options_usage(NULL, stat_options, "x", 1);
1689                         goto out;
1690                 } else /* Nope, so disable big number formatting */
1691                         stat_config.big_num = false;
1692         } else if (big_num_opt == 0) /* User passed --no-big-num */
1693                 stat_config.big_num = false;
1694
1695         setup_system_wide(argc);
1696
1697         /*
1698          * Display user/system times only for a single
1699          * run and when there's a specified tracee.
1700          */
1701         if ((stat_config.run_count == 1) && target__none(&target))
1702                 stat_config.ru_display = true;
1703
1704         if (stat_config.run_count < 0) {
1705                 pr_err("Run count must be a positive number\n");
1706                 parse_options_usage(stat_usage, stat_options, "r", 1);
1707                 goto out;
1708         } else if (stat_config.run_count == 0) {
1709                 forever = true;
1710                 stat_config.run_count = 1;
1711         }
1712
1713         if (stat_config.walltime_run_table) {
1714                 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
1715                 if (!stat_config.walltime_run) {
1716                         pr_err("failed to set up -r option\n");
1717                         goto out;
1718                 }
1719         }
1720
1721         if ((stat_config.aggr_mode == AGGR_THREAD) &&
1722                 !target__has_task(&target)) {
1723                 if (!target.system_wide || target.cpu_list) {
1724                         fprintf(stderr, "The --per-thread option is only "
1725                                 "available when monitoring via -p -t -a "
1726                                 "options or only --per-thread.\n");
1727                         parse_options_usage(NULL, stat_options, "p", 1);
1728                         parse_options_usage(NULL, stat_options, "t", 1);
1729                         goto out;
1730                 }
1731         }
1732
1733         /*
1734          * no_aggr and cgroup are for system-wide only;
1735          * --per-thread is aggregated per thread, we don't mix it with CPU mode
1736          */
1737         if (((stat_config.aggr_mode != AGGR_GLOBAL &&
1738               stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
1739             !target__has_cpu(&target)) {
1740                 fprintf(stderr, "both cgroup and no-aggregation "
1741                         "modes are only available in system-wide mode\n");
1742
1743                 parse_options_usage(stat_usage, stat_options, "G", 1);
1744                 parse_options_usage(NULL, stat_options, "A", 1);
1745                 parse_options_usage(NULL, stat_options, "a", 1);
1746                 goto out;
1747         }
1748
1749         if (add_default_attributes())
1750                 goto out;
1751
1752         target__validate(&target);
1753
1754         if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
1755                 target.per_thread = true;
1756
1757         if (perf_evlist__create_maps(evsel_list, &target) < 0) {
1758                 if (target__has_task(&target)) {
1759                         pr_err("Problems finding threads to monitor\n");
1760                         parse_options_usage(stat_usage, stat_options, "p", 1);
1761                         parse_options_usage(NULL, stat_options, "t", 1);
1762                 } else if (target__has_cpu(&target)) {
1763                         perror("failed to parse CPUs map");
1764                         parse_options_usage(stat_usage, stat_options, "C", 1);
1765                         parse_options_usage(NULL, stat_options, "a", 1);
1766                 }
1767                 goto out;
1768         }
1769
1770         /*
1771          * Initialize thread_map with comm names,
1772          * so we can print them in the output.
1773          */
1774         if (stat_config.aggr_mode == AGGR_THREAD) {
1775                 thread_map__read_comms(evsel_list->threads);
1776                 if (target.system_wide) {
1777                         if (runtime_stat_new(&stat_config,
1778                                 thread_map__nr(evsel_list->threads))) {
1779                                 goto out;
1780                         }
1781                 }
1782         }
1783
1784         if (stat_config.times && interval)
1785                 interval_count = true;
1786         else if (stat_config.times && !interval) {
1787                 pr_err("interval-count option should be used together with "
1788                                 "interval-print.\n");
1789                 parse_options_usage(stat_usage, stat_options, "interval-count", 0);
1790                 parse_options_usage(stat_usage, stat_options, "I", 1);
1791                 goto out;
1792         }
1793
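        /*
         * --timeout is given in milliseconds: anything below 10ms is
         * rejected, and below 100ms we only warn, since such a short
         * measurement window can make the tool's own overhead a noticeable
         * fraction of what is being measured.
         */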
1794         if (timeout && timeout < 100) {
1795                 if (timeout < 10) {
1796                         pr_err("timeout must be >= 10ms.\n");
1797                         parse_options_usage(stat_usage, stat_options, "timeout", 0);
1798                         goto out;
1799                 } else
1800                         pr_warning("timeout < 100ms. "
1801                                    "The overhead percentage could be high in some cases. "
1802                                    "Please proceed with caution.\n");
1803         }
1804         if (timeout && interval) {
1805                 pr_err("timeout option is not supported with interval-print.\n");
1806                 parse_options_usage(stat_usage, stat_options, "timeout", 0);
1807                 parse_options_usage(stat_usage, stat_options, "I", 1);
1808                 goto out;
1809         }
1810
1811         if (perf_evlist__alloc_stats(evsel_list, interval))
1812                 goto out;
1813
1814         if (perf_stat_init_aggr_mode())
1815                 goto out;
1816
1817         /*
1818          * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
1819          * while keeping older tools from showing confusing messages.
1820          *
1821          * However, for pipe sessions we need to keep it zero,
1822          * because script's perf_evsel__check_attr is triggered
1823          * by attr->sample_type != 0, and we can't run it on
1824          * stat sessions.
1825          */
1826         stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);
1827
1828         /*
1829          * We don't want to block the signals - that would cause
1830          * child tasks to inherit that and Ctrl-C would not work.
1831          * What we want is for Ctrl-C to work in the exec()-ed
1832          * task, while being ignored by perf stat itself:
1833          */
1834         atexit(sig_atexit);
1835         if (!forever)
1836                 signal(SIGINT,  skip_signal);
1837         signal(SIGCHLD, skip_signal);
1838         signal(SIGALRM, skip_signal);
1839         signal(SIGABRT, skip_signal);
1840
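        /*
         * Run the workload stat_config.run_count times (-r); with -r 0
         * ("forever") keep re-running it, printing and resetting the stats
         * after every run.
         */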
1841         status = 0;
1842         for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
1843                 if (stat_config.run_count != 1 && verbose > 0)
1844                         fprintf(output, "[ perf stat: executing run #%d ... ]\n",
1845                                 run_idx + 1);
1846
1847                 status = run_perf_stat(argc, argv, run_idx);
1848                 if (forever && status != -1) {
1849                         print_counters(NULL, argc, argv);
1850                         perf_stat__reset_stats();
1851                 }
1852         }
1853
1854         if (!forever && status != -1 && !interval)
1855                 print_counters(NULL, argc, argv);
1856
1857         if (STAT_RECORD) {
1858                 /*
1859                  * We synthesize the kernel mmap record just so that older tools
1860                  * don't emit warnings about not being able to resolve symbols
1861                  * due to /proc/sys/kernel/kptr_restrict settings and instead provide
1862                  * a saner message about no samples being in the perf.data file.
1863                  *
1864                  * This also serves to suppress a warning about f_header.data.size == 0
1865                  * in header.c at the moment 'perf stat record' gets introduced, which
1866                  * is not really needed once we start adding the stat specific PERF_RECORD_
1867                  * records, but the need to suppress the kptr_restrict messages in older
1868                  * tools remains.  -acme
1869                  */
1870                 int fd = perf_data__fd(&perf_stat.data);
1871                 int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
1872                                                              process_synthesized_event,
1873                                                              &perf_stat.session->machines.host);
1874                 if (err) {
1875                         pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
1876                                    "older tools may produce warnings about this file.\n");
1877                 }
1878
1879                 if (!interval) {
1880                         if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
1881                                 pr_err("failed to write stat round event\n");
1882                 }
1883
1884                 if (!perf_stat.data.is_pipe) {
1885                         perf_stat.session->header.data_size += perf_stat.bytes_written;
1886                         perf_session__write_header(perf_stat.session, evsel_list, fd, true);
1887                 }
1888
1889                 perf_session__delete(perf_stat.session);
1890         }
1891
1892         perf_stat__exit_aggr_mode();
1893         perf_evlist__free_stats(evsel_list);
1894 out:
1895         free(stat_config.walltime_run);
1896
1897         if (smi_cost && smi_reset)
1898                 sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
1899
1900         perf_evlist__delete(evsel_list);
1901
1902         runtime_stat_delete(&stat_config);
1903
1904         return status;
1905 }