4 * Builtin record command: Record the profile of a workload
5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report.
8 #define _FILE_OFFSET_BITS 64
14 #include "util/build-id.h"
15 #include "util/util.h"
16 #include "util/parse-options.h"
17 #include "util/parse-events.h"
19 #include "util/header.h"
20 #include "util/event.h"
21 #include "util/evlist.h"
22 #include "util/evsel.h"
23 #include "util/debug.h"
24 #include "util/session.h"
25 #include "util/tool.h"
26 #include "util/symbol.h"
27 #include "util/cpumap.h"
28 #include "util/thread_map.h"
/*
 * Minimal on_exit(3) emulation for C libraries that lack it: callbacks
 * registered here are run at process exit with the exit status that the
 * exit() wrapper macro below recorded.  Table is fixed-size (ATEXIT_MAX).
 */
38 static int __on_exit_count = 0;
39 typedef void (*on_exit_func_t) (int, void *);
40 static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
41 static void *__on_exit_args[ATEXIT_MAX];
42 static int __exitcode = 0;
43 static void __handle_on_exit_funcs(void);
44 static int on_exit(on_exit_func_t function, void *arg);
/* Wrap exit() so the status is captured before the real exit runs. */
45 #define exit(x) (exit)(__exitcode = (x))
/*
 * Register 'function' to run at exit with the recorded exit code and 'arg'.
 * The first registration hooks the dispatcher into atexit(); registrations
 * beyond ATEXIT_MAX are rejected (error path elided in this view).
 */
47 static int on_exit(on_exit_func_t function, void *arg)
49 if (__on_exit_count == ATEXIT_MAX)
51 else if (__on_exit_count == 0)
52 atexit(__handle_on_exit_funcs);
53 __on_exit_funcs[__on_exit_count] = function;
54 __on_exit_args[__on_exit_count++] = arg;
/* atexit() trampoline: invoke every registered callback in order. */
58 static void __handle_on_exit_funcs(void)
61 for (i = 0; i < __on_exit_count; i++)
62 __on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
/*
 * Per-invocation state for 'perf record' (enclosing struct definition is
 * not visible in this chunk; these are its members).
 */
72 struct perf_tool tool; /* event-processing callbacks (must stay first: container_of() use below) */
73 struct perf_record_opts opts; /* recording options parsed from the command line */
75 const char *output_name; /* path of the perf.data output file, or NULL */
76 struct perf_evlist *evlist; /* events being recorded */
77 struct perf_session *session; /* session backing the output file */
80 unsigned int page_size; /* runtime page size, from sysconf() */
82 enum write_mode_t write_mode; /* WRITE_FORCE (overwrite) vs WRITE_APPEND */
84 bool no_buildid_cache; /* --no-buildid-cache: skip buildid cache update */
89 off_t post_processing_offset; /* file offset where recorded samples start */
/* Account for 'size' bytes that were already written to the output file. */
92 static void advance_output(struct perf_record *rec, size_t size)
94 rec->bytes_written += size;
/*
 * Write 'size' bytes from 'buf' to the output fd, accumulating the byte
 * count in rec->bytes_written.  NOTE(review): the write(2) result is used
 * directly; the surrounding (elided) code presumably loops on short
 * writes — confirm against the full source.
 */
97 static int write_output(struct perf_record *rec, void *buf, size_t size)
100 int ret = write(rec->output, buf, size);
103 pr_err("failed to write\n");
110 rec->bytes_written += ret;
/*
 * perf_tool callback: forward a synthesized event straight to the output
 * file.  'sample' and 'machine' are unused.  Recovers the owning
 * perf_record via container_of() on the embedded tool member.
 */
116 static int process_synthesized_event(struct perf_tool *tool,
117 union perf_event *event,
118 struct perf_sample *sample __maybe_unused,
119 struct machine *machine __maybe_unused)
121 struct perf_record *rec = container_of(tool, struct perf_record, tool);
122 if (write_output(rec, event, event->header.size) < 0)
/*
 * Drain one mmap'd ring buffer into the output file.  'data' skips the
 * first page, which holds the control header.  When the valid region
 * wraps past the end of the buffer, it is written in two chunks; finally
 * the tail pointer is advanced so the kernel may reuse the space.
 */
128 static int perf_record__mmap_read(struct perf_record *rec,
129 struct perf_mmap *md)
131 unsigned int head = perf_mmap__read_head(md);
132 unsigned int old = md->prev;
133 unsigned char *data = md->base + rec->page_size;
/* Wrapped: write from 'old' to the end of the buffer first... */
145 if ((old & md->mask) + size != (head & md->mask)) {
146 buf = &data[old & md->mask];
147 size = md->mask + 1 - (old & md->mask);
150 if (write_output(rec, buf, size) < 0) {
/* ...then the remainder from the start of the buffer up to 'head'. */
156 buf = &data[old & md->mask];
160 if (write_output(rec, buf, size) < 0) {
/* Publish the new tail so the kernel can overwrite consumed data. */
166 perf_mmap__write_tail(md, old);
/* Set asynchronously from sig_handler(); polled by the record loop. */
172 static volatile int done = 0;
173 static volatile int signr = -1;
174 static volatile int child_finished = 0;
/* Async-signal handler; body elided in this view (records 'signr', sets 'done'). */
176 static void sig_handler(int sig)
/*
 * on_exit handler: if a forked workload is still running, terminate it
 * with SIGTERM and report whether it died from a signal.  Then, unless we
 * exited normally or via SIGUSR1, re-raise the fatal signal with default
 * disposition so the parent shell sees the true cause of death.
 */
185 static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
187 struct perf_record *rec = arg;
190 if (rec->evlist->workload.pid > 0) {
192 kill(rec->evlist->workload.pid, SIGTERM);
195 if (WIFSIGNALED(status))
196 psignal(WTERMSIG(status), rec->progname);
199 if (signr == -1 || signr == SIGUSR1)
202 signal(signr, SIG_DFL);
203 kill(getpid(), signr);
206 static bool perf_evlist__equal(struct perf_evlist *evlist,
207 struct perf_evlist *other)
209 struct perf_evsel *pos, *pair;
211 if (evlist->nr_entries != other->nr_entries)
214 pair = perf_evlist__first(other);
216 list_for_each_entry(pos, &evlist->entries, node) {
217 if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0))
219 pair = perf_evsel__next(pair);
/*
 * Open a counter for every event in the list, falling back gracefully on
 * older kernels (no exclude_guest/host, no sample_id_all, no PMU cycles
 * event), apply event filters, and mmap the ring buffers.  On append,
 * verify the event list matches the existing file's.
 */
225 static int perf_record__open(struct perf_record *rec)
227 struct perf_evsel *pos;
228 struct perf_evlist *evlist = rec->evlist;
229 struct perf_session *session = rec->session;
230 struct perf_record_opts *opts = &rec->opts;
233 perf_evlist__config(evlist, opts);
235 list_for_each_entry(pos, &evlist->entries, node) {
236 struct perf_event_attr *attr = &pos->attr;
238 * Check if parse_single_tracepoint_event has already asked for
241 * XXX this is kludgy but short term fix for problems introduced by
242 * eac23d1c that broke 'perf script' by having different sample_types
243 * when using multiple tracepoint events when we use a perf binary
244 * that tries to use sample_id_all on an older kernel.
246 * We need to move counter creation to perf_session, support
247 * different sample_types, etc.
249 bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
/* Retry target: attrs are re-tweaked here after each detected missing feature. */
251 fallback_missing_features:
252 if (opts->exclude_guest_missing)
253 attr->exclude_guest = attr->exclude_host = 0;
255 attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
257 if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
260 if (err == EPERM || err == EACCES) {
261 ui__error_paranoid();
264 } else if (err == ENODEV && opts->target.cpu_list) {
265 pr_err("No such device - did you specify"
266 " an out-of-range profile CPU?\n");
/* EINVAL: probe which optional attr field the running kernel rejects. */
269 } else if (err == EINVAL) {
270 if (!opts->exclude_guest_missing &&
271 (attr->exclude_guest || attr->exclude_host)) {
272 pr_debug("Old kernel, cannot exclude "
273 "guest or host samples.\n");
274 opts->exclude_guest_missing = true;
275 goto fallback_missing_features;
276 } else if (!opts->sample_id_all_missing) {
278 * Old kernel, no attr->sample_id_type_all field
280 opts->sample_id_all_missing = true;
281 if (!opts->sample_time && !opts->raw_samples && !time_needed)
282 perf_evsel__reset_sample_bit(pos, TIME);
284 goto retry_sample_id;
289 * If it's cycles then fall back to hrtimer
290 * based cpu-clock-tick sw counter, which
291 * is always available even if no PMU support.
293 * PPC returns ENXIO until 2.6.37 (behavior changed
294 * with commit b0a873e).
296 if ((err == ENOENT || err == ENXIO)
297 && attr->type == PERF_TYPE_HARDWARE
298 && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
301 ui__warning("The cycles event is not supported, "
302 "trying to fall back to cpu-clock-ticks\n");
303 attr->type = PERF_TYPE_SOFTWARE;
304 attr->config = PERF_COUNT_SW_CPU_CLOCK;
313 ui__error("The %s event is not supported.\n",
314 perf_evsel__name(pos));
317 } else if ((err == EOPNOTSUPP) && (attr->precise_ip)) {
318 ui__error("\'precise\' request may not be supported. "
319 "Try removing 'p' modifier\n");
/* Unclassified error: dump the raw errno and event name for diagnosis. */
325 error("sys_perf_event_open() syscall returned with %d "
326 "(%s) for event %s. /bin/dmesg may provide "
327 "additional information.\n",
328 err, strerror(err), perf_evsel__name(pos));
330 #if defined(__i386__) || defined(__x86_64__)
331 if (attr->type == PERF_TYPE_HARDWARE &&
333 pr_err("No hardware sampling interrupt available."
334 " No APIC? If so then you can boot the kernel"
335 " with the \"lapic\" boot parameter to"
336 " force-enable it.\n");
342 pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
348 if (perf_evlist__apply_filters(evlist)) {
349 error("failed to set filter with %d (%s)\n", errno,
/* Map the ring buffers for all opened counters. */
355 if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
356 if (errno == EPERM) {
357 pr_err("Permission error mapping pages.\n"
358 "Consider increasing "
359 "/proc/sys/kernel/perf_event_mlock_kb,\n"
360 "or try again with a smaller value of -m/--mmap_pages.\n"
361 "(current value: %d)\n", opts->mmap_pages);
363 } else if (!is_power_of_2(opts->mmap_pages) &&
364 (opts->mmap_pages != UINT_MAX)) {
365 pr_err("--mmap_pages/-m value must be a power of two.");
368 pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
375 session->evlist = evlist;
/* Appending to an existing file: refuse if the event lists differ. */
377 if (!perf_evlist__equal(session->evlist, evlist)) {
378 fprintf(stderr, "incompatible append\n");
384 perf_session__set_id_hdr_size(session);
/*
 * Re-read the samples just written (from post_processing_offset to the
 * current file position) so build_id__mark_dso_hit_ops can flag which
 * DSOs were actually hit — only those get build-ids in the header.
 */
389 static int process_buildids(struct perf_record *rec)
391 u64 size = lseek(rec->output, 0, SEEK_CUR);
396 rec->session->fd = rec->output;
397 return __perf_session__process_events(rec->session, rec->post_processing_offset,
398 size - rec->post_processing_offset,
399 size, &build_id__mark_dso_hit_ops);
/*
 * on_exit handler: finalize a file-backed (non-pipe) session — update the
 * data size, collect build-ids unless disabled, rewrite the header, and
 * release the session and event list.
 */
402 static void perf_record__exit(int status, void *arg)
404 struct perf_record *rec = arg;
409 if (!rec->opts.pipe_output) {
410 rec->session->header.data_size += rec->bytes_written;
412 if (!rec->no_buildid)
413 process_buildids(rec);
414 perf_session__write_header(rec->session, rec->evlist,
416 perf_session__delete(rec->session);
417 perf_evlist__delete(rec->evlist);
/*
 * Machine-iteration callback: synthesize module and kernel mmap events
 * for each guest machine (the host is skipped — it is handled separately
 * in __cmd_record()).
 */
422 static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
425 struct perf_tool *tool = data;
427 if (machine__is_host(machine))
431 *As for guest kernel when processing subcommand record&report,
432 *we arrange module mmap prior to guest kernel mmap and trigger
433 *a preload dso because default guest module symbols are loaded
434 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
435 *method is used to avoid symbol missing when the first addr is
436 *in module instead of in guest kernel.
438 err = perf_event__synthesize_modules(tool, process_synthesized_event,
441 pr_err("Couldn't record guest kernel [%d]'s reference"
442 " relocation symbol.\n", machine->pid);
445 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
446 * have no _text sometimes.
448 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
451 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
454 pr_err("Couldn't record guest kernel [%d]'s reference"
455 " relocation symbol.\n", machine->pid);
/*
 * Header-only marker event written after each full pass over the mmap
 * buffers, letting 'perf report' bound its event-reordering window.
 */
458 static struct perf_event_header finished_round_event = {
459 .size = sizeof(struct perf_event_header),
460 .type = PERF_RECORD_FINISHED_ROUND,
/*
 * Drain every mmap'd ring buffer once, then emit a FINISHED_ROUND marker
 * (only for sessions carrying tracing data, where ordering matters).
 */
463 static int perf_record__mmap_read_all(struct perf_record *rec)
468 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
469 if (rec->evlist->mmap[i].base) {
470 if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
477 if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
478 rc = write_output(rec, &finished_round_event,
479 sizeof(finished_round_event));
/*
 * Main body of 'perf record': set up signal/exit handlers and the output
 * file, create the session, open counters, synthesize the initial
 * metadata events (attrs, kernel/module/thread mmaps), then loop reading
 * the mmap buffers until the workload finishes or the user interrupts.
 */
485 static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
489 int err, output, feat;
490 unsigned long waking = 0;
491 const bool forks = argc > 0;
492 struct machine *machine;
493 struct perf_tool *tool = &rec->tool;
494 struct perf_record_opts *opts = &rec->opts;
495 struct perf_evlist *evsel_list = rec->evlist;
496 const char *output_name = rec->output_name;
497 struct perf_session *session;
498 bool disabled = false;
500 rec->progname = argv[0];
502 rec->page_size = sysconf(_SC_PAGE_SIZE);
504 on_exit(perf_record__sig_exit, rec);
505 signal(SIGCHLD, sig_handler);
506 signal(SIGINT, sig_handler);
507 signal(SIGUSR1, sig_handler);
/* Resolve the output: pipe if stdout is a FIFO or name is "-"; rotate or
 * append when the named file already exists. */
510 if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
511 opts->pipe_output = true;
513 rec->output_name = output_name = "perf.data";
516 if (!strcmp(output_name, "-"))
517 opts->pipe_output = true;
518 else if (!stat(output_name, &st) && st.st_size) {
519 if (rec->write_mode == WRITE_FORCE) {
520 char oldname[PATH_MAX];
521 snprintf(oldname, sizeof(oldname), "%s.old",
524 rename(output_name, oldname);
526 } else if (rec->write_mode == WRITE_APPEND) {
527 rec->write_mode = WRITE_FORCE;
531 flags = O_CREAT|O_RDWR;
532 if (rec->write_mode == WRITE_APPEND)
537 if (opts->pipe_output)
538 output = STDOUT_FILENO;
540 output = open(output_name, flags, S_IRUSR | S_IWUSR);
542 perror("failed to create output file");
546 rec->output = output;
548 session = perf_session__new(output_name, O_WRONLY,
549 rec->write_mode == WRITE_FORCE, false, NULL);
550 if (session == NULL) {
551 pr_err("Not enough memory for reading perf file header\n");
555 rec->session = session;
/* Start with all header features set, then clear the inapplicable ones. */
557 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
558 perf_header__set_feat(&session->header, feat);
561 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
563 if (!have_tracepoints(&evsel_list->entries))
564 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
566 if (!rec->opts.branch_stack)
567 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
569 if (!rec->file_new) {
570 err = perf_session__read_header(session, output);
572 goto out_delete_session;
576 err = perf_evlist__prepare_workload(evsel_list, opts, argv);
578 pr_err("Couldn't run the workload!\n");
579 goto out_delete_session;
583 if (perf_record__open(rec) != 0) {
585 goto out_delete_session;
589 * perf_session__delete(session) will be called at perf_record__exit()
591 on_exit(perf_record__exit, rec);
593 if (opts->pipe_output) {
594 err = perf_header__write_pipe(output);
596 goto out_delete_session;
597 } else if (rec->file_new) {
598 err = perf_session__write_header(session, evsel_list,
601 goto out_delete_session;
605 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
606 pr_err("Couldn't generate buildids. "
607 "Use --no-buildid to profile anyway.\n");
609 goto out_delete_session;
/* Samples written from here on are what process_buildids() re-reads. */
612 rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
614 machine = perf_session__find_host_machine(session);
616 pr_err("Couldn't find native kernel information.\n");
618 goto out_delete_session;
/* Pipe output carries no header, so synthesize attrs/event types inline. */
621 if (opts->pipe_output) {
622 err = perf_event__synthesize_attrs(tool, session,
623 process_synthesized_event);
625 pr_err("Couldn't synthesize attrs.\n");
626 goto out_delete_session;
629 err = perf_event__synthesize_event_types(tool, process_synthesized_event,
632 pr_err("Couldn't synthesize event_types.\n");
633 goto out_delete_session;
636 if (have_tracepoints(&evsel_list->entries)) {
638 * FIXME err <= 0 here actually means that
639 * there were no tracepoints so its not really
640 * an error, just that we don't need to
641 * synthesize anything. We really have to
642 * return this more properly and also
643 * propagate errors that now are calling die()
645 err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
646 process_synthesized_event);
648 pr_err("Couldn't record tracing data.\n");
649 goto out_delete_session;
651 advance_output(rec, err);
655 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
658 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
661 pr_err("Couldn't record kernel reference relocation symbol\n"
662 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
663 "Check /proc/kallsyms permission or run as root.\n");
665 err = perf_event__synthesize_modules(tool, process_synthesized_event,
668 pr_err("Couldn't record kernel module information.\n"
669 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
670 "Check /proc/modules permission or run as root.\n");
673 perf_session__process_machines(session, tool,
674 perf_event__synthesize_guest_os);
676 if (!opts->target.system_wide)
677 err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
678 process_synthesized_event,
681 err = perf_event__synthesize_threads(tool, process_synthesized_event,
685 goto out_delete_session;
687 if (rec->realtime_prio) {
688 struct sched_param param;
690 param.sched_priority = rec->realtime_prio;
/* NOTE(review): "¶m" below is a mis-encoded "&param" (UTF-8/Latin-1
 * mangling of '&para' + 'm') — restore "&param" when fixing encoding. */
691 if (sched_setscheduler(0, SCHED_FIFO, ¶m)) {
692 pr_err("Could not set realtime priority.\n");
694 goto out_delete_session;
699 * When perf is starting the traced process, all the events
700 * (apart from group members) have enable_on_exec=1 set,
701 * so don't spoil it by prematurely enabling them.
703 if (!perf_target__none(&opts->target))
704 perf_evlist__enable(evsel_list);
710 perf_evlist__start_workload(evsel_list);
/* Main record loop: drain buffers, poll when no new samples arrived. */
713 int hits = rec->samples;
715 if (perf_record__mmap_read_all(rec) < 0) {
717 goto out_delete_session;
720 if (hits == rec->samples) {
723 err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
728 * When perf is starting the traced process, at the end events
729 * die with the process and we wait for that. Thus no need to
730 * disable events in this case.
732 if (done && !disabled && !perf_target__none(&opts->target)) {
733 perf_evlist__disable(evsel_list);
738 if (quiet || signr == SIGUSR1)
741 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
744 * Approximate RIP event size: 24 bytes.
747 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
748 (double)rec->bytes_written / 1024.0 / 1024.0,
750 rec->bytes_written / 24);
755 perf_session__delete(session);
/* Table mapping -j/--branch-filter option strings to
 * PERF_SAMPLE_BRANCH_* flag bits; terminated by a NULL name. */
759 #define BRANCH_OPT(n, m) \
760 { .name = n, .mode = (m) }
762 #define BRANCH_END { .name = NULL }
769 static const struct branch_mode branch_modes[] = {
770 BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
771 BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
772 BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
773 BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
774 BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
775 BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
776 BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
/*
 * Option callback for -b/-j: parse a comma-separated list of branch
 * filter names (see branch_modes[]) into a PERF_SAMPLE_BRANCH_* bitmask
 * stored at opt->value.  When only privilege-level bits are set (or no
 * argument is given), defaults to sampling any taken branch.
 */
781 parse_branch_stack(const struct option *opt, const char *str, int unset)
784 (PERF_SAMPLE_BRANCH_USER |\
785 PERF_SAMPLE_BRANCH_KERNEL |\
786 PERF_SAMPLE_BRANCH_HV)
788 uint64_t *mode = (uint64_t *)opt->value;
789 const struct branch_mode *br;
790 char *s, *os = NULL, *p;
797 * cannot set it twice, -b + --branch-filter for instance
802 /* str may be NULL in case no arg is passed to -b */
804 /* because str is read-only */
805 s = os = strdup(str);
/* Match each comma-separated token against the filter table. */
814 for (br = branch_modes; br->name; br++) {
815 if (!strcasecmp(s, br->name))
819 ui__warning("unknown branch filter %s,"
820 " check man page\n", s);
834 /* default to any branch */
835 if ((*mode & ~ONLY_PLM) == 0) {
836 *mode = PERF_SAMPLE_BRANCH_ANY;
843 #ifdef LIBUNWIND_SUPPORT
/*
 * Parse a user-supplied stack dump size, round it up to a u64 multiple,
 * and reject zero or anything beyond USHRT_MAX rounded down to a u64
 * boundary.  Result is stored through '_size'.
 */
844 static int get_stack_size(char *str, unsigned long *_size)
848 unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
850 size = strtoul(str, &endptr, 0);
856 size = round_up(size, sizeof(u64));
857 if (!size || size > max_size)
865 pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
869 #endif /* LIBUNWIND_SUPPORT */
/*
 * Option callback for -g/--call-graph: parse "fp" (frame pointer) or,
 * with libunwind support, "dwarf[,dump_size]" into opts->call_graph and
 * opts->stack_dump_size.  A duplicated, writable copy of 'arg' is
 * tokenized since the option string is read-only.
 */
871 int record_parse_callchain_opt(const struct option *opt,
872 const char *arg, int unset)
874 struct perf_record_opts *opts = opt->value;
875 char *tok, *name, *saveptr = NULL;
879 /* --no-call-graph */
883 /* We specified default option if none is provided. */
886 /* We need buffer that we know we can write to. */
887 buf = malloc(strlen(arg) + 1);
893 tok = strtok_r((char *)buf, ",", &saveptr);
894 name = tok ? : (char *)buf;
897 /* Framepointer style */
898 if (!strncmp(name, "fp", sizeof("fp"))) {
899 if (!strtok_r(NULL, ",", &saveptr)) {
900 opts->call_graph = CALLCHAIN_FP;
903 pr_err("callchain: No more arguments "
904 "needed for -g fp\n");
907 #ifdef LIBUNWIND_SUPPORT
/* Dwarf-style unwinding: optional second token is the stack dump size. */
909 } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
910 const unsigned long default_stack_dump_size = 8192;
913 opts->call_graph = CALLCHAIN_DWARF;
914 opts->stack_dump_size = default_stack_dump_size;
916 tok = strtok_r(NULL, ",", &saveptr);
918 unsigned long size = 0;
920 ret = get_stack_size(tok, &size);
921 opts->stack_dump_size = size;
925 pr_debug("callchain: stack dump size %d\n",
926 opts->stack_dump_size);
927 #endif /* LIBUNWIND_SUPPORT */
929 pr_err("callchain: Unknown -g option "
939 pr_debug("callchain: type %d\n", opts->call_graph);
/* Usage strings shown by usage_with_options() for 'perf record'. */
944 static const char * const record_usage[] = {
945 "perf record [<options>] [<command>]",
946 "perf record [<options>] -- <command> [<options>]",
951 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
952 * because we need to have access to it in perf_record__exit, that is called
953 * after cmd_record() exits, but since record_options need to be accessible to
954 * builtin-script, leave it here.
956 * At least we don't ouch it in all the other functions here directly.
958 * Just say no to tons of global variables, sigh.
/* Global 'perf record' state; UINT_MAX/ULLONG_MAX mark "not set by user". */
960 static struct perf_record record = {
962 .mmap_pages = UINT_MAX,
963 .user_freq = UINT_MAX,
964 .user_interval = ULLONG_MAX,
970 .write_mode = WRITE_FORCE,
/* -g help text: advertise "dwarf" only when built with libunwind. */
974 #define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
976 #ifdef LIBUNWIND_SUPPORT
977 const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
979 const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
983 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
984 * with it and switch to use the library functions in perf_evlist that came
985 * from builtin-record.c, i.e. use perf_record_opts,
986 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
/* Command-line option table for 'perf record' (also consumed by
 * builtin-script.c — hence non-static). */
989 const struct option record_options[] = {
990 OPT_CALLBACK('e', "event", &record.evlist, "event",
991 "event selector. use 'perf list' to list available events",
992 parse_events_option),
993 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
994 "event filter", parse_filter),
995 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
996 "record events on existing process id"),
997 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
998 "record events on existing thread id"),
999 OPT_INTEGER('r', "realtime", &record.realtime_prio,
1000 "collect data with this RT SCHED_FIFO priority"),
1001 OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
1002 "collect data without buffering"),
1003 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
1004 "collect raw sample records from all opened counters"),
1005 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
1006 "system-wide collection from all CPUs"),
1007 OPT_BOOLEAN('A', "append", &record.append_file,
1008 "append to the output file to do incremental profiling"),
1009 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
1010 "list of cpus to monitor"),
1011 OPT_BOOLEAN('f', "force", &record.force,
1012 "overwrite existing data file (deprecated)"),
1013 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
1014 OPT_STRING('o', "output", &record.output_name, "file",
1015 "output file name"),
1016 OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
1017 "child tasks do not inherit counters"),
1018 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
1019 OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
1020 "number of mmap data pages"),
1021 OPT_BOOLEAN(0, "group", &record.opts.group,
1022 "put the counters into a counter group"),
1023 OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
1024 "mode[,dump_size]", record_callchain_help,
1025 &record_parse_callchain_opt, "fp"),
1026 OPT_INCR('v', "verbose", &verbose,
1027 "be more verbose (show counter open errors, etc)"),
1028 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
1029 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
1030 "per thread counts"),
1031 OPT_BOOLEAN('d', "data", &record.opts.sample_address,
1032 "Sample addresses"),
1033 OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
1034 OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
1035 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
1037 OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
1038 "do not update the buildid cache"),
1039 OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
1040 "do not collect buildids in perf.data"),
1041 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
1042 "monitor event in cgroup name only",
1044 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1047 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1048 "branch any", "sample any taken branches",
1049 parse_branch_stack),
1051 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1052 "branch filter mask", "branch stack filter modes",
1053 parse_branch_stack),
1057 int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
1060 struct perf_evsel *pos;
1061 struct perf_evlist *evsel_list;
1062 struct perf_record *rec = &record;
1063 char errbuf[BUFSIZ];
1065 evsel_list = perf_evlist__new(NULL, NULL);
1066 if (evsel_list == NULL)
1069 rec->evlist = evsel_list;
1071 argc = parse_options(argc, argv, record_options, record_usage,
1072 PARSE_OPT_STOP_AT_NON_OPTION);
1073 if (!argc && perf_target__none(&rec->opts.target))
1074 usage_with_options(record_usage, record_options);
1076 if (rec->force && rec->append_file) {
1077 ui__error("Can't overwrite and append at the same time."
1078 " You need to choose between -f and -A");
1079 usage_with_options(record_usage, record_options);
1080 } else if (rec->append_file) {
1081 rec->write_mode = WRITE_APPEND;
1083 rec->write_mode = WRITE_FORCE;
1086 if (nr_cgroups && !rec->opts.target.system_wide) {
1087 ui__error("cgroup monitoring only available in"
1088 " system-wide mode\n");
1089 usage_with_options(record_usage, record_options);
1094 if (symbol_conf.kptr_restrict)
1096 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1097 "check /proc/sys/kernel/kptr_restrict.\n\n"
1098 "Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1099 "file is not found in the buildid cache or in the vmlinux path.\n\n"
1100 "Samples in kernel modules won't be resolved at all.\n\n"
1101 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1102 "even with a suitable vmlinux or kallsyms file.\n\n");
1104 if (rec->no_buildid_cache || rec->no_buildid)
1105 disable_buildid_cache();
1107 if (evsel_list->nr_entries == 0 &&
1108 perf_evlist__add_default(evsel_list) < 0) {
1109 pr_err("Not enough memory for event selector list\n");
1110 goto out_symbol_exit;
1113 err = perf_target__validate(&rec->opts.target);
1115 perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1116 ui__warning("%s", errbuf);
1119 err = perf_target__parse_uid(&rec->opts.target);
1121 int saved_errno = errno;
1123 perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1124 ui__error("%s", errbuf);
1131 if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
1132 usage_with_options(record_usage, record_options);
1134 list_for_each_entry(pos, &evsel_list->entries, node) {
1135 if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos)))
1139 if (rec->opts.user_interval != ULLONG_MAX)
1140 rec->opts.default_interval = rec->opts.user_interval;
1141 if (rec->opts.user_freq != UINT_MAX)
1142 rec->opts.freq = rec->opts.user_freq;
1145 * User specified count overrides default frequency.
1147 if (rec->opts.default_interval)
1149 else if (rec->opts.freq) {
1150 rec->opts.default_interval = rec->opts.freq;
1152 ui__error("frequency and count are zero, aborting\n");
1157 err = __cmd_record(&record, argc, argv);
1159 perf_evlist__delete_maps(evsel_list);