// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
 * Copyright (c) 2020 Changbin Du <changbin.du@gmail.com>, significant enhancement.
 */
#include "builtin.h"

#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
#include <math.h>
#include <poll.h>
#include <ctype.h>
#include <linux/capability.h>
#include <linux/string.h>

#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "strfilter.h"
#include "util/cap.h"
#include "util/config.h"
#include "util/ftrace.h"
#include "util/units.h"
#include "util/parse-sublevel-options.h"
#define DEFAULT_TRACER "function_graph"

static volatile sig_atomic_t workload_exec_errno;
static volatile sig_atomic_t done;
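/*
 * Both flags are written from signal handlers, hence sig_atomic_t:
 * 'done' asks the main loop to stop, and 'workload_exec_errno' records
 * the errno reported if spawning the workload fails.
 */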
static void sig_handler(int sig __maybe_unused)
{
	done = 1;
}
/*
 * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked by setting its exec_error to the function below,
 * ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info __maybe_unused,
						void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
}
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file, *val_copy;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY | (append ? O_APPEND : O_TRUNC);
	char errbuf[512];

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'. Without this,
	 * the kernel can hide possible errors.
	 */
	val_copy = strdup(val);
	if (!val_copy)
		goto out_close;
	val_copy[size] = '\n';

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}
static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}

static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}
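/*
 * The append variant matters for list-like tracefs files such as
 * set_ftrace_pid and set_ftrace_filter: an O_APPEND write adds one more
 * entry, while a plain (O_TRUNC) write replaces the whole list.
 */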
static int read_tracing_file_to_stdout(const char *name)
{
	char buf[4096];
	char *file;
	int fd, ret = -1;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, buf, sizeof(buf)));
		goto out;
	}

	/* read contents to stdout */
	while (true) {
		int n = read(fd, buf, sizeof(buf));

		if (n == 0)
			break;
		if (n < 0)
			goto out_close;
		if (fwrite(buf, n, 1, stdout) != 1)
			goto out_close;
	}
	ret = 0;

out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}
static int read_tracing_file_by_line(const char *name,
				     void (*cb)(char *str, void *arg),
				     void *cb_arg)
{
	char *line = NULL;
	size_t len = 0;
	char *file;
	FILE *fp;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fp = fopen(file, "r");
	if (fp == NULL) {
		pr_debug("cannot open tracing file: %s\n", name);
		put_tracing_file(file);
		return -1;
	}

	while (getline(&line, &len, fp) != -1)
		cb(line, cb_arg);

	free(line);
	fclose(fp);
	put_tracing_file(file);
	return 0;
}
static int write_tracing_file_int(const char *name, int value)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%d", value);
	if (write_tracing_file(name, buf) < 0)
		return -1;

	return 0;
}
static int write_tracing_option_file(const char *name, const char *val)
{
	char *file;
	int ret;

	if (asprintf(&file, "options/%s", name) < 0)
		return -1;

	ret = __write_tracing_file(file, val, false);
	free(file);
	return ret;
}
static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);

static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
{
	write_tracing_option_file("function-fork", "0");
	write_tracing_option_file("func_stack_trace", "0");
	write_tracing_option_file("sleep-time", "1");
	write_tracing_option_file("funcgraph-irqs", "1");
	write_tracing_option_file("funcgraph-proc", "0");
	write_tracing_option_file("funcgraph-abstime", "0");
	write_tracing_option_file("latency-format", "0");
	write_tracing_option_file("irq-info", "0");
}
static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
	if (write_tracing_file("tracing_on", "0") < 0)
		return -1;

	if (write_tracing_file("current_tracer", "nop") < 0)
		return -1;

	if (write_tracing_file("set_ftrace_pid", " ") < 0)
		return -1;

	if (reset_tracing_cpu() < 0)
		return -1;

	if (write_tracing_file("max_graph_depth", "0") < 0)
		return -1;

	if (write_tracing_file("tracing_thresh", "0") < 0)
		return -1;

	reset_tracing_filters();
	reset_tracing_options(ftrace);
	return 0;
}
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
	int i;
	char buf[16];

	if (target__has_cpu(&ftrace->target))
		return 0;

	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
		scnprintf(buf, sizeof(buf), "%d",
			  perf_thread_map__pid(ftrace->evlist->core.threads, i));
		if (append_tracing_file("set_ftrace_pid", buf) < 0)
			return -1;
	}
	return 0;
}
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
	char *cpumask;
	size_t mask_size;
	int ret;
	int last_cpu;

	last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu;
	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
	mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
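	/*
	 * Example: with last_cpu = 63 the rendered mask is
	 * "ffffffff,ffffffff" -- 63/4 + 2 = 17 bytes covers the 16 hex
	 * digits plus the trailing NUL, and 63/32 = 1 covers the comma.
	 */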
	cpumask = malloc(mask_size);
	if (cpumask == NULL) {
		pr_debug("failed to allocate cpu mask\n");
		return -1;
	}

	cpu_map__snprint_mask(cpumap, cpumask, mask_size);

	ret = write_tracing_file("tracing_cpumask", cpumask);

	free(cpumask);
	return ret;
}
static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
	struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;

	if (!target__has_cpu(&ftrace->target))
		return 0;

	return set_tracing_cpumask(cpumap);
}
static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_stack_trace)
		return 0;

	if (write_tracing_option_file("func_stack_trace", "1") < 0)
		return -1;

	return 0;
}
static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_irq_info)
		return 0;

	if (write_tracing_option_file("irq-info", "1") < 0)
		return -1;

	return 0;
}
static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
	int ret;

	ret = set_tracing_cpumask(cpumap);
	perf_cpu_map__put(cpumap);
	return ret;
}
static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *pos;

	list_for_each_entry(pos, funcs, list) {
		if (append_tracing_file(filter_file, pos->name) < 0)
			return -1;
	}

	return 0;
}
static int set_tracing_filters(struct perf_ftrace *ftrace)
{
	int ret;

	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
	if (ret < 0)
		return ret;

	/* old kernels do not have this filter */
	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);

	return ret;
}
static void reset_tracing_filters(void)
{
	write_tracing_file("set_ftrace_filter", " ");
	write_tracing_file("set_ftrace_notrace", " ");
	write_tracing_file("set_graph_function", " ");
	write_tracing_file("set_graph_notrace", " ");
}
static int set_tracing_depth(struct perf_ftrace *ftrace)
{
	if (ftrace->graph_depth == 0)
		return 0;

	if (ftrace->graph_depth < 0) {
		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
		return -1;
	}

	if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
		return -1;

	return 0;
}
static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->percpu_buffer_size == 0)
		return 0;

	ret = write_tracing_file_int("buffer_size_kb",
				     ftrace->percpu_buffer_size / 1024);
	if (ret < 0)
		return ret;

	return 0;
}
static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
{
	if (!ftrace->inherit)
		return 0;

	if (write_tracing_option_file("function-fork", "1") < 0)
		return -1;

	return 0;
}
static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_nosleep_time)
		return 0;

	if (write_tracing_option_file("sleep-time", "0") < 0)
		return -1;

	return 0;
}
static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_noirqs)
		return 0;

	if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
		return -1;

	return 0;
}
static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_verbose)
		return 0;

	if (write_tracing_option_file("funcgraph-proc", "1") < 0)
		return -1;

	if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
		return -1;

	if (write_tracing_option_file("latency-format", "1") < 0)
		return -1;

	return 0;
}
static int set_tracing_thresh(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->graph_thresh == 0)
		return 0;

	ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
	if (ret < 0)
		return ret;

	return 0;
}
static int set_tracing_options(struct perf_ftrace *ftrace)
{
	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		return -1;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		return -1;
	}

	if (set_tracing_func_stack_trace(ftrace) < 0) {
		pr_err("failed to set tracing option func_stack_trace\n");
		return -1;
	}

	if (set_tracing_func_irqinfo(ftrace) < 0) {
		pr_err("failed to set tracing option irq-info\n");
		return -1;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		return -1;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		return -1;
	}

	if (set_tracing_percpu_buffer_size(ftrace) < 0) {
		pr_err("failed to set tracing per-cpu buffer size\n");
		return -1;
	}

	if (set_tracing_trace_inherit(ftrace) < 0) {
		pr_err("failed to set tracing option function-fork\n");
		return -1;
	}

	if (set_tracing_sleep_time(ftrace) < 0) {
		pr_err("failed to set tracing option sleep-time\n");
		return -1;
	}

	if (set_tracing_funcgraph_irqs(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-irqs\n");
		return -1;
	}

	if (set_tracing_funcgraph_verbose(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
		return -1;
	}

	if (set_tracing_thresh(ftrace) < 0) {
		pr_err("failed to set tracing thresh\n");
		return -1;
	}

	return 0;
}
static void select_tracer(struct perf_ftrace *ftrace)
{
	bool graph = !list_empty(&ftrace->graph_funcs) ||
		     !list_empty(&ftrace->nograph_funcs);
	bool func = !list_empty(&ftrace->filters) ||
		    !list_empty(&ftrace->notrace);

	/* The function_graph has priority over function tracer. */
	if (graph)
		ftrace->tracer = "function_graph";
	else if (func)
		ftrace->tracer = "function";
	/* Otherwise, the default tracer is used. */

	pr_debug("%s tracer is used\n", ftrace->tracer);
}
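/*
 * For example, "perf ftrace -G '*kmem*' -- sleep 1" ends up on
 * function_graph, while "perf ftrace -T schedule -- sleep 1" selects
 * the plain function tracer.
 */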
static int __cmd_ftrace(struct perf_ftrace *ftrace)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	if (!(perf_cap__capable(CAP_PERFMON) ||
	      perf_cap__capable(CAP_SYS_ADMIN))) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	select_tracer(ftrace);

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	if (set_tracing_options(ftrace) < 0)
		goto out_reset;

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);
	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	/* display column headers */
	read_tracing_file_to_stdout("trace");

	if (!ftrace->target.initial_delay) {
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	evlist__start_workload(ftrace->evlist);

	if (ftrace->target.initial_delay > 0) {
		usleep(ftrace->target.initial_delay * 1000);
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));

			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
			/* flush output since stdout is in full buffering mode due to pager */
			fflush(stdout);
		}
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so below error msg appears at the end. */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		goto out_close_fd;
	}

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));

		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}
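/*
 * Latency histogram: NUM_BUCKET log2 buckets.  Bucket 0 roughly holds
 * durations below 1 us (1 ns with --use-nsec), bucket i roughly holds
 * [2^(i-1), 2^i) us, and the last bucket collects every longer sample.
 */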
#define NUM_BUCKET 22 /* 20 + 2 (for outliers in both directions) */

static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf,
			   bool use_nsec)
{
	char *p, *q;
	char *unit;
	double num;
	int i;

	/* ensure NUL termination */
	buf[len] = '\0';

	/* handle data line by line */
	for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) {
		*q = '\0';
		/* move it to the line buffer */
		strcat(linebuf, p);

		/*
		 * parse trace output to get function duration like in
		 *
		 * # tracer: function_graph
		 * #
		 * # CPU  DURATION                  FUNCTION CALLS
		 * # |     |   |                     |   |   |   |
		 *  1) + 10.291 us   |  do_filp_open();
		 *  1)   4.889 us   |  do_filp_open();
		 *  1)   6.086 us   |  do_filp_open();
		 */
		if (linebuf[0] == '#')
			goto next;

		/* skip the CPU column */
		p = strchr(linebuf, ')');
		if (p == NULL)
			p = linebuf;

		while (*p && !isdigit(*p) && (*p != '|'))
			p++;

		/* no duration */
		if (*p == '\0' || *p == '|')
			goto next;

		num = strtod(p, &unit);
		if (!unit || strncmp(unit, " us", 3))
			goto next;

		if (use_nsec)
			num *= 1000;

		i = log2(num);
		if (i < 0)
			i = 0;
		if (i >= NUM_BUCKET)
			i = NUM_BUCKET - 1;

		buckets[i]++;

next:
		/* empty the line buffer for the next output */
		linebuf[0] = '\0';
	}

	/* preserve any remaining output (before newline) */
	strcat(linebuf, p);
}
static void display_histogram(int buckets[], bool use_nsec)
{
	int i;
	int total = 0;
	int bar_total = 46; /* to fit in 80 column */
	char bar[] = "###############################################";
	int bar_len;

	for (i = 0; i < NUM_BUCKET; i++)
		total += buckets[i];

	if (total == 0) {
		printf("No data found\n");
		return;
	}

	printf("# %14s | %10s | %-*s |\n",
	       "  DURATION    ", "COUNT", bar_total, "GRAPH");

	bar_len = buckets[0] * bar_total / total;
	printf("  %4d - %-4d %s | %10d | %.*s%*s |\n",
	       0, 1, use_nsec ? "ns" : "us", buckets[0], bar_len, bar,
	       bar_total - bar_len, "");

	for (i = 1; i < NUM_BUCKET - 1; i++) {
		int start = (1 << (i - 1));
		int stop = 1 << i;
		const char *unit = use_nsec ? "ns" : "us";

		if (start >= 1024) {
			start >>= 10;
			stop >>= 10;
			unit = use_nsec ? "us" : "ms";
		}
		bar_len = buckets[i] * bar_total / total;
		printf("  %4d - %-4d %s | %10d | %.*s%*s |\n",
		       start, stop, unit, buckets[i], bar_len, bar,
		       bar_total - bar_len, "");
	}

	bar_len = buckets[NUM_BUCKET - 1] * bar_total / total;
	printf("  %4d - %-4s %s | %10d | %.*s%*s |\n",
	       1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKET - 1],
	       bar_len, bar, bar_total - bar_len, "");
}
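/*
 * The five helpers below split a latency run into prepare/start/stop/
 * read/cleanup phases.  Each one dispatches to the BPF skeleton
 * implementation when -b/--use-bpf is given, and otherwise drives the
 * tracefs function_graph path.
 */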
static int prepare_func_latency(struct perf_ftrace *ftrace)
{
	char *trace_file;
	int fd;

	if (ftrace->target.use_bpf)
		return perf_ftrace__latency_prepare_bpf(ftrace);

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		return -1;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		return -1;

	if (set_tracing_options(ftrace) < 0)
		return -1;

	/* force to use the function_graph tracer to track duration */
	if (write_tracing_file("current_tracer", "function_graph") < 0) {
		pr_err("failed to set current_tracer to function_graph\n");
		return -1;
	}

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		return -1;
	}

	fd = open(trace_file, O_RDONLY);
	if (fd < 0)
		pr_err("failed to open trace_pipe\n");

	put_tracing_file(trace_file);
	return fd;
}
static int start_func_latency(struct perf_ftrace *ftrace)
{
	if (ftrace->target.use_bpf)
		return perf_ftrace__latency_start_bpf(ftrace);

	if (write_tracing_file("tracing_on", "1") < 0) {
		pr_err("can't enable tracing\n");
		return -1;
	}

	return 0;
}
static int stop_func_latency(struct perf_ftrace *ftrace)
{
	if (ftrace->target.use_bpf)
		return perf_ftrace__latency_stop_bpf(ftrace);

	write_tracing_file("tracing_on", "0");
	return 0;
}
static int read_func_latency(struct perf_ftrace *ftrace, int buckets[])
{
	if (ftrace->target.use_bpf)
		return perf_ftrace__latency_read_bpf(ftrace, buckets);

	return 0;
}
static int cleanup_func_latency(struct perf_ftrace *ftrace)
{
	if (ftrace->target.use_bpf)
		return perf_ftrace__latency_cleanup_bpf(ftrace);

	reset_tracing_files(ftrace);
	return 0;
}
static int __cmd_latency(struct perf_ftrace *ftrace)
{
	int trace_fd;
	char buf[4096];
	char line[256] = "";
	struct pollfd pollfd = {
		.events = POLLIN,
	};
	int buckets[NUM_BUCKET] = { };

	if (!(perf_cap__capable(CAP_PERFMON) ||
	      perf_cap__capable(CAP_SYS_ADMIN))) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	trace_fd = prepare_func_latency(ftrace);
	if (trace_fd < 0)
		goto out;

	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	if (start_func_latency(ftrace) < 0)
		goto out;

	evlist__start_workload(ftrace->evlist);

	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf) - 1);

			if (n < 0)
				break;

			make_histogram(buckets, buf, n, line, ftrace->use_nsec);
		}
	}

	stop_func_latency(ftrace);

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		pr_err("workload failed: %s\n", emsg);
		goto out;
	}

	/* read remaining buffer contents */
	while (!ftrace->target.use_bpf) {
		int n = read(trace_fd, buf, sizeof(buf) - 1);

		if (n <= 0)
			break;
		make_histogram(buckets, buf, n, line, ftrace->use_nsec);
	}

	read_func_latency(ftrace, buckets);

	display_histogram(buckets, ftrace->use_nsec);

out:
	close(trace_fd);
	cleanup_func_latency(ftrace);

	return (done && !workload_exec_errno) ? 0 : -1;
}
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
	struct perf_ftrace *ftrace = cb;

	if (!strstarts(var, "ftrace."))
		return 0;

	if (strcmp(var, "ftrace.tracer"))
		return -1;

	if (!strcmp(value, "function_graph") ||
	    !strcmp(value, "function")) {
		ftrace->tracer = value;
		return 0;
	}

	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
	return -1;
}
static void list_function_cb(char *str, void *arg)
{
	struct strfilter *filter = (struct strfilter *)arg;

	if (strfilter__compare(filter, str))
		printf("%s", str);
}
static int opt_list_avail_functions(const struct option *opt __maybe_unused,
				    const char *str, int unset)
{
	struct strfilter *filter;
	const char *err = NULL;
	int ret;

	if (unset || !str)
		return -1;

	filter = strfilter__new(str, &err);
	if (!filter)
		return err ? -EINVAL : -ENOMEM;

	ret = strfilter__or(filter, str, &err);
	if (ret == -EINVAL) {
		pr_err("Filter parse error at %td.\n", err - str + 1);
		pr_err("Source: \"%s\"\n", str);
		pr_err("         %*c\n", (int)(err - str + 1), '^');
		strfilter__delete(filter);
		return ret;
	}

	ret = read_tracing_file_by_line("available_filter_functions",
					list_function_cb, filter);
	strfilter__delete(filter);

	return ret;
}
static int parse_filter_func(const struct option *opt, const char *str,
			     int unset __maybe_unused)
{
	struct list_head *head = opt->value;
	struct filter_entry *entry;

	entry = malloc(sizeof(*entry) + strlen(str) + 1);
	if (entry == NULL)
		return -ENOMEM;

	strcpy(entry->name, str);
	list_add_tail(&entry->list, head);

	return 0;
}
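/*
 * struct filter_entry (util/ftrace.h) ends with a flexible array member
 * for the name, so the single allocation above holds both the list node
 * and the copied function name.
 */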
static void delete_filter_func(struct list_head *head)
{
	struct filter_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del_init(&pos->list);
		free(pos);
	}
}
static int parse_buffer_size(const struct option *opt,
			     const char *str, int unset)
{
	unsigned long *s = (unsigned long *)opt->value;
	static struct parse_tag tags_size[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};
	unsigned long val;

	if (unset) {
		*s = 0;
		return 0;
	}

	val = parse_tag_value(str, tags_size);
	if (val != (unsigned long) -1) {
		if (val < 1024) {
			pr_err("buffer size too small, must be larger than 1KB.");
			return -1;
		}
		*s = val;
		return 0;
	}

	return -1;
}
static int parse_func_tracer_opts(const struct option *opt,
				  const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option func_tracer_opts[] = {
		{ .name = "call-graph",	.value_ptr = &ftrace->func_stack_trace },
		{ .name = "irq-info",	.value_ptr = &ftrace->func_irq_info },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, func_tracer_opts);
	if (ret)
		return ret;

	return 0;
}
static int parse_graph_tracer_opts(const struct option *opt,
				   const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option graph_tracer_opts[] = {
		{ .name = "nosleep-time",	.value_ptr = &ftrace->graph_nosleep_time },
		{ .name = "noirqs",		.value_ptr = &ftrace->graph_noirqs },
		{ .name = "verbose",		.value_ptr = &ftrace->graph_verbose },
		{ .name = "thresh",		.value_ptr = &ftrace->graph_thresh },
		{ .name = "depth",		.value_ptr = &ftrace->graph_depth },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, graph_tracer_opts);
	if (ret)
		return ret;

	return 0;
}
enum perf_ftrace_subcommand {
	PERF_FTRACE_NONE,
	PERF_FTRACE_TRACE,
	PERF_FTRACE_LATENCY,
};
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	int (*cmd_func)(struct perf_ftrace *) = NULL;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const struct option common_options[] = {
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "Trace on existing process id"),
	/* TODO: Add short option -t after -t/--tracer can be removed. */
	OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
		   "Trace on existing thread id (exclusive to --pid)"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "System-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		   "List of cpus to monitor"),
	OPT_END()
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "Tracer to use: function_graph(default) or function"),
	OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
			     "Show available functions to filter",
			     opt_list_avail_functions, "*"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "Trace given functions using function tracer",
		     parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "Do not trace given functions", parse_filter_func),
	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
		     "Function tracer options, available options: call-graph,irq-info",
		     parse_func_tracer_opts),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Trace given functions using function_graph tracer",
		     parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
		     "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
		     parse_graph_tracer_opts),
	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
		     "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
	OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
		    "Trace children processes"),
	OPT_INTEGER('D', "delay", &ftrace.target.initial_delay,
		    "Number of milliseconds to wait before starting tracing after program start"),
	OPT_PARENT(common_options),
	OPT_END()
	};
	const struct option latency_options[] = {
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "Show latency of given function", parse_filter_func),
#ifdef HAVE_BPF_SKEL
	OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf,
		    "Use BPF to measure function latency"),
#endif
	OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec,
		    "Use nano-second histogram"),
	OPT_PARENT(common_options),
	OPT_END()
	};
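	/*
	 * Typical invocations (illustrative; "./workload" is a placeholder):
	 *
	 *	perf ftrace trace -G '*kmem*' -- sleep 1
	 *	perf ftrace latency -T mutex_lock -n -- ./workload
	 */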
	const struct option *options = ftrace_options;

	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- [<command>] [<options>]",
		"perf ftrace {trace|latency} [<options>] [<command>]",
		"perf ftrace {trace|latency} [<options>] -- [<command>] [<options>]",
		NULL
	};
	enum perf_ftrace_subcommand subcmd = PERF_FTRACE_NONE;
	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);
	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;
	if (argc > 1) {
		if (!strcmp(argv[1], "trace")) {
			subcmd = PERF_FTRACE_TRACE;
		} else if (!strcmp(argv[1], "latency")) {
			subcmd = PERF_FTRACE_LATENCY;
			options = latency_options;
		}

		if (subcmd != PERF_FTRACE_NONE) {
			argc--;
			argv++;
		}
	}

	/* for backward compatibility */
	if (subcmd == PERF_FTRACE_NONE)
		subcmd = PERF_FTRACE_TRACE;
	argc = parse_options(argc, argv, options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (argc < 0) {
		ret = -EINVAL;
		goto out_delete_filters;
	}
	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&ftrace.target))
		ftrace.target.system_wide = true;
	switch (subcmd) {
	case PERF_FTRACE_TRACE:
		cmd_func = __cmd_ftrace;
		break;
	case PERF_FTRACE_LATENCY:
		if (list_empty(&ftrace.filters)) {
			pr_err("Should provide a function to measure\n");
			parse_options_usage(ftrace_usage, options, "T", 1);
			ret = -EINVAL;
			goto out_delete_filters;
		}
		cmd_func = __cmd_latency;
		break;
	case PERF_FTRACE_NONE:
	default:
		pr_err("Invalid subcommand\n");
		ret = -EINVAL;
		goto out_delete_filters;
	}
	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}
	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}
	ret = evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;
	if (argc) {
		ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target,
					       argv, false,
					       ftrace__workload_exec_failed_signal);
		if (ret < 0)
			goto out_delete_evlist;
	}
	ret = cmd_func(&ftrace);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}