 * Builtin regression testing command: an ever-growing number of sanity tests
#include "util/cache.h"
#include "util/color.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "event-parse.h"
#include "../../include/linux/hw_breakpoint.h"
#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>
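
/*
 * Helper for test__PERF_RECORD below: look up the workload's affinity mask
 * and return the first possible CPU found in it.  The caller then pins the
 * forked workload to that CPU with sched_setaffinity() so that every
 * sample's perf_sample.cpu can be checked against a single, known value.
 */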
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
	int i, cpu = -1, nrcpus = 1024;

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
		perror("sched_getaffinity");

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {

static int test__PERF_RECORD(void)
	struct perf_record_opts opts = {
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");

	/*
	 * We need at least one evsel in the evlist, use the default
	 */
	err = perf_evlist__add_default(evlist);
		pr_debug("Not enough memory to create evsel\n");
		goto out_delete_evlist;

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	/*
	 * Prepare the workload in argv[] to run: it will be forked, and the
	 * child then waits for perf_evlist__start_workload() to exec it.
	 * Doing it this way gives us time to open the evlist (calling
	 * sys_perf_event_open on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts, argv);
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
	evsel->attr.sample_type |= PERF_SAMPLE_TID;
	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
	perf_evlist__config_attrs(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_delete_evlist;

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	/*
	 * Now that all is properly set up, enable the events; they will
	 * count only on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	perf_evlist__start_workload(evlist);
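
	/*
	 * Drain every mmap ring and parse each event: count events per
	 * PERF_RECORD_* type, check that timestamps never go backwards,
	 * that cpu/pid/tid match the pinned workload, and remember which
	 * PERF_RECORD_MMAP events (cmd, libc, ld, [vdso]) were seen.
	 */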
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				if (type < PERF_RECORD_MAX)

				err = perf_evlist__parse_sample(evlist, event, &sample);
					perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");

					pr_info("%" PRIu64 " %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);

				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);

				case PERF_RECORD_EXIT:
				case PERF_RECORD_MMAP:
					bname = strrchr(event->mmap.filename, '/');
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					pr_debug("Unexpected perf_event->header.type %d!\n",
		/*
		 * We don't use poll here because, at least as of 3.1, the
		 * non-sample PERF_RECORD_* events don't honour
		 * perf_event_attr.wakeup_events; only PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);
297 pr_debug("No PERF_RECORD_EXIT event!\n");
303 if (nr_events[PERF_RECORD_COMM] > 1) {
304 pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
308 if (nr_events[PERF_RECORD_COMM] == 0) {
309 pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
313 if (!found_cmd_mmap) {
314 pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
318 if (!found_libc_mmap) {
319 pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
323 if (!found_ld_mmap) {
324 pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
328 if (!found_vdso_mmap) {
329 pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
333 perf_evlist__munmap(evlist);
335 perf_evlist__delete(evlist);
337 return (err < 0 || errs > 0) ? -1 : 0;
#if defined(__x86_64__) || defined(__i386__)

#define barrier() asm volatile("" ::: "memory")
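
/*
 * Note: barrier() above is a compiler-only barrier (no CPU fence): the empty
 * asm with a "memory" clobber just stops the compiler from reordering memory
 * accesses across it, which is what the pc->lock retry loop in
 * mmap_read_self() below relies on.
 */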
static u64 rdpmc(unsigned int counter)
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((u64)high) << 32;

static u64 rdtsc(void)
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((u64)high) << 32;
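
/*
 * Read the counter directly from user space via the mmap'ed
 * struct perf_event_mmap_page: retry while pc->lock changes underneath us
 * (seqlock style), read the raw count with rdpmc(idx - 1), and, when the
 * event was not running for the whole time it was enabled
 * (time_enabled != time_running), scale the elapsed cycles into the same
 * units as time_enabled/time_running with
 *
 *	time = time_offset + (cyc * time_mult) >> time_shift
 *
 * (computed as quotient + remainder below to avoid 64-bit overflow) and
 * then scale count by enabled/running to estimate the full-period value.
 */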
static u64 mmap_read_self(void *addr)
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (enabled != running) {
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;

			count += rdpmc(idx - 1);

	} while (pc->lock != seq);

	if (enabled != running) {

		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;

/*
 * If the RDPMC instruction faults then signal this back to the test parent task:
 */
static void segfault_handler(int sig __maybe_unused,
			     siginfo_t *info __maybe_unused,
			     void *uc __maybe_unused)

static int __test__rdpmc(void)
	volatile int tmp = 0;

	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_INSTRUCTIONS,

	sigfillset(&sa.sa_mask);
	sa.sa_sigaction = segfault_handler;
	sigaction(SIGSEGV, &sa, NULL);

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n", fd, strerror(errno));

	addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == (void *)(-1)) {
		pr_err("Error: mmap() syscall returned with (%s)\n",

	for (n = 0; n < 6; n++) {
		u64 stamp, now, delta;

		stamp = mmap_read_self(addr);

		for (i = 0; i < loops; i++)

		now = mmap_read_self(addr);
466 pr_debug("%14d: %14Lu\n", n, (long long)delta);
471 munmap(addr, page_size);
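
/*
 * Run __test__rdpmc() in a forked child (see the waitpid() below) so that,
 * if RDPMC faults, for instance when user space counter access is disabled,
 * the resulting SIGSEGV is confined to the child and reported as a test
 * failure instead of taking down perf itself.
 */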
static int test__rdpmc(void)
		ret = __test__rdpmc();

	wret = waitpid(pid, &status, 0);
	if (wret < 0 || status)

static int test__perf_pmu(void)
	return perf_pmu__test();
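
/*
 * Round-trip check for evsel names: for every valid hw-cache type/op/result
 * combination, generate its canonical name with
 * __perf_evsel__hw_cache_type_op_res_name(), feed that string back through
 * parse_events(), and verify that perf_evsel__name() on the resulting evsel
 * reproduces the same string.
 */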
static int perf_evsel__roundtrip_cache_name_test(void)
	int type, op, err = 0, ret = 0, i, idx;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
				err = parse_events(evlist, name, 0);

	evsel = perf_evlist__first(evlist);

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
				if (evsel->idx != idx)

				if (strcmp(perf_evsel__name(evsel), name)) {
					pr_debug("%s != %s\n", perf_evsel__name(evsel), name);

				evsel = perf_evsel__next(evsel);

	perf_evlist__delete(evlist);

static int __perf_evsel__name_array_test(const char *names[], int nr_names)
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	for (i = 0; i < nr_names; ++i) {
		err = parse_events(evlist, names[i], 0);
			pr_debug("failed to parse event '%s', err %d\n",
			goto out_delete_evlist;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
			pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);

	perf_evlist__delete(evlist);

#define perf_evsel__name_array_test(names) \
	__perf_evsel__name_array_test(names, ARRAY_SIZE(names))

static int perf_evsel__roundtrip_name_test(void)
	int err = 0, ret = 0;

	err = perf_evsel__name_array_test(perf_evsel__hw_names);

	err = perf_evsel__name_array_test(perf_evsel__sw_names);

	err = perf_evsel__roundtrip_cache_name_test();
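
/*
 * Check one field of a tracepoint evsel's format: it must exist, have the
 * expected size in bytes and, when should_be_signed is set, carry the
 * FIELD_IS_SIGNED flag.
 */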
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
				  int size, bool should_be_signed)
	struct format_field *field = perf_evsel__field(evsel, name);

		pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
	is_signed = !!(field->flags & FIELD_IS_SIGNED);
	if (should_be_signed && !is_signed) {
		pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
			 evsel->name, name, is_signed, should_be_signed);

	if (field->size != size) {
		pr_debug("%s: \"%s\" size (%d) should be %d!\n",
			 evsel->name, name, field->size, size);

static int perf_evsel__tp_sched_test(void)
	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);

		pr_debug("perf_evsel__new\n");

	if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
	if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
	if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
	if (perf_evsel__test_field(evsel, "prev_state", 8, true))
	if (perf_evsel__test_field(evsel, "next_comm", 16, true))
	if (perf_evsel__test_field(evsel, "next_pid", 4, true))
	if (perf_evsel__test_field(evsel, "next_prio", 4, true))

	perf_evsel__delete(evsel);

	evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);

	if (perf_evsel__test_field(evsel, "comm", 16, true))
	if (perf_evsel__test_field(evsel, "pid", 4, true))
	if (perf_evsel__test_field(evsel, "prio", 4, true))
	if (perf_evsel__test_field(evsel, "success", 4, true))
	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
static int test__syscall_open_tp_fields(void)
	struct perf_record_opts opts = {
	const char *filename = "/etc/passwd";
	int flags = O_RDONLY | O_DIRECTORY;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, nr_polls = 0;

	if (evlist == NULL) {
		pr_debug("%s: perf_evlist__new\n", __func__);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
		pr_debug("%s: perf_evsel__newtp\n", __func__);
		goto out_delete_evlist;

	perf_evlist__add(evlist, evsel);

	err = perf_evlist__create_maps(evlist, &opts.target);
		pr_debug("%s: perf_evlist__create_maps\n", __func__);
		goto out_delete_evlist;

	perf_evsel__config(evsel, &opts, evsel);

	evlist->threads->map[0] = getpid();

	err = perf_evlist__open(evlist);
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;

	perf_evlist__enable(evlist);

	/*
	 * Generate the event:
	 */
	open(filename, flags);
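
	/*
	 * Note: with O_DIRECTORY in the flags, this open() of a regular file
	 * is expected to fail, but that is fine: sys_enter_open fires at
	 * syscall entry, so the sample still carries the requested flags.
	 */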
		int before = nr_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				struct perf_sample sample;

				if (type != PERF_RECORD_SAMPLE)

				err = perf_evsel__parse_sample(evsel, event, &sample);
					pr_err("Can't parse sample, err = %d\n", err);

				tp_flags = perf_evsel__intval(evsel, &sample, "flags");

				if (flags != tp_flags) {
					pr_debug("%s: Expected flags=%#x, got %#x\n",
						 __func__, flags, tp_flags);

		if (nr_events == before)
			poll(evlist->pollfd, evlist->nr_fds, 10);

		if (++nr_polls > 5) {
			pr_debug("%s: no events!\n", __func__);

	perf_evlist__munmap(evlist);
	perf_evlist__delete(evlist);
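
/*
 * Table of sanity tests run by 'perf test': each entry pairs a
 * human-readable description (tests[].desc) with the function that
 * implements the test (tests[].func).
 */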
821 .desc = "vmlinux symtab matches kallsyms",
822 .func = test__vmlinux_matches_kallsyms,
825 .desc = "detect open syscall event",
826 .func = test__open_syscall_event,
829 .desc = "detect open syscall event on all cpus",
830 .func = test__open_syscall_event_on_all_cpus,
833 .desc = "read samples using the mmap interface",
834 .func = test__basic_mmap,
837 .desc = "parse events tests",
838 .func = parse_events__test,
840 #if defined(__x86_64__) || defined(__i386__)
842 .desc = "x86 rdpmc test",
847 .desc = "Validate PERF_RECORD_* events & perf_sample fields",
848 .func = test__PERF_RECORD,
851 .desc = "Test perf pmu format parsing",
852 .func = test__perf_pmu,
855 .desc = "Test dso data interface",
856 .func = dso__test_data,
859 .desc = "roundtrip evsel->name check",
860 .func = perf_evsel__roundtrip_name_test,
863 .desc = "Check parsing of sched tracepoints fields",
864 .func = perf_evsel__tp_sched_test,
867 .desc = "Generate and check syscalls:sys_enter_open event fields",
868 .func = test__syscall_open_tp_fields,
871 .desc = "struct perf_event_attr setup",
872 .func = test_attr__run,
static bool perf_test__matches(int curr, int argc, const char *argv[])
	for (i = 0; i < argc; ++i) {
		long nr = strtoul(argv[i], &end, 10);

		if (strstr(tests[curr].desc, argv[i]))
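
/*
 * perf_test__matches() above accepts either a test number (parsed with
 * strtoul()) or a substring of the test description.  __cmd_test() below
 * sizes the output column from the longest description, then runs every
 * matching test and prints its result, flagging failures in red.
 */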
static int __cmd_test(int argc, const char *argv[])
	while (tests[i].func) {
		int len = strlen(tests[i].desc);

	while (tests[i].func) {
		if (!perf_test__matches(curr, argc, argv))

		pr_info("%2d: %-*s:", i, width, tests[curr].desc);
		pr_debug("\n--- start ---\n");
		err = tests[curr].func();
		pr_debug("---- end ----\n%s:", tests[curr].desc);
			color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");

static int perf_test__list(int argc, const char **argv)
	while (tests[i].func) {
		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))

		pr_info("%2d: %s\n", i, tests[curr].desc);

int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
	const char * const test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	const struct option test_options[] = {
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),

	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc, argv);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)

	return __cmd_test(argc, argv);