/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

/* Accessors for the per-evsel fd and sample_id tables, indexed by cpu x thread */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
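
/*
 * Editor's sketch: with an evsel opened on 2 cpus and 3 threads,
 * FD(evsel, 1, 2) yields the perf_event fd for the second cpu and third
 * thread, and SID(evsel, 1, 2) the matching struct perf_sample_id slot:
 *
 *	int fd = FD(evsel, 1, 2);
 *	struct perf_sample_id *sid = SID(evsel, 1, 2);
 */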
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}
void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;

	/* A dummy cpu map means per-task counters: inheritance is pointless */
	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		/* With more than one event, samples need an ID to be demuxed */
		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	leader->leader = NULL;

	list_for_each_entry(evsel, list, node) {
		if (evsel != leader)
			evsel->leader = leader;
	}
}
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries)
		__perf_evlist__set_leader(&evlist->entries);
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
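
/*
 * Usage sketch (editor's illustration, error handling elided): a typical
 * caller falls back to the default cycles event when the command line
 * named none:
 *
 *	struct perf_evlist *evlist = perf_evlist__new(cpus, threads);
 *
 *	if (evlist->nr_entries == 0 && perf_evlist__add_default(evlist) < 0)
 *		return -ENOMEM;
 */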
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
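
/*
 * Usage sketch (editor's illustration; the handler name is hypothetical):
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch) < 0)
 *		return -1;
 */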
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			/* Group members are disabled through their leader */
			if (perf_evsel__is_group_member(pos))
				continue;
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			/* Group members are enabled through their leader */
			if (perf_evsel__is_group_member(pos))
				continue;
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}
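
/*
 * Worked example (editor's note): with read_format = PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING, the
 * read() above fills read_data as:
 *
 *	read_data[0] = counter value
 *	read_data[1] = time_enabled
 *	read_data[2] = time_running
 *	read_data[3] = id
 *
 * so id_idx ends up at 3, matching the perf_event_open(2) read layout.
 */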
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	/* Without sample_id_all the stream can't carry IDs, so it has one event */
	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			/* Copy the wrapped event piecewise into the linear copy buffer */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
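
/*
 * Consumption sketch (editor's illustration), assuming a non-overwrite
 * mapping so the tail is advanced for us; deliver() is hypothetical:
 *
 *	union perf_event *event;
 *	struct perf_sample sample;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *			deliver(event, &sample);
 *	}
 */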
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				/* First fd on this cpu owns the ring, the rest redirect to it */
				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) {
					goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			/* First fd on this thread owns the ring, the rest redirect to it */
			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) {
				goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}
/** perf_evlist__mmap - Create per cpu maps to receive events.
 *
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__all(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
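
/*
 * Setup sketch (editor's illustration): the usual record-style sequence,
 * error handling elided; opts is assumed to be a struct perf_record_opts:
 *
 *	perf_evlist__create_maps(evlist, &opts->target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *
 * Passing UINT_MAX as pages selects the 512 kiB default sized ring above.
 */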
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}
u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}
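
/*
 * Worked example (editor's note): with sample_id_all set and sample_type =
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID | PERF_SAMPLE_CPU,
 * the id header appended to every non-sample event is
 * 8 (pid + tid) + 8 (time) + 8 (id) + 8 (cpu + res) = 32 bytes.
 */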
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err, ncpus, nthreads;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	/* Roll back: close whatever was already opened, in reverse order */
	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/* Tell the parent we're ready to go */
		close(child_ready_pipe[1]);

		/* Wait until the parent tells us to go. */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);

	/* wait for child to settle */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}
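
/*
 * Usage sketch (editor's illustration) of the cork protocol from the
 * caller's side, error handling elided:
 *
 *	perf_evlist__prepare_workload(evlist, opts, argv); // fork, child waits
 *	perf_evlist__open(evlist);	// counters can target the stopped child
 *	perf_evlist__start_workload(evlist);	// uncork: child exec()s argv
 */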
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}