// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
#include "internal.h"
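/*
 * Initialize an already-allocated evlist: no evsels, room for 64 poll
 * fds and an empty sample ID hash.
 */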
void perf_evlist__init(struct perf_evlist *evlist)
{
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
	perf_evlist__reset_id_hash(evlist);
}
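/*
 * Work out which cpu and thread maps an evsel will actually use:
 * system-wide evsels get all online CPUs and a dummy thread map;
 * otherwise the user-requested maps or the PMU's own cpu map take
 * precedence. The result is merged into evlist->all_cpus.
 */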
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (evsel->system_wide) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__new(NULL);
	} else if (!evsel->own_cpus || evlist->has_user_cpus ||
		   (!evsel->requires_cpu && perf_cpu_map__empty(evlist->user_requested_cpus))) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	if (evsel->system_wide) {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__new_dummy();
	} else {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__get(evlist->threads);
	}

	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist->needs_map_propagation = true;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}
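/*
 * Append an evsel to the evlist; if maps were already propagated,
 * propagate them to the newcomer too so it stays consistent.
 */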
void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	evsel->idx = evlist->nr_entries;
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;

	if (evlist->needs_map_propagation)
		__perf_evlist__propagate_maps(evlist, evsel);
}
void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}
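/*
 * Typical evlist lifecycle, as a sketch only (error handling omitted;
 * creating the evsel with perf_evsel__new() happens outside this file):
 *
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_thread_map__set_pid(threads, 0, 0);	// measure this process
 *	perf_evlist__add(evlist, evsel);
 *	perf_evlist__set_maps(evlist, NULL, threads);
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	// ... workload ...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 *	perf_thread_map__put(threads);
 */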
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}
struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->user_requested_cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->user_requested_cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it. Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1. If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->user_requested_cpus) {
		perf_cpu_map__put(evlist->user_requested_cpus);
		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}
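/* Open all evsels on their propagated maps; close everything on first failure. */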
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}
void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}
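/*
 * Sample IDs live in a per-evsel (cpu x thread) xyarray and are hashed
 * into evlist->heads so events can be mapped back to their evsel.
 */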
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
}
void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
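/*
 * Obtain the kernel-assigned ID for an event fd, preferably via the
 * PERF_EVENT_IOC_ID ioctl, falling back to parsing a read() of the
 * counter on kernels that predate the ioctl.
 */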
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
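/* One pollable fd per (cpu, thread) pair, or per cpu for system-wide evsels. */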
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}
int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}
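/* Allocate nr_mmaps ring buffer slots, each linked to the previous one. */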
static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}
static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}
static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, struct perf_cpu cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}
static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		enum fdarray_flags flgs;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->idx)
				ops->idx(evlist, evsel, mp, idx);

			/* Debug message used by test scripts */
			pr_debug("idx %d: mmapping fd %d\n", idx, *output);
			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			*nr_mmaps += 1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Debug message used by test scripts */
			pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
		if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}
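/*
 * Used when the "any CPU" (-1) map entry is in use: one mmap per
 * thread, plus one per real CPU for any system-wide evsels in the
 * list (CPU index 0 is the -1 entry, hence the loop from 1).
 */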
static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int cpu, thread, idx = 0;
	int nr_mmaps = 0;

	pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
		 __func__, nr_cpus, nr_threads);

	/* per-thread mmaps */
	for (thread = 0; thread < nr_threads; thread++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* system-wide mmaps i.e. per-cpu */
	for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}
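/* One mmap per CPU; every thread's fd on that CPU is redirected into it. */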
static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_mmaps = 0;
	int cpu, thread;

	pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite, &nr_mmaps))
				goto out_unmap;
		}
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}
static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	/* One for each CPU */
	nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
	if (perf_cpu_map__empty(evlist->all_cpus)) {
		/* Plus one for each thread */
		nr_mmaps += perf_thread_map__nr(evlist->threads);
		/* Minus the per-thread CPU (-1) */
		nr_mmaps -= 1;
	}

	return nr_mmaps;
}
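/*
 * Generic mmap entry point: size the ring buffer array, allocate
 * sample ID storage and the pollfd array if needed, then map either
 * per-thread or per-cpu.
 */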
int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	const struct perf_cpu_map *cpus = evlist->all_cpus;
	struct perf_evsel *evsel;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}
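/*
 * Convenience wrapper using the default callbacks above. @pages is the
 * number of data pages per ring buffer (one extra page holds the
 * control header); since mp.mask is derived from it, it is expected to
 * be a power of two, e.g.:
 *
 *	perf_evlist__mmap(evlist, 4);	/" 4 data pages + 1 header page each "/
 */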
int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}
struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}
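/* Make @leader lead every evsel on @list and record the member count. */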
void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
	struct perf_evsel *evsel;
	int n = 0;

	__perf_evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
		n++;
	}

	leader->nr_members = n;
}
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		struct perf_evsel *first = list_entry(evlist->entries.next,
						struct perf_evsel, node);

		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries, first);
	}
}