// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
	evsel->idx = idx;
	evsel->leader = evsel;
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, 0);

	return evsel;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}
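
/*
 * Per-(cpu, thread) accessors into the evsel's xyarray storage: FD() returns
 * a pointer to the file descriptor slot, MMAP() the corresponding ring-buffer
 * mapping (or NULL when no mmap array has been allocated).
 */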
#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;

		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				int *fd = FD(evsel, cpu, thread);

				if (fd)
					*fd = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));

	return evsel->mmap != NULL ? 0 : -ENOMEM;
}

static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}
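
/*
 * Resolve the group_fd argument for sys_perf_event_open(): a group leader
 * opens with group_fd == -1, while group members pass the already-open fd
 * of their leader for the same (cpu, thread) slot.
 */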
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
{
	struct perf_evsel *leader = evsel->leader;
	int *fd;

	if (evsel == leader) {
		*group_fd = -1;
		return 0;
	}

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	if (!leader->fd)
		return -ENOTCONN;

	fd = FD(leader, cpu, thread);
	if (fd == NULL || *fd == -1)
		return -EBADF;

	*group_fd = *fd;

	return 0;
}
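
/*
 * Open one perf event fd per (cpu, thread) pair described by the cpu and
 * thread maps; NULL maps fall back to lazily allocated single-entry dummy
 * maps ("any cpu" / "any thread").
 */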
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	int cpu, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd, group_fd, *evsel_fd;

			evsel_fd = FD(evsel, cpu, thread);
			if (evsel_fd == NULL)
				return -EINVAL;

			err = get_group_fd(evsel, cpu, thread, &group_fd);
			if (err < 0)
				return err;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpus->map[cpu], group_fd, 0);

			if (fd < 0)
				return -errno;

			*evsel_fd = fd;
		}
	}

	return err;
}

static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		int *fd = FD(evsel, cpu, thread);

		if (fd && *fd >= 0) {
			close(*fd);
			*fd = -1;
		}
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	int cpu;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
		perf_evsel__close_fd_cpu(evsel, cpu);
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu);
}

void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int cpu, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, cpu, thread);

			if (fd == NULL || *fd < 0)
				continue;

			perf_mmap__munmap(MMAP(evsel, cpu, thread));
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}
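
/*
 * Map the perf ring buffer (user page plus 'pages' data pages) for every
 * open fd, so counters can later be read directly from user space via
 * perf_mmap__read_self() in perf_evsel__read().
 */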
int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, cpu, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, cpu, thread);
			struct perf_mmap *map;

			if (fd == NULL || *fd < 0)
				continue;

			map = MMAP(evsel, cpu, thread);
			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, *fd, cpu);
			if (ret) {
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}

void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
{
	int *fd = FD(evsel, cpu, thread);

	if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
		return NULL;

	return MMAP(evsel, cpu, thread)->base;
}
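
/*
 * Compute the number of bytes a read() on the event fd will return. The size
 * depends on attr.read_format: optional enabled/running times, an optional id
 * per value, and one value entry per group member for PERF_FORMAT_GROUP.
 */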
int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}
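
/*
 * Read the counter for one (cpu, thread): prefer the lock-free user-space
 * read through the mmapped user page when a mapping exists, and fall back
 * to a read() on the event fd otherwise.
 */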
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);
	int *fd = FD(evsel, cpu, thread);

	memset(count, 0, sizeof(*count));

	if (fd == NULL || *fd < 0)
		return -EINVAL;

	if (MMAP(evsel, cpu, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
		return 0;

	if (readn(*fd, count->values, size) <= 0)
		return -errno;

	return 0;
}
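
/*
 * Issue an ioctl on every thread's fd for the given cpu; used to implement
 * the enable/disable/filter operations below.
 */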
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg,
				 int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int err;
		int *fd = FD(evsel, cpu, thread);

		if (fd == NULL || *fd < 0)
			return -1;

		err = ioctl(*fd, ioc, arg);

		if (err)
			return err;
	}

	return 0;
}

int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < evsel->cpus->nr && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
					    PERF_EVENT_IOC_SET_FILTER,
					    (void *)filter, i);
	return err;
}

struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}
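
/*
 * Allocate the per-(cpu, thread) sample_id slots and the flat id array used
 * to map kernel-assigned event IDs back to this evsel; system-wide events
 * only need one slot per cpu.
 */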
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}
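
/*
 * Illustrative usage sketch (not part of the library): counting one software
 * event for the current process with the API above. perf_thread_map__set_pid()
 * and perf_thread_map__put() come from the libperf threadmap API and are
 * assumptions here; error handling is omitted for brevity.
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_counts_values counts = { .val = 0 };
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	perf_thread_map__set_pid(threads, 0, 0);	// measure this process
 *	perf_evsel__open(evsel, NULL, threads);		// NULL cpus -> dummy map
 *	perf_evsel__enable(evsel);
 *	// ... run the workload ...
 *	perf_evsel__disable(evsel);
 *	perf_evsel__read(evsel, 0, 0, &counts);		// cpu 0, thread 0
 *	perf_evsel__close(evsel);
 *	perf_evsel__delete(evsel);
 *	perf_thread_map__put(threads);
 */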