// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
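
/*
 * Set up an evsel embedded in caller-provided storage: an empty list
 * node, a copy of the attr, and the evsel as its own group leader until
 * it is added to an event group.
 */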
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
	evsel->idx  = idx;
	evsel->leader = evsel;
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, 0);

	return evsel;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}
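
/*
 * FD()/MMAP() index the per-cpu x per-thread tables backing an evsel:
 * one event file descriptor and (optionally) one ring-buffer mmap per
 * (cpu, thread) pair.
 */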
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
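
/* Allocate the fd table and mark every slot as not yet opened (-1). */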
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;

		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));

	return evsel->mmap != NULL ? 0 : -ENOMEM;
}
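
/*
 * Thin wrapper around the perf_event_open(2) syscall, for which the C
 * library provides no wrapper.
 */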
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}
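
/*
 * perf_event_open() expects the fd of the group leader as group_fd for
 * every group member, and -1 for the leader itself.  Look it up in the
 * leader's fd table for the same (cpu, thread) slot.
 */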
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (evsel == leader) {
		*group_fd = -1;
		return 0;
	}

	/* Leader must be already processed/open, if not it's a bug. */
	if (!leader->fd)
		return -ENOTCONN;

	fd = FD(leader, cpu, thread);
	if (fd == -1)
		return -EBADF;

	*group_fd = fd;
	return 0;
}
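
/*
 * Open one event fd per (cpu, thread) pair described by the two maps.
 * NULL maps fall back to static dummy maps, so perf_evsel__open(evsel,
 * NULL, NULL) opens a single fd for the calling thread on any cpu.
 *
 * A minimal caller sketch (not part of this file, using only the public
 * libperf API declared in <perf/evsel.h>):
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct perf_counts_values counts;
 *
 *	if (evsel && !perf_evsel__open(evsel, NULL, NULL)) {
 *		perf_evsel__enable(evsel);
 *		(run the workload to be measured)
 *		perf_evsel__disable(evsel);
 *		perf_evsel__read(evsel, 0, 0, &counts);
 *		perf_evsel__close(evsel);
 *	}
 *	perf_evsel__delete(evsel);
 */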
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	int cpu, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd, group_fd;

			err = get_group_fd(evsel, cpu, thread, &group_fd);
			if (err < 0)
				return err;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpus->map[cpu], group_fd, 0);

			if (fd < 0)
				return -errno;

			FD(evsel, cpu, thread) = fd;
		}
	}

	return err;
}
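
/* Close every fd opened for @cpu and mark the slots as unused again. */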
static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		if (FD(evsel, cpu, thread) >= 0)
			close(FD(evsel, cpu, thread));
		FD(evsel, cpu, thread) = -1;
	}
}
void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	int cpu;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
		perf_evsel__close_fd_cpu(evsel, cpu);
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}
void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu);
}
void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int cpu, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int fd = FD(evsel, cpu, thread);
			struct perf_mmap *map = MMAP(evsel, cpu, thread);

			if (fd < 0)
				continue;

			perf_mmap__munmap(map);
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}
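
/*
 * mmap a ring buffer for every open fd of the evsel.  @pages is the
 * ring-buffer size in pages and must be a power of two, since
 * pages * page_size - 1 is used directly as the buffer mask.
 */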
int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, cpu, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int fd = FD(evsel, cpu, thread);
			struct perf_mmap *map = MMAP(evsel, cpu, thread);

			if (fd < 0)
				continue;

			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, fd, cpu);
			if (ret) {
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
{
	if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
		return NULL;

	return MMAP(evsel, cpu, thread)->base;
}
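
/*
 * Size of one read(2) record for this event, derived from
 * attr.read_format.  The kernel returns:
 *
 *	{ u64 value;
 *	  { u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id;           }	&& PERF_FORMAT_ID
 *	}
 *
 * or, with PERF_FORMAT_GROUP, a u64 member count followed by one
 * { value [, id] } entry per group member.
 */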
int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}
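
/*
 * Read the current counter value(s), preferring the self-monitoring
 * mmap path (perf_mmap__read_self()) when a ring buffer is mapped and
 * falling back to read(2) on the event fd otherwise.
 */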
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);

	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (MMAP(evsel, cpu, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
		return 0;

	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
		return -errno;

	return 0;
}
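
/* Run @ioc on every fd of @cpu, stopping at the first failure. */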
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg,
				 int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int fd = FD(evsel, cpu, thread),
		    err = ioctl(fd, ioc, arg);

		if (err)
			return err;
	}

	return 0;
}
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
}
int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}
int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
}
int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}
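
/* Attach a tracepoint filter string to every fd of the event, cpu by cpu. */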
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < evsel->cpus->nr && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
					    PERF_EVENT_IOC_SET_FILTER,
					    (void *)filter, i);
	return err;
}
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}
struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}
struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}
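
/*
 * Allocate the sample_id/id arrays used to map sample identifiers in the
 * ring buffer back to this evsel; a system-wide event only needs one
 * entry per cpu.
 */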
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}