// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

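/*
 * Initialize a pre-allocated evsel: hook up the list node used by an
 * owning evlist, record the event attribute and index, and make the
 * evsel its own group leader until it is placed in an event group.
 */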
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
	evsel->idx = idx;
	evsel->leader = evsel;
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, 0);

	return evsel;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}

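/*
 * Accessors into the xyarray-backed per-event state: FD() returns a
 * pointer to the perf_event_open() fd for a (cpu-map index, thread)
 * pair, MMAP() the matching ring-buffer mapping, or NULL when the
 * event has not been mmapped.
 */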
#define FD(_evsel, _cpu_map_idx, _thread)				\
	((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
#define MMAP(_evsel, _cpu_map_idx, _thread)				\
	(_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \
		      : NULL)

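/*
 * Allocate the ncpus x nthreads fd table and mark every slot with the
 * -1 "not opened" sentinel that the rest of this file tests against.
 */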
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int idx, thread;

		for (idx = 0; idx < ncpus; idx++) {
			for (thread = 0; thread < nthreads; thread++) {
				int *fd = FD(evsel, idx, thread);

				if (fd)
					*fd = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));

	return evsel->mmap != NULL ? 0 : -ENOMEM;
}

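/*
 * glibc provides no wrapper for perf_event_open(2), so invoke the raw
 * syscall; only the plain integer cpu is passed down to the kernel.
 */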
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, struct perf_cpu cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
}

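/*
 * Resolve the group_fd argument for perf_event_open(): a group leader
 * opens with -1, while a group member must pass its leader's
 * already-open fd for the same (cpu-map index, thread) slot.
 */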
static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
{
	struct perf_evsel *leader = evsel->leader;
	int *fd;

	if (evsel == leader) {
		*group_fd = -1;
		return 0;
	}

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	if (!leader->fd)
		return -ENOTCONN;

	fd = FD(leader, cpu_map_idx, thread);
	if (fd == NULL || *fd == -1)
		return -EBADF;

	*group_fd = *fd;

	return 0;
}

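/*
 * Open a counter fd for every (cpu, thread) pair described by the maps.
 * NULL maps are replaced by static dummy maps so callers can monitor
 * "any cpu" or "this thread" by passing NULL.  On the first failure a
 * negative errno-style value is returned.
 *
 * A minimal counting sketch (hedged: error checks elided, and the
 * software CPU-clock event is only an example choice):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct perf_counts_values counts;
 *
 *	perf_thread_map__set_pid(threads, 0, 0);	// measure this process
 *	perf_evsel__open(evsel, NULL, threads);
 *	perf_evsel__read(evsel, 0, 0, &counts);
 *	perf_evsel__close(evsel);
 *	perf_evsel__delete(evsel);
 *	perf_thread_map__put(threads);
 */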
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	struct perf_cpu cpu;
	int idx, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
		return -ENOMEM;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd, group_fd, *evsel_fd;

			evsel_fd = FD(evsel, idx, thread);
			if (evsel_fd == NULL)
				return -EINVAL;

			err = get_group_fd(evsel, idx, thread, &group_fd);
			if (err < 0)
				return err;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpu, group_fd, 0);

			if (fd < 0)
				return -errno;

			*evsel_fd = fd;
		}
	}

	return err;
}

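/*
 * Close every open fd for one cpu-map index and reset the slots to the
 * -1 sentinel so the evsel can be reopened later.
 */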
static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		int *fd = FD(evsel, cpu_map_idx, thread);

		if (fd && *fd >= 0) {
			close(*fd);
			*fd = -1;
		}
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)
		perf_evsel__close_fd_cpu(evsel, idx);
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu_map_idx);
}

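/*
 * Tear down every ring-buffer mapping created by perf_evsel__mmap()
 * and release the mmap table itself.
 */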
void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int idx, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);

			if (fd == NULL || *fd < 0)
				continue;

			perf_mmap__munmap(MMAP(evsel, idx, thread));
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}

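/*
 * mmap a ring buffer of "pages" data pages for every open fd.  The mask
 * is used to wrap ring-buffer offsets, which presumes pages is a power
 * of two as the perf mmap ABI requires; the extra control-header page
 * is accounted for inside perf_mmap__mmap().
 */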
int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, idx, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);
			struct perf_mmap *map;
			struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);

			if (fd == NULL || *fd < 0)
				continue;

			map = MMAP(evsel, idx, thread);
			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, *fd, cpu);
			if (ret) {
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}

void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
{
	int *fd = FD(evsel, cpu_map_idx, thread);

	if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
		return NULL;

	return MMAP(evsel, cpu_map_idx, thread)->base;
}

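/*
 * Compute how many bytes a read() of this event returns, mirroring the
 * layout the kernel produces for the chosen attr.read_format bits:
 * optional time-enabled/time-running words, plus one value (and
 * optional id) entry per group member.
 */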
int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}

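/*
 * Read counter values for one (cpu-map index, thread) slot.  If the
 * event was mmapped, first try the self-monitoring fast path that reads
 * the counter from user space via the ring-buffer control page; fall
 * back to a read() on the event fd otherwise.
 */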
int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);
	int *fd = FD(evsel, cpu_map_idx, thread);

	memset(count, 0, sizeof(*count));

	if (fd == NULL || *fd < 0)
		return -EINVAL;

	if (MMAP(evsel, cpu_map_idx, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
		return 0;

	if (readn(*fd, count->values, size) <= 0)
		return -errno;

	return 0;
}

static int perf_evsel__ioctl(struct perf_evsel *evsel, int ioc, void *arg,
			     int cpu_map_idx, int thread)
{
	int *fd = FD(evsel, cpu_map_idx, thread);

	if (fd == NULL || *fd < 0)
		return -1;

	return ioctl(*fd, ioc, arg);
}

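/*
 * Apply one ioctl to every thread of a given cpu-map index, stopping at
 * the first failure.
 */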
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg,
				 int cpu_map_idx)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread);

		if (err)
			return err;
	}

	return 0;
}

int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
}

int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
{
	struct perf_cpu cpu __maybe_unused;
	int idx;
	int err;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
		err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread);
		if (err)
			return err;
	}

	return 0;
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
					    PERF_EVENT_IOC_SET_FILTER,
					    (void *)filter, i);
	return err;
}

struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}

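/*
 * Allocate the per-(cpu, thread) sample-id bookkeeping that lets an
 * evlist map IDs found in the event stream back to this evsel.
 * System-wide events have one fd per cpu, hence a single thread column.
 */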
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}

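/*
 * Scale a counter that was time-multiplexed with other events: when the
 * event ran for only part of the enabled time, extrapolate the value by
 * ena/run.  *pscaled reports 1 when scaled, -1 when the event never ran
 * and 0 otherwise.
 */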
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, __s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double)count->val * count->ena / count->run);
		}
	}

	if (pscaled)
		*pscaled = scaled;
}