// SPDX-License-Identifier: GPL-2.0
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <errno.h>
#include <string.h>
#include <linux/ring_buffer.h>
#include <linux/perf_event.h>
#include <perf/mmap.h>
#include <perf/event.h>
#include <perf/evsel.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/stringify.h>
#include "internal.h"
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
		     bool overwrite, libperf_unmap_cb_t unmap_cb)
{
	/* Assume the remaining fields were zero initialized. */
	map->fd = -1;
	map->overwrite = overwrite;
	map->unmap_cb = unmap_cb;
	refcount_set(&map->refcnt, 0);
	if (prev)
		prev->next = map;
}
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
		    int fd, struct perf_cpu cpu)
{
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		map->base = NULL;
		return -1;
	}

	map->fd = fd;
	map->cpu = cpu;
	return 0;
}
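/*
 * Example (a sketch, not part of the library): the caller fills a
 * perf_mmap_param and maps the ring over an event fd obtained from
 * perf_event_open(). "mask" is the data-area size minus one, so the
 * number of data pages must be a power of two; "fd" and "cpu" are
 * assumed to come from the caller's event setup.
 *
 *	struct perf_mmap_param mp = {
 *		.prot = PROT_READ | PROT_WRITE,
 *		.mask = (8 * page_size) - 1,	// 8 data pages
 *	};
 *
 *	if (perf_mmap__mmap(map, &mp, fd, cpu) < 0)
 *		return -1;
 */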
void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map && map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	if (map && map->unmap_cb)
		map->unmap_cb(map);
}
void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}
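/*
 * Example (a sketch): a caller that hands the map to another consumer
 * holds its own reference; the final perf_mmap__put() unmaps the ring.
 * consume_on_other_thread() is a hypothetical caller-side function.
 *
 *	perf_mmap__get(map);
 *	consume_on_other_thread(map);
 *	perf_mmap__put(map);		// last put calls perf_mmap__munmap()
 */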
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	ring_buffer_write_tail(md->base, tail);
}

u64 perf_mmap__read_head(struct perf_mmap *map)
{
	return ring_buffer_read_head(map->base);
}
static bool perf_mmap__empty(struct perf_mmap *map)
{
	struct perf_event_mmap_page *pc = map->base;

	return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
}
void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));

	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}
/*
 * Report the start and end of the available data in the ring buffer.
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}
int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}
/*
 * Mandatory for overwrite mode.
 * Reading in overwrite mode proceeds backward, and the last
 * perf_mmap__read() leaves map->prev at the tail. Reset map->prev to
 * the current head, which is where the next read will end.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}
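/*
 * Example (a sketch): reading an overwrite-mode ring. The caller is
 * assumed to have paused the ring first (e.g. with the
 * PERF_EVENT_IOC_PAUSE_OUTPUT ioctl) so the kernel does not overwrite
 * records mid-read.
 *
 *	if (perf_mmap__read_init(map) == 0) {
 *		union perf_event *event;
 *
 *		while ((event = perf_mmap__read_event(map)) != NULL)
 *			handle(event);		// hypothetical handler
 *
 *		perf_mmap__read_done(map);	// reset map->prev to head
 *	}
 */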
/* 'end' must point to a valid entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}
/*
 * Read events from the ring buffer one at a time; each call returns
 * one event.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ring buffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
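/*
 * Example (a sketch, assuming a non-overwrite map that is already
 * mmapped): drain everything currently available in the ring.
 * process_event() is a hypothetical caller-supplied function.
 *
 *	static void drain(struct perf_mmap *map)
 *	{
 *		union perf_event *event;
 *
 *		if (perf_mmap__read_init(map) < 0)
 *			return;		// nothing to read, or unmapped
 *
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			process_event(event);
 *			perf_mmap__consume(map);	// advances the tail
 *		}
 *
 *		perf_mmap__read_done(map);
 *	}
 */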
#if defined(__i386__) || defined(__x86_64__)
static u64 read_perf_counter(unsigned int counter)
{
	unsigned int low, high;

	/* rdpmc: ecx selects the counter, result returned in edx:eax */
	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((u64)high) << 32;
}

static u64 read_timestamp(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((u64)high) << 32;
}
#elif defined(__aarch64__)
#define read_sysreg(r) ({						\
	u64 __val;							\
	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));		\
	__val;								\
})

static u64 read_pmccntr(void)
{
	return read_sysreg(pmccntr_el0);
}

#define PMEVCNTR_READ(idx)					\
	static u64 read_pmevcntr_##idx(void) {			\
		return read_sysreg(pmevcntr##idx##_el0);	\
	}
PMEVCNTR_READ(0);
/* ... PMEVCNTR_READ(1) through PMEVCNTR_READ(30) elided here ... */

/*
 * Read a value directly from PMEVCNTR<idx>
 */
static u64 read_perf_counter(unsigned int counter)
{
	static u64 (* const read_f[])(void) = {
		read_pmevcntr_0,
		/* ... read_pmevcntr_1 through read_pmevcntr_30 elided ... */
		read_pmccntr
	};

	if (counter < ARRAY_SIZE(read_f))
		return (read_f[counter])();

	return 0;
}

static u64 read_timestamp(void) { return read_sysreg(cntvct_el0); }
/* __riscv_xlen contains the width of the native base integer, here 64-bit */
#elif defined(__riscv) && __riscv_xlen == 64

/* TODO: implement rv32 support */

#define CSR_CYCLE	0xc00
#define CSR_TIME	0xc01

#define csr_read(csr)						\
({								\
	register unsigned long __v;				\
	__asm__ __volatile__ ("csrr %0, %1"			\
			      : "=r" (__v)			\
			      : "i" (csr) : );			\
	__v;							\
})
static unsigned long csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)		{\
	case __csr_num:					\
		__val = csr_read(__csr_num);		\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)	{\
	switchcase_csr_read(__csr_num + 0, __val)	\
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)	{\
	switchcase_csr_read_2(__csr_num + 0, __val)	\
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)	{\
	switchcase_csr_read_4(__csr_num + 0, __val)	\
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	\
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	\
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	/*
	 * csrr needs a compile-time constant CSR number, hence the
	 * macro-generated 32-way switch instead of a computed read.
	 */
	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}
static u64 read_perf_counter(unsigned int counter)
{
	return csr_read_num(CSR_CYCLE + counter);
}

static u64 read_timestamp(void)
{
	return csr_read_num(CSR_TIME);
}

#else
static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
static u64 read_timestamp(void) { return 0; }
#endif
int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count)
{
	struct perf_event_mmap_page *pc = map->base;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 cnt, cyc = 0, time_offset = 0, time_cycles = 0, time_mask = ~0ULL;

	if (!pc || !pc->cap_user_rdpmc)
		return -1;

	do {
		seq = READ_ONCE(pc->lock);
		barrier();

		count->ena = READ_ONCE(pc->time_enabled);
		count->run = READ_ONCE(pc->time_running);

		if (pc->cap_user_time && count->ena != count->run) {
			cyc = read_timestamp();
			time_mult = READ_ONCE(pc->time_mult);
			time_shift = READ_ONCE(pc->time_shift);
			time_offset = READ_ONCE(pc->time_offset);

			if (pc->cap_user_time_short) {
				time_cycles = READ_ONCE(pc->time_cycles);
				time_mask = READ_ONCE(pc->time_mask);
			}
		}

		idx = READ_ONCE(pc->index);
		cnt = READ_ONCE(pc->offset);
		if (pc->cap_user_rdpmc && idx) {
			s64 evcnt = read_perf_counter(idx - 1);
			u16 width = READ_ONCE(pc->pmc_width);

			/* Sign-extend the pmc_width-bit counter value. */
			evcnt <<= 64 - width;
			evcnt >>= 64 - width;
			cnt += evcnt;
		} else
			return -1;

		barrier();
	} while (READ_ONCE(pc->lock) != seq);

	if (count->ena != count->run) {
		u64 delta;

		/* Adjust for cap_usr_time_short, a nop if not */
		cyc = time_cycles + ((cyc - time_cycles) & time_mask);

		delta = time_offset + mul_u64_u32_shr(cyc, time_mult, time_shift);

		count->ena += delta;
		if (idx)
			count->run += delta;
	}

	count->val = cnt;

	return 0;
}
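/*
 * Example (a sketch): self-monitoring read of an event opened for the
 * current thread, assuming the event fd's header page is mmapped at
 * map->base and the PMU exposes user-space counter access
 * (cap_user_rdpmc). Scaling by ena/run estimates the true count when
 * the event was multiplexed.
 *
 *	struct perf_counts_values count = { .val = 0 };
 *
 *	if (perf_mmap__read_self(map, &count) == 0 && count.run)
 *		scaled = count.val * count.ena / count.run;
 */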