// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This code is based on ARM perf event code which is in turn based on
 * sparc64 and x86 code.
 */
#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>

#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/hwcap.h>
#define SYSCTL_NO_USER_ACCESS	0
#define SYSCTL_USER_ACCESS	1
#define SYSCTL_LEGACY		2

#define PERF_EVENT_FLAG_NO_USER_ACCESS	BIT(SYSCTL_NO_USER_ACCESS)
#define PERF_EVENT_FLAG_USER_ACCESS	BIT(SYSCTL_USER_ACCESS)
#define PERF_EVENT_FLAG_LEGACY		BIT(SYSCTL_LEGACY)
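/*
 * The three flags above mirror the sysctl states: they are stashed in
 * event->hw.flags at event_init time so that later callbacks do not depend
 * on the sysctl value changing underneath them.
 */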
PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");
static struct attribute *riscv_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_firmware.attr,
	NULL,
};
static struct attribute_group riscv_pmu_format_group = {
	.name = "format",
	.attrs = riscv_arch_formats_attr,
};
static const struct attribute_group *riscv_pmu_attr_groups[] = {
	&riscv_pmu_format_group,
	NULL,
};
/* WORKAROUND: Allow legacy mode by default when the SoC is StarFive */
static int sysctl_perf_user_access __read_mostly = SYSCTL_LEGACY;
/*
 * RISC-V doesn't have heterogeneous harts yet. This needs to become part of
 * per-CPU data in case of harts with different PMU counters.
 */
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
static unsigned int riscv_pmu_irq;
/* Cache the available counters in a bitmask */
static unsigned long cmask;
struct sbi_pmu_event_data {
	union {
		union {
			struct hw_gen_event {
				uint32_t event_code:16;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_gen_event;
			struct hw_cache_event {
				uint32_t result_id:1;
				uint32_t op_id:2;
				uint32_t cache_id:13;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_cache_event;
		};
		uint32_t event_idx;
	};
};
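/*
 * The bitfield layout above follows the SBI PMU event_idx encoding: the
 * event type lives in bits [19:16] and the event code in bits [15:0]; for
 * cache events the code itself packs the result, operation and cache ids.
 */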
static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = {
		SBI_PMU_HW_CPU_CYCLES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_INSTRUCTIONS] = {.hw_gen_event = {
		SBI_PMU_HW_INSTRUCTIONS,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {.hw_gen_event = {
		SBI_PMU_HW_CACHE_REFERENCES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_MISSES] = {.hw_gen_event = {
		SBI_PMU_HW_CACHE_MISSES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {.hw_gen_event = {
		SBI_PMU_HW_BRANCH_INSTRUCTIONS,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_MISSES] = {.hw_gen_event = {
		SBI_PMU_HW_BRANCH_MISSES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BUS_CYCLES] = {.hw_gen_event = {
		SBI_PMU_HW_BUS_CYCLES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {.hw_gen_event = {
		SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {.hw_gen_event = {
		SBI_PMU_HW_STALLED_CYCLES_BACKEND,
		SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {.hw_gen_event = {
		SBI_PMU_HW_REF_CPU_CYCLES,
		SBI_PMU_EVENT_TYPE_HW, 0}},
};
#define C(x) PERF_COUNT_HW_CACHE_##x
static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
};
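/*
 * Every generic cache event above is forwarded to the SBI implementation as
 * an SBI_PMU_EVENT_TYPE_CACHE event; whether a given combination can really
 * be counted is decided later by the SBI counter-matching call.
 */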
static int pmu_sbi_ctr_get_width(int idx)
	return pmu_ctr_list[idx].width;

static bool pmu_sbi_ctr_is_fw(int cidx)
	union sbi_pmu_ctr_info *info;

	info = &pmu_ctr_list[cidx];

	return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false;

/*
 * Returns the counter width of a programmable counter and number of hardware
 * counters. As we don't support heterogeneous CPUs yet, it is okay to just
 * return the counter width of the first programmable counter.
 */
int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr)
	union sbi_pmu_ctr_info *info;
	u32 hpm_width = 0, hpm_count = 0;

	for_each_set_bit(i, &cmask, RISCV_MAX_COUNTERS) {
		info = &pmu_ctr_list[i];
		if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET)
			hpm_width = info->width;
		if (info->type == SBI_PMU_CTR_TYPE_HW)
			hpm_count++;
	}

	*hw_ctr_width = hpm_width;
	*num_hw_ctr = hpm_count;

	return 0;

EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info);
static uint8_t pmu_sbi_csr_index(struct perf_event *event)
	return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;

static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event)
	unsigned long cflags = 0;
	bool guest_events = false;
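	/*
	 * When the event counts on behalf of a guest (config1 flag below),
	 * the exclude_* attributes are translated into the VS/VU-mode inhibit
	 * flags rather than the host S/U-mode ones.
	 */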
	if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS)
		guest_events = true;
	if (event->attr.exclude_kernel)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_user)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH;
	if (guest_events && event->attr.exclude_hv)
		cflags |= SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_host)
		cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_guest)
		cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH;

	return cflags;

static int pmu_sbi_ctr_get_idx(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	uint64_t cbase = 0, cmask = rvpmu->cmask;
	unsigned long cflags = 0;

	cflags = pmu_sbi_get_filter_flags(event);
	/*
	 * In legacy mode, we have to force the fixed counters for those events
	 * but not in the user access mode as we want to use the other counters
	 * that support sampling/filtering.
	 */
	if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = 1;
		} else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = 1UL << (CSR_INSTRET - CSR_CYCLE);
		}
	}

	/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config,
			hwc->config >> 32);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
	pr_debug("Not able to find a counter for event %lx config %llx\n",
		 hwc->event_base, hwc->config);
	return sbi_err_map_linux_errno(ret.error);
	if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
		return -ENOENT;

	/* Additional sanity check for the counter id */
	if (pmu_sbi_ctr_is_fw(idx)) {
		if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
			return idx;
	} else {
		if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
			return idx;
	}

static void pmu_sbi_ctr_clear_idx(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int idx = hwc->idx;

	if (pmu_sbi_ctr_is_fw(idx))
		clear_bit(idx, cpuc->used_fw_ctrs);
	else
		clear_bit(idx, cpuc->used_hw_ctrs);

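/*
 * For PERF_TYPE_HW_CACHE events, attr.config packs the cache id in bits
 * [7:0], the operation in bits [15:8] and the result in bits [23:16],
 * which is what the shifts below unpack.
 */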
static int pmu_event_find_cache(u64 config)
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx;

	return ret;

static bool pmu_sbi_is_fw_event(struct perf_event *event)
	u32 type = event->attr.type;
	u64 config = event->attr.config;

	if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1))
		return true;
	else
		return false;

static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
	u32 type = event->attr.type;
	u64 config = event->attr.config;

	case PERF_TYPE_HARDWARE:
		if (config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		ret = pmu_hw_event_map[event->attr.config].event_idx;
	case PERF_TYPE_HW_CACHE:
		ret = pmu_event_find_cache(config);
	case PERF_TYPE_RAW:
		/*
		 * As per the SBI specification, the upper 16 bits must be unused
		 * for a raw event. Use the MSB (bit 63) to distinguish between a
		 * hardware raw event and firmware events.
		 */
		bSoftware = config >> 63;
		raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
		if (bSoftware) {
			ret = (raw_config_val & 0xFFFF) |
				(SBI_PMU_EVENT_TYPE_FW << 16);
		} else {
			ret = RISCV_PMU_RAW_EVENT_IDX;
			*econfig = raw_config_val;
		}

static u64 pmu_sbi_ctr_read(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	union sbi_pmu_ctr_info info;

	if (pmu_sbi_is_fw_event(event)) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
				hwc->idx, 0, 0, 0, 0, 0);
		if (!ret.error)
			val = ret.value;
	} else {
		info = pmu_ctr_list[idx];
		val = riscv_pmu_ctr_read_csr(info.csr);
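		/*
		 * On RV32 the upper half of a 64-bit counter lives in the
		 * companion CSR 0x80 above the base one (e.g. cycleh for
		 * cycle), so the two halves are stitched together below.
		 */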
		if (IS_ENABLED(CONFIG_32BIT))
			val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32 | val;
	}

	return val;

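/*
 * pmu_sbi_csr_index() is the counter's offset from CSR_CYCLE, which is also
 * its bit position in scounteren; setting or clearing that bit grants or
 * revokes direct user-space reads of the counter CSR.
 */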
static void pmu_sbi_set_scounteren(void *arg)
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));

static void pmu_sbi_reset_scounteren(void *arg)
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));

static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;

#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, ival >> 32, 0);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, 0, 0);
#endif
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
		pr_err("Starting counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_set_scounteren((void *)event);

static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
	struct hw_perf_event *hwc = &event->hw;

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_reset_scounteren((void *)event);

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
	    flag != SBI_PMU_STOP_FLAG_RESET)
		pr_err("Stopping counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));

static int pmu_sbi_find_num_ctrs(void)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);

static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
	int i, num_hw_ctr = 0, num_fw_ctr = 0;
	union sbi_pmu_ctr_info cinfo;

	pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);

	for (i = 0; i < nctr; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
		if (ret.error)
			/* The logical counter ids are not expected to be contiguous */
			continue;

		*mask |= BIT(i);

		cinfo.value = ret.value;
		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
			num_fw_ctr++;
		else
			num_hw_ctr++;
		pmu_ctr_list[i].value = cinfo.value;
	}

	pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);

static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
	/*
	 * No need to check the error because we are disabling all the counters
	 * which may include counters that are not enabled yet.
	 */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
		  0, pmu->cmask, 0, 0, 0, 0);

static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/* No need to check the error here as we can't do anything about the error */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
		  cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);

/*
 * This function starts all the used counters in a two-step approach.
 * Any counter that did not overflow can be started in a single step
 * while the overflowed counters need to be started with an updated
 * initialization value.
 */
static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
					       unsigned long ctr_ovf_mask)
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
	unsigned long ctr_start_mask = 0;
	struct hw_perf_event *hwc;

	ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;

	/* Start all the counters that did not overflow in a single shot */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
		  0, 0, 0, 0);

	/* Reinitialize and start all the counters that overflowed */
	while (ctr_ovf_mask) {
		if (ctr_ovf_mask & 0x01) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			init_val = local64_read(&hwc->prev_count) & max_period;
#if defined(CONFIG_32BIT)
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, init_val >> 32, 0);
#else
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, 0, 0);
#endif
			perf_event_update_userpage(event);
		}
		ctr_ovf_mask = ctr_ovf_mask >> 1;
		idx++;
	}

static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct hw_perf_event *hw_evt;
	union sbi_pmu_ctr_info *info;
	int lidx, hidx, fidx;
	struct riscv_pmu *pmu;
	struct perf_event *event;
	unsigned long overflow;
	unsigned long overflowed_ctrs = 0;
	struct cpu_hw_events *cpu_hw_evt = dev;
	u64 start_clock = sched_clock();

	if (WARN_ON_ONCE(!cpu_hw_evt))
		return IRQ_NONE;

	/* Firmware counters don't support overflow yet */
	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
	if (fidx == RISCV_MAX_COUNTERS) {
		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
		return IRQ_NONE;
	}

	event = cpu_hw_evt->events[fidx];
	if (!event) {
		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
		return IRQ_NONE;
	}

	pmu = to_riscv_pmu(event->pmu);
	pmu_sbi_stop_hw_ctrs(pmu);

	/* Overflow status register should only be read after counters are stopped */
	ALT_SBI_PMU_OVERFLOW(overflow);

	/*
	 * Overflow interrupt pending bit should only be cleared after stopping
	 * all the counters to avoid any race condition.
	 */
	csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));

	/* No overflow bit is set */
	if (!overflow)
		return IRQ_NONE;

	regs = get_irq_regs();

	for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
		struct perf_event *event = cpu_hw_evt->events[lidx];

		/* Skip if invalid event or user did not request sampling */
		if (!event || !is_sampling_event(event))
			continue;

		info = &pmu_ctr_list[lidx];
		/* Do a sanity check */
		if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
			continue;

		/* compute hardware counter index */
		hidx = info->csr - CSR_CYCLE;
		/* check if the corresponding bit is set in scountovf */
		if (!(overflow & (1 << hidx)))
			continue;

		/*
		 * Keep track of overflowed counters so that they can be
		 * started with an updated initial value.
		 */
		overflowed_ctrs |= 1 << lidx;
		hw_evt = &event->hw;
		riscv_pmu_event_update(event);
		perf_sample_data_init(&data, 0, hw_evt->last_period);
		if (riscv_pmu_event_set_period(event)) {
			/*
			 * Unlike other ISAs, RISC-V doesn't have to disable
			 * interrupts to avoid throttling here. As per the
			 * specification, the interrupt remains disabled until
			 * the OF bit is set. Interrupts are enabled again only
			 * during the start.
			 * TODO: We will need to stop the guest counters once
			 * virtualization support is added.
			 */
			perf_event_overflow(event, &data, regs);
		}
	}

	pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
	perf_sample_event_took(sched_clock() - start_clock);

	return IRQ_HANDLED;

static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/*
	 * We keep enabling userspace access to CYCLE, TIME and INSTRET via the
	 * legacy option but that will be removed in the future.
	 */
	if (sysctl_perf_user_access == SYSCTL_LEGACY)
		csr_write(CSR_SCOUNTEREN, 0x7);
	else
		csr_write(CSR_SCOUNTEREN, 0x2);
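	/*
	 * scounteren bit 0 gates user access to the cycle CSR, bit 1 to time
	 * and bit 2 to instret: 0x7 exposes all three, 0x2 keeps only time
	 * readable from user space.
	 */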
	/* Stop all the counters so that they can be enabled from perf */
	pmu_sbi_stop_all(pmu);

	if (riscv_pmu_use_irq) {
		cpu_hw_evt->irq = riscv_pmu_irq;
		csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
		csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
	}

static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
	if (riscv_pmu_use_irq) {
		disable_percpu_irq(riscv_pmu_irq);
		csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
	}

	/* Disable access to all counters for user mode now */
	csr_write(CSR_SCOUNTEREN, 0x0);

static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
	struct irq_domain *domain = NULL;

	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
		riscv_pmu_irq_num = RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
	} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
		   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
		   riscv_cached_marchid(0) == 0 &&
		   riscv_cached_mimpid(0) == 0) {
		riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
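		/*
		 * T-Head C9xx cores predate Sscofpmf and signal counter
		 * overflow through a vendor-specific local interrupt number
		 * instead.
		 */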
	}

	if (!riscv_pmu_use_irq)
		return -EOPNOTSUPP;

	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("Failed to find INTC IRQ root domain\n");
		return -ENODEV;
	}

	riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
	if (!riscv_pmu_irq) {
		pr_err("Failed to map PMU interrupt for node\n");
		return -ENODEV;
	}

	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
	if (ret)
		pr_err("registering percpu irq failed [%d]\n", ret);

#ifdef CONFIG_CPU_PM
static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			       void *v)
	struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
	struct perf_event *event;

	for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
		event = cpuc->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/* Stop and update the counter */
			riscv_pmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/* Restore and enable the counter. */
			riscv_pmu_start(event, PERF_EF_RELOAD);
			break;
		}
	}

static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
	pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
	return cpu_pm_register_notifier(&pmu->riscv_pm_nb);

static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
	cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);

#else
static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
#endif

static void riscv_pmu_destroy(struct riscv_pmu *pmu)
	riscv_pm_pmu_unregister(pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);

static void pmu_sbi_event_init(struct perf_event *event)
	/*
	 * The permissions are set at event_init so that we do not depend
	 * on the sysctl value that can change.
	 */
	if (sysctl_perf_user_access == SYSCTL_NO_USER_ACCESS)
		event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS;
	else if (sysctl_perf_user_access == SYSCTL_USER_ACCESS)
		event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS;
	else
		event->hw.flags |= PERF_EVENT_FLAG_LEGACY;

static void pmu_sbi_event_mapped(struct perf_event *event, struct mm_struct *mm)
	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
		return;

	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
			return;
		}
	}

	/*
	 * The user mmapped the event to directly access it: this is where
	 * we determine based on sysctl_perf_user_access if we grant userspace
	 * the direct access to this event. That means that within the same
	 * task, some events may be directly accessible and some other may not,
	 * if the user changes the value of sysctl_perf_user_access in the
	 * meantime.
	 */
	event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;

	/*
	 * We must enable userspace access *before* advertising in the user page
	 * that it is possible to do so to avoid any race.
	 * And we must notify all cpus here because threads that currently run
	 * on other cpus will try to directly access the counter too without
	 * calling pmu_sbi_ctr_start.
	 */
	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
		on_each_cpu_mask(mm_cpumask(mm),
				 pmu_sbi_set_scounteren, (void *)event, 1);

static void pmu_sbi_event_unmapped(struct perf_event *event, struct mm_struct *mm)
	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
		return;

	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
			return;
		}
	}

	/*
	 * Here we can directly remove user access since the user does not have
	 * access to the user page anymore so we avoid the racy window where the
	 * user could have read cap_user_rdpmc to true right before we disable
	 * it.
	 */
	event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;

	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
		on_each_cpu_mask(mm_cpumask(mm),
				 pmu_sbi_reset_scounteren, (void *)event, 1);

static void riscv_pmu_update_counter_access(void *info)
	if (sysctl_perf_user_access == SYSCTL_LEGACY)
		csr_write(CSR_SCOUNTEREN, 0x7);
	else
		csr_write(CSR_SCOUNTEREN, 0x2);

static int riscv_pmu_proc_user_access_handler(struct ctl_table *table,
					      int write, void *buffer,
					      size_t *lenp, loff_t *ppos)
	int prev = sysctl_perf_user_access;
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	/*
	 * Test against the previous value since we clear SCOUNTEREN when
	 * sysctl_perf_user_access is set to SYSCTL_USER_ACCESS, but we should
	 * not do that if that was already the case.
	 */
	if (ret || !write || prev == sysctl_perf_user_access)
		return ret;

	on_each_cpu(riscv_pmu_update_counter_access, NULL, 1);

static struct ctl_table sbi_pmu_sysctl_table[] = {
	{
		.procname = "perf_user_access",
		.data = &sysctl_perf_user_access,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = riscv_pmu_proc_user_access_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_TWO,
	},
};
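/*
 * proc_dointvec_minmax() together with SYSCTL_ZERO/SYSCTL_TWO clamps writes
 * to the 0..2 range, i.e. the three access modes defined at the top of this
 * file.
 */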
static int pmu_sbi_device_probe(struct platform_device *pdev)
	struct riscv_pmu *pmu = NULL;

	pr_info("SBI PMU extension is available\n");
	pmu = riscv_pmu_alloc();

	num_counters = pmu_sbi_find_num_ctrs();
	if (num_counters < 0) {
		pr_err("SBI PMU extension doesn't provide any counters\n");
	/* It is possible for the SBI to return more counters than the maximum supported */
	if (num_counters > RISCV_MAX_COUNTERS) {
		num_counters = RISCV_MAX_COUNTERS;
		pr_info("SBI returned more than maximum number of counters. Limiting the number of counters to %d\n", num_counters);
	}

	/* cache all the information about counters now */
	if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
		goto out_free;

	ret = pmu_sbi_setup_irqs(pmu, pdev);
	if (ret) {
		pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n");
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
	}

	pmu->pmu.attr_groups = riscv_pmu_attr_groups;
	pmu->cmask = cmask;
	pmu->ctr_start = pmu_sbi_ctr_start;
	pmu->ctr_stop = pmu_sbi_ctr_stop;
	pmu->event_map = pmu_sbi_event_map;
	pmu->ctr_get_idx = pmu_sbi_ctr_get_idx;
	pmu->ctr_get_width = pmu_sbi_ctr_get_width;
	pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
	pmu->ctr_read = pmu_sbi_ctr_read;
	pmu->event_init = pmu_sbi_event_init;
	pmu->event_mapped = pmu_sbi_event_mapped;
	pmu->event_unmapped = pmu_sbi_event_unmapped;
	pmu->csr_index = pmu_sbi_csr_index;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);

	ret = riscv_pm_pmu_register(pmu);
	if (ret)
		goto out_unregister;

	ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
	if (ret)
		goto out_unregister;

	register_sysctl("kernel", sbi_pmu_sysctl_table);

	return 0;

out_unregister:
	riscv_pmu_destroy(pmu);

static struct platform_driver pmu_sbi_driver = {
	.probe		= pmu_sbi_device_probe,
	.driver		= {
		.name	= RISCV_PMU_SBI_PDEV_NAME,
	},
};
static int __init pmu_sbi_devinit(void)
	int ret;
	struct platform_device *pdev;

	if (sbi_spec_version < sbi_mk_version(0, 3) ||
	    !sbi_probe_extension(SBI_EXT_PMU)) {
		return -ENODEV;
	}

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
				      "perf/riscv/pmu:starting",
				      pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
	if (ret) {
		pr_err("CPU hotplug notifier could not be registered: %d\n",
		       ret);
		return ret;
	}

	ret = platform_driver_register(&pmu_sbi_driver);

	pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&pmu_sbi_driver);
		return PTR_ERR(pdev);
	}

	/* Notify legacy implementation that SBI PMU is available */
	riscv_pmu_legacy_skip_init();

device_initcall(pmu_sbi_devinit)