// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This code is based on ARM perf event code which is in turn based on
 * sparc64 and x86 code.
 */
#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/cpu_pm.h>

#include <asm/sbi.h>
#include <asm/hwcap.h>
/*
 * "config" carries the SBI event index in bits 0-47; bit 63 selects a
 * firmware event. These format attributes expose that layout to userspace.
 */
PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");

static struct attribute *riscv_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_firmware.attr,
	NULL,
};

static struct attribute_group riscv_pmu_format_group = {
	.name = "format",
	.attrs = riscv_arch_formats_attr,
};

static const struct attribute_group *riscv_pmu_attr_groups[] = {
	&riscv_pmu_format_group,
	NULL,
};
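/*
 * Illustrative usage (not part of the driver): with the "format" group
 * above, the perf tool can program SBI events directly, e.g.
 *
 *	perf stat -e cpu/event=0x1/ ...			// raw hardware event
 *	perf stat -e cpu/event=0x5,firmware=1/ ...	// firmware event (sets bit 63)
 *
 * The event numbers here are placeholders; real values come from the SBI
 * PMU extension and the underlying platform.
 */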
/*
 * RISC-V doesn't have heterogeneous harts yet. This needs to become
 * per_cpu state if harts with different pmu counters ever appear.
 */
static union sbi_pmu_ctr_info *pmu_ctr_list;
static unsigned int riscv_pmu_irq;
struct sbi_pmu_event_data {
	union {
		union {
			struct hw_gen_event {
				uint32_t event_code:16;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_gen_event;
			struct hw_cache_event {
				uint32_t result_id:1;
				uint32_t op_id:2;
				uint32_t cache_id:13;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_cache_event;
		};
		uint32_t event_idx;
	};
};
static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_INSTRUCTIONS]		= {.hw_gen_event = {
							SBI_PMU_HW_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_REFERENCES]	= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_REFERENCES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BUS_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_BUS_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_BACKEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_REF_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
};
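/*
 * Example of the mapping above (illustrative): "perf stat -e cycles" reaches
 * this driver as PERF_COUNT_HW_CPU_CYCLES, which pmu_sbi_event_map() turns
 * into the SBI event index SBI_PMU_HW_CPU_CYCLES; the firmware then picks a
 * suitable hardware counter via SBI_EXT_PMU_COUNTER_CFG_MATCH.
 */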
#define C(x) PERF_COUNT_HW_CACHE_##x
static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
};
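/*
 * Illustrative decoding (perf ABI, not driver-specific): for an L1D read
 * miss the perf core packs attr.config as
 *
 *	config = (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		  PERF_COUNT_HW_CACHE_L1D;
 *
 * pmu_event_find_cache() below unpacks one byte per level to index this map.
 */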
static int pmu_sbi_ctr_get_width(int idx)
{
	return pmu_ctr_list[idx].width;
}

static bool pmu_sbi_ctr_is_fw(int cidx)
{
	union sbi_pmu_ctr_info *info;

	info = &pmu_ctr_list[cidx];
	if (!info)
		return false;

	return info->type == SBI_PMU_CTR_TYPE_FW;
}
static int pmu_sbi_ctr_get_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct sbiret ret;
	int idx;
	uint64_t cbase = 0;
	uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0);
	unsigned long cflags = 0;

	if (event->attr.exclude_kernel)
		cflags |= SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_user)
		cflags |= SBI_PMU_CFG_FLAG_SET_UINH;

	/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
			cflags, hwc->event_base, hwc->config, hwc->config >> 32);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
			cflags, hwc->event_base, hwc->config, 0);
#endif
	if (ret.error) {
		pr_debug("Not able to find a counter for event %lx config %llx\n",
			 hwc->event_base, hwc->config);
		return sbi_err_map_linux_errno(ret.error);
	}

	idx = ret.value;
	if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value)
		return -ENOENT;

	/* Additional sanity check for the counter id */
	if (pmu_sbi_ctr_is_fw(idx)) {
		if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
			return idx;
	} else {
		if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
			return idx;
	}

	return -ENOENT;
}
static void pmu_sbi_ctr_clear_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int idx = hwc->idx;

	if (pmu_sbi_ctr_is_fw(idx))
		clear_bit(idx, cpuc->used_fw_ctrs);
	else
		clear_bit(idx, cpuc->used_hw_ctrs);
}
static int pmu_event_find_cache(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx;

	return ret;
}
static bool pmu_sbi_is_fw_event(struct perf_event *event)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;

	return (type == PERF_TYPE_RAW) && ((config >> 63) == 1);
}
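/*
 * Illustrative encoding (the event number is a placeholder): userspace
 * requests a firmware event as a raw event with bit 63 set, e.g.
 *
 *	attr.type = PERF_TYPE_RAW;
 *	attr.config = BIT_ULL(63) | 0x5;	// SBI firmware event 0x5
 */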
static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;
	int bSoftware;
	u64 raw_config_val;
	int ret;

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		ret = pmu_hw_event_map[event->attr.config].event_idx;
		break;
	case PERF_TYPE_HW_CACHE:
		ret = pmu_event_find_cache(config);
		break;
	case PERF_TYPE_RAW:
		/*
		 * As per the SBI specification, the upper 16 bits must be unused
		 * for a raw event. Use bit 63 to distinguish between a hardware
		 * raw event and a firmware event.
		 */
		bSoftware = config >> 63;
		raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
		if (bSoftware) {
			if (raw_config_val < SBI_PMU_FW_MAX)
				ret = (raw_config_val & 0xFFFF) |
				      (SBI_PMU_EVENT_TYPE_FW << 16);
			else
				return -EINVAL;
		} else {
			ret = RISCV_PMU_RAW_EVENT_IDX;
			*econfig = raw_config_val;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static u64 pmu_sbi_ctr_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct sbiret ret;
	union sbi_pmu_ctr_info info;
	u64 val = 0;

	if (pmu_sbi_is_fw_event(event)) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
				hwc->idx, 0, 0, 0, 0, 0);
		if (!ret.error)
			val = ret.value;
	} else {
		info = pmu_ctr_list[idx];
		val = riscv_pmu_ctr_read_csr(info.csr);
		if (IS_ENABLED(CONFIG_32BIT))
			/* Compose the 64-bit value from the hi/lo CSR pair */
			val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32 | val;
	}

	return val;
}
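/*
 * Note: the hi/lo composition above is not tear-proof. A sketch of a
 * tear-proof 64-bit read on RV32 (illustrative only, not what this driver
 * does) would re-read the high half until it is stable:
 *
 *	u32 hi, lo;
 *
 *	do {
 *		hi = riscv_pmu_ctr_read_csr(info.csr + 0x80);
 *		lo = riscv_pmu_ctr_read_csr(info.csr);
 *	} while (hi != riscv_pmu_ctr_read_csr(info.csr + 0x80));
 *	val = ((u64)hi << 32) | lo;
 */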
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;

#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, ival >> 32, 0);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, 0, 0);
#endif
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
		pr_err("Starting counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));
}
static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
	    flag != SBI_PMU_STOP_FLAG_RESET)
		pr_err("Stopping counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));
}
static int pmu_sbi_find_num_ctrs(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);

	return ret.value;
}
static int pmu_sbi_get_ctrinfo(int nctr)
{
	struct sbiret ret;
	int i, num_hw_ctr = 0, num_fw_ctr = 0;
	union sbi_pmu_ctr_info cinfo;

	pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);
	if (!pmu_ctr_list)
		return -ENOMEM;

	for (i = 0; i < nctr; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
		if (ret.error)
			/* The logical counter ids are not expected to be contiguous */
			continue;
		cinfo.value = ret.value;
		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
			num_fw_ctr++;
		else
			num_hw_ctr++;
		pmu_ctr_list[i].value = cinfo.value;
	}

	pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);

	return 0;
}
static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
	/*
	 * No need to check the error because we are disabling all the counters,
	 * which may include counters that are not enabled yet.
	 */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
		  0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0);
}
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/* No need to check the error here as we can't do anything about the error */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
		  cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
}
/*
 * This function starts all the used counters in a two-step approach.
 * Any counter that did not overflow can be started in a single step,
 * while the overflowed counters need to be restarted with an updated
 * initialization value.
 */
static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
					       unsigned long ctr_ovf_mask)
{
	int idx = 0;
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
	unsigned long ctr_start_mask = 0;
	uint64_t max_period;
	struct hw_perf_event *hwc;
	u64 init_val = 0;

	ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;

	/* Start all the counters that did not overflow in a single shot */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
		  0, 0, 0, 0);

	/* Reinitialize and start all the counters that overflowed */
	while (ctr_ovf_mask) {
		if (ctr_ovf_mask & 0x01) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			init_val = local64_read(&hwc->prev_count) & max_period;
#if defined(CONFIG_32BIT)
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, init_val >> 32, 0);
#else
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, 0, 0);
#endif
			perf_event_update_userpage(event);
		}
		ctr_ovf_mask = ctr_ovf_mask >> 1;
		idx++;
	}
}
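/*
 * Worked example: with used_hw_ctrs[0] = 0b1101 and ctr_ovf_mask = 0b0100,
 * counters 0 and 3 are restarted with one ecall (ctr_start_mask = 0b1001)
 * and keep counting from their current values, while counter 2 is restarted
 * individually with SBI_PMU_START_FLAG_SET_INIT_VALUE and its reloaded
 * sample period.
 */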
static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
{
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct hw_perf_event *hw_evt;
	union sbi_pmu_ctr_info *info;
	int lidx, hidx, fidx;
	struct riscv_pmu *pmu;
	struct perf_event *event;
	unsigned long overflow;
	unsigned long overflowed_ctrs = 0;
	struct cpu_hw_events *cpu_hw_evt = dev;

	if (WARN_ON_ONCE(!cpu_hw_evt))
		return IRQ_NONE;

	/* Firmware counters don't support overflow yet */
	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
	event = cpu_hw_evt->events[fidx];
	if (!event) {
		csr_clear(CSR_SIP, SIP_LCOFIP);
		return IRQ_NONE;
	}

	pmu = to_riscv_pmu(event->pmu);
	pmu_sbi_stop_hw_ctrs(pmu);

	/* The overflow status register should only be read after the counters are stopped */
	overflow = csr_read(CSR_SSCOUNTOVF);

	/*
	 * The overflow interrupt pending bit should only be cleared after stopping
	 * all the counters to avoid any race condition.
	 */
	csr_clear(CSR_SIP, SIP_LCOFIP);

	/* No overflow bit is set */
	if (!overflow)
		return IRQ_NONE;

	regs = get_irq_regs();

	for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
		struct perf_event *event = cpu_hw_evt->events[lidx];

		/* Skip if invalid event or user did not request a sampling */
		if (!event || !is_sampling_event(event))
			continue;

		info = &pmu_ctr_list[lidx];
		/* Do a sanity check */
		if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
			continue;

		/* compute hardware counter index */
		hidx = info->csr - CSR_CYCLE;
		/* check if the corresponding bit is set in sscountovf */
		if (!(overflow & (1 << hidx)))
			continue;

		/*
		 * Keep track of overflowed counters so that they can be started
		 * with an updated initial value.
		 */
		overflowed_ctrs |= 1 << lidx;
		hw_evt = &event->hw;
		riscv_pmu_event_update(event);
		perf_sample_data_init(&data, 0, hw_evt->last_period);
		if (riscv_pmu_event_set_period(event)) {
			/*
			 * Unlike other ISAs, RISC-V doesn't have to disable interrupts
			 * to avoid throttling here. As per the specification, the
			 * interrupt remains disabled until the OF bit is set.
			 * Interrupts are enabled again only during the start.
			 * TODO: We will need to stop the guest counters once
			 * virtualization support is added.
			 */
			perf_event_overflow(event, &data, regs);
		}
	}

	pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);

	return IRQ_HANDLED;
}
static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/*
	 * Enable user-mode access to the TIME CSR only (scounteren bit 1;
	 * CY (bit 0) and IR (bit 2) remain inaccessible from user mode).
	 */
	csr_write(CSR_SCOUNTEREN, 0x2);

	/* Stop all the counters so that they can be enabled from perf */
	pmu_sbi_stop_all(pmu);

	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
		cpu_hw_evt->irq = riscv_pmu_irq;
		csr_clear(CSR_IP, BIT(RV_IRQ_PMU));
		csr_set(CSR_IE, BIT(RV_IRQ_PMU));
		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
	}

	return 0;
}
static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
		disable_percpu_irq(riscv_pmu_irq);
		csr_clear(CSR_IE, BIT(RV_IRQ_PMU));
	}

	/* Disable all counters access for user mode now */
	csr_write(CSR_SCOUNTEREN, 0x0);

	return 0;
}
static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
{
	int ret;
	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
	struct device_node *cpu, *child;
	struct irq_domain *domain = NULL;

	if (!riscv_isa_extension_available(NULL, SSCOFPMF))
		return -EOPNOTSUPP;

	for_each_of_cpu_node(cpu) {
		child = of_get_compatible_child(cpu, "riscv,cpu-intc");
		if (!child) {
			pr_err("Failed to find INTC node\n");
			of_node_put(cpu);
			return -ENODEV;
		}

		domain = irq_find_host(child);
		of_node_put(child);
		if (domain) {
			of_node_put(cpu);
			break;
		}
	}
	if (!domain) {
		pr_err("Failed to find INTC IRQ root domain\n");
		return -ENODEV;
	}

	riscv_pmu_irq = irq_create_mapping(domain, RV_IRQ_PMU);
	if (!riscv_pmu_irq) {
		pr_err("Failed to map PMU interrupt for node\n");
		return -ENODEV;
	}

	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
	if (ret) {
		pr_err("registering percpu irq failed [%d]\n", ret);
		return ret;
	}

	return 0;
}
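/*
 * Device tree fragment this IRQ lookup expects (per the standard RISC-V
 * CPU bindings; shown here only for illustration):
 *
 *	cpu@0 {
 *		compatible = "riscv";
 *		...
 *		interrupt-controller {
 *			compatible = "riscv,cpu-intc";
 *			interrupt-controller;
 *			#interrupt-cells = <1>;
 *		};
 *	};
 */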
#ifdef CONFIG_CPU_PM
static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			       void *v)
{
	struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
	struct perf_event *event;
	int idx;

	if (!enabled)
		return NOTIFY_OK;

	for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
		event = cpuc->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			riscv_pmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 *
			 * Requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the riscv_pmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(riscv_pmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}

	return NOTIFY_OK;
}

static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
{
	pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
	return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
}

static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
{
	cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
}
#else
static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
#endif
static void riscv_pmu_destroy(struct riscv_pmu *pmu)
{
	riscv_pm_pmu_unregister(pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
}
static int pmu_sbi_device_probe(struct platform_device *pdev)
{
	struct riscv_pmu *pmu = NULL;
	int num_counters;
	int ret = -ENODEV;

	pr_info("SBI PMU extension is available\n");
	pmu = riscv_pmu_alloc();
	if (!pmu)
		return -ENOMEM;

	num_counters = pmu_sbi_find_num_ctrs();
	if (num_counters < 0) {
		pr_err("SBI PMU extension doesn't provide any counters\n");
		goto out_free;
	}

	/* cache all the information about counters now */
	if (pmu_sbi_get_ctrinfo(num_counters))
		goto out_free;

	ret = pmu_sbi_setup_irqs(pmu, pdev);
	if (ret < 0) {
		pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n");
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
	}

	pmu->pmu.attr_groups = riscv_pmu_attr_groups;
	pmu->num_counters = num_counters;
	pmu->ctr_start = pmu_sbi_ctr_start;
	pmu->ctr_stop = pmu_sbi_ctr_stop;
	pmu->event_map = pmu_sbi_event_map;
	pmu->ctr_get_idx = pmu_sbi_ctr_get_idx;
	pmu->ctr_get_width = pmu_sbi_ctr_get_width;
	pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
	pmu->ctr_read = pmu_sbi_ctr_read;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
	if (ret)
		goto out_free;

	ret = riscv_pm_pmu_register(pmu);
	if (ret)
		goto out_unregister;

	ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	riscv_pmu_destroy(pmu);

out_free:
	kfree(pmu);
	return ret;
}
static struct platform_driver pmu_sbi_driver = {
	.probe		= pmu_sbi_device_probe,
	.driver		= {
		.name	= RISCV_PMU_PDEV_NAME,
	},
};
static int __init pmu_sbi_devinit(void)
{
	int ret;
	struct platform_device *pdev;

	if (sbi_spec_version < sbi_mk_version(0, 3) ||
	    sbi_probe_extension(SBI_EXT_PMU) <= 0) {
		return 0;
	}

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
				      "perf/riscv/pmu:starting",
				      pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
	if (ret) {
		pr_err("CPU hotplug notifier could not be registered: %d\n",
		       ret);
		return ret;
	}

	ret = platform_driver_register(&pmu_sbi_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple(RISCV_PMU_PDEV_NAME, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&pmu_sbi_driver);
		return PTR_ERR(pdev);
	}

	/* Notify the legacy implementation that an SBI PMU is available */
	riscv_pmu_legacy_skip_init();

	return ret;
}
device_initcall(pmu_sbi_devinit)