// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/printk.h>
#include <linux/smp.h>

static unsigned long csr_read_num(int csr_num)
{
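	/*
	 * csr_read() needs a compile-time constant CSR number, so a runtime
	 * csr_num is dispatched through the switch ladder generated by the
	 * macros below.
	 */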
#define switchcase_csr_read(__csr_num, __val)		{\
	case __csr_num:					\
		__val = csr_read(__csr_num);		\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)		{\
	switchcase_csr_read(__csr_num + 0, __val)	\
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)		{\
	switchcase_csr_read_2(__csr_num + 0, __val)	\
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)		{\
	switchcase_csr_read_4(__csr_num + 0, __val)	\
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	\
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	\
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

/*
 * Read the CSR of a corresponding counter.
 */
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr)
{
	if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H ||
	    (csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) {
		pr_err("Invalid performance counter csr %lx\n", csr);
		return -EINVAL;
	}

	return csr_read_num(csr);
}

u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
	int cwidth;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!rvpmu->ctr_get_width)
		/*
		 * If the pmu driver doesn't support counter width, set it to
		 * the default maximum allowed by the specification.
		 */
		cwidth = 63;
	else if (hwc->idx == -1)
		/* Handle init case where idx is not initialized yet */
		cwidth = rvpmu->ctr_get_width(0);
	else
		cwidth = rvpmu->ctr_get_width(hwc->idx);
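
	/*
	 * The resulting mask is used both to truncate raw counter deltas and
	 * as the maximum sample period for this counter.
	 */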
	return GENMASK_ULL(cwidth, 0);
}

u64 riscv_pmu_event_update(struct perf_event *event)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	unsigned long cmask;
	u64 oldval, delta;

	if (!rvpmu->ctr_read)
		return 0;

	cmask = riscv_pmu_ctr_get_width_mask(event);
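
	/*
	 * Read the counter and publish it as the new prev_count; retry if a
	 * concurrent update raced with us, so the delta is always computed
	 * against a consistent snapshot.
	 */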
	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = rvpmu->ctr_read(event);
		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & cmask;
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return delta;
}

static void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		if (rvpmu->ctr_stop) {
			rvpmu->ctr_stop(event, 0);
			hwc->state |= PERF_HES_STOPPED;
		}
		riscv_pmu_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
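
/*
 * Compute and program the next sample period: fold any elapsed periods back
 * into period_left, clamp the value to half the counter width so an interrupt
 * cannot be outrun, and return whether at least one full period has elapsed.
 */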
int riscv_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);
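
	/*
	 * The counter is programmed with the two's complement of the period,
	 * so it overflows after "left" increments; riscv_pmu_start() loads
	 * this value into the hardware counter.
	 */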
	local64_set(&hwc->prev_count, (u64)-left);
	perf_event_update_userpage(event);

	return overflow;
}

static void riscv_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
	u64 init_val;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	hwc->state = 0;
	riscv_pmu_event_set_period(event);
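	/* Truncate the start value to the counter width before programming. */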
	init_val = local64_read(&hwc->prev_count) & max_period;
	rvpmu->ctr_start(event, init_val);
	perf_event_update_userpage(event);
}

static int riscv_pmu_add(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = rvpmu->ctr_get_idx(event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	cpuc->events[idx] = event;
	cpuc->n_events++;
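
	/* The event starts in a stopped state until PERF_EF_START is seen. */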
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		riscv_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;

	riscv_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[hwc->idx] = NULL;
	/* The firmware needs to reset the counter mapping */
	if (rvpmu->ctr_stop)
		rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
	cpuc->n_events--;
	if (rvpmu->ctr_clear_idx)
		rvpmu->ctr_clear_idx(event);
	perf_event_update_userpage(event);
	hwc->idx = -1;
}

static void riscv_pmu_read(struct perf_event *event)
{
	riscv_pmu_event_update(event);
}

static int riscv_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	int mapped_event;
	u64 event_config = 0;
	uint64_t cmask;

	hwc->flags = 0;
	mapped_event = rvpmu->event_map(event, &event_config);
	if (mapped_event < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapped_event;
	}

	/*
	 * idx is set to -1 because the index of a general event should not be
	 * decided until binding to some counter in pmu->add().
	 * config carries the mapped counter configuration and idx will hold
	 * the counter index once it is assigned.
	 */
	hwc->config = event_config;
	hwc->idx = -1;
	hwc->event_base = mapped_event;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		cmask = riscv_pmu_ctr_get_width_mask(event);
		hwc->sample_period = cmask >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

struct riscv_pmu *riscv_pmu_alloc(void)
{
	struct riscv_pmu *pmu;
	int cpuid, i;
	struct cpu_hw_events *cpuc;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	for_each_possible_cpu(cpuid) {
		cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
		cpuc->n_events = 0;
		for (i = 0; i < RISCV_MAX_COUNTERS; i++)
			cpuc->events[i] = NULL;
	}
	pmu->pmu = (struct pmu) {
		.event_init	= riscv_pmu_event_init,
		.add		= riscv_pmu_add,
		.del		= riscv_pmu_del,
		.start		= riscv_pmu_start,
		.stop		= riscv_pmu_stop,
		.read		= riscv_pmu_read,