// SPDX-License-Identifier: GPL-2.0-only
/*
 * Perf support for the Statistical Profiling Extension, introduced as
 * part of ARM v8.2.
 *
 * Copyright (C) 2016 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define PMUNAME					"arm_spe"
#define DRVNAME					PMUNAME "_pmu"
#define pr_fmt(fmt)				DRVNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/*
 * Cache if the event is allowed to trace Context information.
 * This allows us to perform the check, i.e., perfmon_capable(),
 * in the context of the event owner, once, during the event_init().
 */
#define SPE_PMU_HW_FLAGS_CX			0x00001

static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_CX);

static void set_spe_event_has_cx(struct perf_event *event)
{
	if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
		event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
}

static bool get_spe_event_has_cx(struct perf_event *event)
{
	return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
}

#define ARM_SPE_BUF_PAD_BYTE			0

struct arm_spe_pmu_buf {
	int				nr_pages;
	bool				snapshot;
	void				*base;
};

struct arm_spe_pmu {
	struct pmu			pmu;
	struct platform_device		*pdev;
	cpumask_t			supported_cpus;
	struct hlist_node		hotplug_node;

	int				irq; /* PPI */
	u16				pmsver;
	u16				min_period;
	u16				counter_sz;

#define SPE_PMU_FEAT_FILT_EVT		(1UL << 0)
#define SPE_PMU_FEAT_FILT_TYP		(1UL << 1)
#define SPE_PMU_FEAT_FILT_LAT		(1UL << 2)
#define SPE_PMU_FEAT_ARCH_INST		(1UL << 3)
#define SPE_PMU_FEAT_LDS		(1UL << 4)
#define SPE_PMU_FEAT_ERND		(1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT	(1UL << 6)
#define SPE_PMU_FEAT_DEV_PROBED		(1UL << 63)
	u64				features;

	u16				max_record_sz;
	u16				align;
	struct perf_output_handle __percpu	*handle;
};

#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))

/* Convert a free-running index from perf into an SPE buffer offset */
#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))

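/*
 * Illustrative example (not part of the original source): with 4KiB pages
 * and an 8-page AUX buffer, the buffer is 32KiB, so a free-running
 * handle->head of 0x9000 maps to offset 0x9000 % 0x8000 == 0x1000 within
 * the buffer.
 */
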
/* Keep track of our dynamic hotplug state */
static enum cpuhp_state arm_spe_pmu_online;

enum arm_spe_pmu_buf_fault_action {
	SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
	SPE_PMU_BUF_FAULT_ACT_FATAL,
	SPE_PMU_BUF_FAULT_ACT_OK,
};

/* This sysfs gunk was really good fun to write. */
enum arm_spe_pmu_capabilities {
	SPE_PMU_CAP_ARCH_INST = 0,
	SPE_PMU_CAP_ERND,
	SPE_PMU_CAP_FEAT_MAX,
	SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
	SPE_PMU_CAP_MIN_IVAL,
};

static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
	[SPE_PMU_CAP_ARCH_INST]	= SPE_PMU_FEAT_ARCH_INST,
	[SPE_PMU_CAP_ERND]	= SPE_PMU_FEAT_ERND,
};

static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
{
	if (cap < SPE_PMU_CAP_FEAT_MAX)
		return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);

	switch (cap) {
	case SPE_PMU_CAP_CNT_SZ:
		return spe_pmu->counter_sz;
	case SPE_PMU_CAP_MIN_IVAL:
		return spe_pmu->min_period;
	default:
		WARN(1, "unknown cap %d\n", cap);
	}

	return 0;
}

static ssize_t arm_spe_pmu_cap_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}

#define SPE_EXT_ATTR_ENTRY(_name, _func, _var)				\
	&((struct dev_ext_attribute[]) {				\
		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var }	\
	})[0].attr.attr

#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var)				\
	SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)

static struct attribute *arm_spe_pmu_cap_attr[] = {
	SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
	SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
	SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
	SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
	NULL,
};

static const struct attribute_group arm_spe_pmu_cap_group = {
	.name	= "caps",
	.attrs	= arm_spe_pmu_cap_attr,
};

/* User ABI */
#define ATTR_CFG_FLD_ts_enable_CFG		config	/* PMSCR_EL1.TS */
#define ATTR_CFG_FLD_ts_enable_LO		0
#define ATTR_CFG_FLD_ts_enable_HI		0
#define ATTR_CFG_FLD_pa_enable_CFG		config	/* PMSCR_EL1.PA */
#define ATTR_CFG_FLD_pa_enable_LO		1
#define ATTR_CFG_FLD_pa_enable_HI		1
#define ATTR_CFG_FLD_pct_enable_CFG		config	/* PMSCR_EL1.PCT */
#define ATTR_CFG_FLD_pct_enable_LO		2
#define ATTR_CFG_FLD_pct_enable_HI		2
#define ATTR_CFG_FLD_jitter_CFG			config	/* PMSIRR_EL1.RND */
#define ATTR_CFG_FLD_jitter_LO			16
#define ATTR_CFG_FLD_jitter_HI			16
#define ATTR_CFG_FLD_branch_filter_CFG		config	/* PMSFCR_EL1.B */
#define ATTR_CFG_FLD_branch_filter_LO		32
#define ATTR_CFG_FLD_branch_filter_HI		32
#define ATTR_CFG_FLD_load_filter_CFG		config	/* PMSFCR_EL1.LD */
#define ATTR_CFG_FLD_load_filter_LO		33
#define ATTR_CFG_FLD_load_filter_HI		33
#define ATTR_CFG_FLD_store_filter_CFG		config	/* PMSFCR_EL1.ST */
#define ATTR_CFG_FLD_store_filter_LO		34
#define ATTR_CFG_FLD_store_filter_HI		34

#define ATTR_CFG_FLD_event_filter_CFG		config1	/* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO		0
#define ATTR_CFG_FLD_event_filter_HI		63

#define ATTR_CFG_FLD_min_latency_CFG		config2	/* PMSLATFR_EL1.MINLAT */
#define ATTR_CFG_FLD_min_latency_LO		0
#define ATTR_CFG_FLD_min_latency_HI		11

#define ATTR_CFG_FLD_inv_event_filter_CFG	config3	/* PMSNEVFR_EL1 */
#define ATTR_CFG_FLD_inv_event_filter_LO	0
#define ATTR_CFG_FLD_inv_event_filter_HI	63

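/*
 * Illustrative mapping onto the perf ABI (not part of the original source;
 * the instance name "arm_spe_0" depends on probe order):
 *
 *	perf record -e arm_spe_0/ts_enable=1,load_filter=1,min_latency=100/ ...
 *
 * sets config bit 0 (PMSCR_EL1.TS), config bit 33 (PMSFCR_EL1.LD) and
 * config2 bits [11:0] (PMSLATFR_EL1.MINLAT = 100).
 */
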
/* Why does everything I do descend into this? */
#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)				\
	(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi

#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi)				\
	__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)

#define GEN_PMU_FORMAT_ATTR(name)					\
	PMU_FORMAT_ATTR(name,						\
	_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG,			\
			     ATTR_CFG_FLD_##name##_LO,			\
			     ATTR_CFG_FLD_##name##_HI))

#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)				\
	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))

#define ATTR_CFG_GET_FLD(attr, name)					\
	_ATTR_CFG_GET_FLD(attr,						\
			  ATTR_CFG_FLD_##name##_CFG,			\
			  ATTR_CFG_FLD_##name##_LO,			\
			  ATTR_CFG_FLD_##name##_HI)

GEN_PMU_FORMAT_ATTR(ts_enable);
GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
GEN_PMU_FORMAT_ATTR(jitter);
GEN_PMU_FORMAT_ATTR(branch_filter);
GEN_PMU_FORMAT_ATTR(load_filter);
GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);

static struct attribute *arm_spe_pmu_formats_attr[] = {
	&format_attr_ts_enable.attr,
	&format_attr_pa_enable.attr,
	&format_attr_pct_enable.attr,
	&format_attr_jitter.attr,
	&format_attr_branch_filter.attr,
	&format_attr_load_filter.attr,
	&format_attr_store_filter.attr,
	&format_attr_event_filter.attr,
	&format_attr_inv_event_filter.attr,
	&format_attr_min_latency.attr,
	NULL,
};

static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
						  struct attribute *attr,
						  int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);

	if (attr == &format_attr_inv_event_filter.attr &&
	    !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
		return 0;

	return attr->mode;
}

static const struct attribute_group arm_spe_pmu_format_group = {
	.name	= "format",
	.is_visible = arm_spe_pmu_format_attr_is_visible,
	.attrs	= arm_spe_pmu_formats_attr,
};

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *arm_spe_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group arm_spe_pmu_group = {
	.attrs	= arm_spe_pmu_attrs,
};

static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
	&arm_spe_pmu_group,
	&arm_spe_pmu_cap_group,
	&arm_spe_pmu_format_group,
	NULL,
};

/* Convert between user ABI and register values */
static u64 arm_spe_event_to_pmscr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 reg = 0;

	reg |= FIELD_PREP(PMSCR_EL1_TS, ATTR_CFG_GET_FLD(attr, ts_enable));
	reg |= FIELD_PREP(PMSCR_EL1_PA, ATTR_CFG_GET_FLD(attr, pa_enable));
	reg |= FIELD_PREP(PMSCR_EL1_PCT, ATTR_CFG_GET_FLD(attr, pct_enable));

	if (!attr->exclude_user)
		reg |= PMSCR_EL1_E0SPE;

	if (!attr->exclude_kernel)
		reg |= PMSCR_EL1_E1SPE;

	if (get_spe_event_has_cx(event))
		reg |= PMSCR_EL1_CX;

	return reg;
}

static void arm_spe_event_sanitise_period(struct perf_event *event)
{
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	u64 period = event->hw.sample_period;
	u64 max_period = PMSIRR_EL1_INTERVAL_MASK;

	if (period < spe_pmu->min_period)
		period = spe_pmu->min_period;
	else if (period > max_period)
		period = max_period;
	else
		period &= max_period;

	event->hw.sample_period = period;
}

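/*
 * Worked example (illustrative, assuming an implementation minimum of 256):
 * a requested sample_period of 100 is raised to 256, while 1000 is within
 * range and simply masked with PMSIRR_EL1_INTERVAL_MASK, ending up as
 * 768 (1000 & ~0xffUL).
 */
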
static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 reg = 0;

	arm_spe_event_sanitise_period(event);

	reg |= FIELD_PREP(PMSIRR_EL1_RND, ATTR_CFG_GET_FLD(attr, jitter));
	reg |= event->hw.sample_period;

	return reg;
}

static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 reg = 0;

	reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
	reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
	reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));

	if (reg)
		reg |= PMSFCR_EL1_FT;

	if (ATTR_CFG_GET_FLD(attr, event_filter))
		reg |= PMSFCR_EL1_FE;

	if (ATTR_CFG_GET_FLD(attr, inv_event_filter))
		reg |= PMSFCR_EL1_FnE;

	if (ATTR_CFG_GET_FLD(attr, min_latency))
		reg |= PMSFCR_EL1_FL;

	return reg;
}

static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	return ATTR_CFG_GET_FLD(attr, event_filter);
}

static u64 arm_spe_event_to_pmsnevfr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	return ATTR_CFG_GET_FLD(attr, inv_event_filter);
}

static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	return FIELD_PREP(PMSLATFR_EL1_MINLAT, ATTR_CFG_GET_FLD(attr, min_latency));
}

static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
{
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
	if (!buf->snapshot)
		perf_aux_output_skip(handle, len);
}

static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
{
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
	u64 head = PERF_IDX2OFF(handle->head, buf);
	u64 limit = buf->nr_pages * PAGE_SIZE;

	/*
	 * The trace format isn't parseable in reverse, so clamp
	 * the limit to half of the buffer size in snapshot mode
	 * so that the worst case is half a buffer of records, as
	 * opposed to a single record.
	 */
	if (head < limit >> 1)
		limit >>= 1;

	/*
	 * If we're within max_record_sz of the limit, we must
	 * pad, move the head index and recompute the limit.
	 */
	if (limit - head < spe_pmu->max_record_sz) {
		arm_spe_pmu_pad_buf(handle, limit - head);
		handle->head = PERF_IDX2OFF(limit, buf);
		limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
	}

	return limit;
}

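/*
 * Illustrative example (not from the original source): with a 64KiB
 * snapshot buffer and a 2KiB max_record_sz, a head of 0x7c00 first clamps
 * the limit to 0x8000 (half the buffer); since only 0x400 bytes remain
 * below that limit, the gap is padded, head moves to 0x8000 and the new
 * limit becomes 0x10000, i.e. tracing continues in the other half.
 */
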
static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
	u64 limit = bufsize;
	u64 head, tail, wakeup;

	/*
	 * The head can be misaligned for two reasons:
	 *
	 * 1. The hardware left PMBPTR pointing to the first byte after
	 *    a record when generating a buffer management event.
	 *
	 * 2. We used perf_aux_output_skip to consume handle->size bytes
	 *    and CIRC_SPACE was used to compute the size, which always
	 *    leaves one entry free.
	 *
	 * Deal with this by padding to the next alignment boundary and
	 * moving the head index. If we run out of buffer space, we'll
	 * reduce handle->size to zero and end up reporting truncation.
	 */
	head = PERF_IDX2OFF(handle->head, buf);
	if (!IS_ALIGNED(head, spe_pmu->align)) {
		unsigned long delta = roundup(head, spe_pmu->align) - head;

		delta = min(delta, handle->size);
		arm_spe_pmu_pad_buf(handle, delta);
		head = PERF_IDX2OFF(handle->head, buf);
	}

	/* If we've run out of free space, then nothing more to do */
	if (!handle->size)
		goto no_space;

	/* Compute the tail and wakeup indices now that we've aligned head */
	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
	wakeup = PERF_IDX2OFF(handle->wakeup, buf);

	/*
	 * Avoid clobbering unconsumed data. We know we have space, so
	 * if we see head == tail we know that the buffer is empty. If
	 * head > tail, then there's nothing to clobber prior to
	 * wrapping.
	 */
	if (head < tail)
		limit = round_down(tail, PAGE_SIZE);

	/*
	 * Wakeup may be arbitrarily far into the future. If it's not in
	 * the current generation, either we'll wrap before hitting it,
	 * or it's in the past and has been handled already.
	 *
	 * If there's a wakeup before we wrap, arrange to be woken up by
	 * the page boundary following it. Keep the tail boundary if
	 * that's lower.
	 */
	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
		limit = min(limit, round_up(wakeup, PAGE_SIZE));

	if (limit > head)
		return limit;

	arm_spe_pmu_pad_buf(handle, handle->size);
no_space:
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
	return 0;
}

static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
	u64 limit = __arm_spe_pmu_next_off(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	/*
	 * If the head has come too close to the end of the buffer,
	 * then pad to the end and recompute the limit.
	 */
	if (limit && (limit - head < spe_pmu->max_record_sz)) {
		arm_spe_pmu_pad_buf(handle, limit - head);
		limit = __arm_spe_pmu_next_off(handle);
	}

	return limit;
}

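/*
 * Illustrative example (not from the original source): if the first call
 * returns a limit of 0x8000 while head is at 0x7f00 and max_record_sz is
 * 2KiB, the 0x100 bytes below that limit are padded out and the limit is
 * recomputed from the new, page-aligned head, rather than asking the
 * hardware to start a record in a gap too small to hold one.
 */
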
static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
					  struct perf_event *event)
{
	u64 base, limit;
	struct arm_spe_pmu_buf *buf;

	/* Start a new aux session */
	buf = perf_aux_output_begin(handle, event);
	if (!buf) {
		event->hw.state |= PERF_HES_STOPPED;
		/*
		 * We still need to clear the limit pointer, since the
		 * profiler might only be disabled by virtue of a fault.
		 */
		limit = 0;
		goto out_write_limit;
	}

	limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
			      : arm_spe_pmu_next_off(handle);
	if (limit)
		limit |= PMBLIMITR_EL1_E;

	limit += (u64)buf->base;
	base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
	write_sysreg_s(base, SYS_PMBPTR_EL1);

out_write_limit:
	write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
}

static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
{
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	u64 offset, size;

	offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
	size = offset - PERF_IDX2OFF(handle->head, buf);

	if (buf->snapshot)
		handle->head = offset;

	perf_aux_output_end(handle, size);
}

static void arm_spe_pmu_disable_and_drain_local(void)
{
	/* Disable profiling at EL0 and EL1 */
	write_sysreg_s(0, SYS_PMSCR_EL1);
	isb();

	/* Drain any buffered data */
	psb_csync();
	dsb(nsh);

	/* Disable the profiling buffer */
	write_sysreg_s(0, SYS_PMBLIMITR_EL1);
	isb();
}

/* IRQ handling */
static enum arm_spe_pmu_buf_fault_action
arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
{
	const char *err_str;
	u64 pmbsr;
	enum arm_spe_pmu_buf_fault_action ret;

	/*
	 * Ensure new profiling data is visible to the CPU and any external
	 * aborts have been resolved.
	 */
	psb_csync();
	dsb(nsh);

	/* Ensure hardware updates to PMBPTR_EL1 are visible */
	isb();

	/* Service required? */
	pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
	if (!FIELD_GET(PMBSR_EL1_S, pmbsr))
		return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;

	/*
	 * If we've lost data, disable profiling and also set the PARTIAL
	 * flag to indicate that the last record is corrupted.
	 */
	if (FIELD_GET(PMBSR_EL1_DL, pmbsr))
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
					     PERF_AUX_FLAG_PARTIAL);

	/* Report collisions to userspace so that it can up the period */
	if (FIELD_GET(PMBSR_EL1_COLL, pmbsr))
		perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);

	/* We only expect buffer management events */
	switch (FIELD_GET(PMBSR_EL1_EC, pmbsr)) {
	case PMBSR_EL1_EC_BUF:
		/* Handled below */
		break;
	case PMBSR_EL1_EC_FAULT_S1:
	case PMBSR_EL1_EC_FAULT_S2:
		err_str = "Unexpected buffer fault";
		goto out_err;
	default:
		err_str = "Unknown error code";
		goto out_err;
	}

	/* Buffer management event */
	switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) {
	case PMBSR_EL1_BUF_BSC_FULL:
		ret = SPE_PMU_BUF_FAULT_ACT_OK;
		goto out_stop_and_end;
	default:
		err_str = "Unknown buffer status code";
	}

out_err:
	pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
			   err_str, smp_processor_id(), pmbsr,
			   read_sysreg_s(SYS_PMBPTR_EL1),
			   read_sysreg_s(SYS_PMBLIMITR_EL1));
	ret = SPE_PMU_BUF_FAULT_ACT_FATAL;

out_stop_and_end:
	arm_spe_perf_aux_output_end(handle);
	return ret;
}

static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
{
	struct perf_output_handle *handle = dev;
	struct perf_event *event = handle->event;
	enum arm_spe_pmu_buf_fault_action act;

	if (!perf_get_aux(handle))
		return IRQ_NONE;

	act = arm_spe_pmu_buf_get_fault_act(handle);
	if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
		return IRQ_NONE;

	/*
	 * Ensure perf callbacks have completed, which may disable the
	 * profiling buffer in response to a TRUNCATION flag.
	 */
	irq_work_run();

	switch (act) {
	case SPE_PMU_BUF_FAULT_ACT_FATAL:
		/*
		 * If a fatal exception occurred then leaving the profiling
		 * buffer enabled is a recipe waiting to happen. Since
		 * fatal faults don't always imply truncation, make sure
		 * that the profiling buffer is disabled explicitly before
		 * clearing the syndrome register.
		 */
		arm_spe_pmu_disable_and_drain_local();
		break;
	case SPE_PMU_BUF_FAULT_ACT_OK:
		/*
		 * We handled the fault (the buffer was full), so resume
		 * profiling as long as we didn't detect truncation.
		 * PMBPTR might be misaligned, but we'll burn that bridge
		 * when we get to it.
		 */
		if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
			arm_spe_perf_aux_output_begin(handle, event);
			isb();
		}
		break;
	case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
		/* We've seen you before, but GCC has the memory of a sieve. */
		break;
	}

	/* The buffer pointers are now sane, so resume profiling. */
	write_sysreg_s(0, SYS_PMBSR_EL1);
	return IRQ_HANDLED;
}

static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
	switch (pmsver) {
	case ID_AA64DFR0_EL1_PMSVer_IMP:
		return PMSEVFR_EL1_RES0_IMP;
	case ID_AA64DFR0_EL1_PMSVer_V1P1:
		return PMSEVFR_EL1_RES0_V1P1;
	case ID_AA64DFR0_EL1_PMSVer_V1P2:
	/* Return the highest version we support in default */
	default:
		return PMSEVFR_EL1_RES0_V1P2;
	}
}

/* Perf callbacks */
static int arm_spe_pmu_event_init(struct perf_event *event)
{
	u64 reg;
	struct perf_event_attr *attr = &event->attr;
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);

	/* This is, of course, deeply driver-specific */
	if (attr->type != event->pmu->type)
		return -ENOENT;

	if (event->cpu >= 0 &&
	    !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
		return -ENOENT;

	if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
		return -EOPNOTSUPP;

	if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
		return -EOPNOTSUPP;

	if (attr->exclude_idle)
		return -EOPNOTSUPP;

	/*
	 * Feedback-directed frequency throttling doesn't work when we
	 * have a buffer of samples. We'd need to manually count the
	 * samples in the buffer when it fills up and adjust the event
	 * count to reflect that. Instead, just force the user to specify
	 * a sample period.
	 */
	if (attr->freq)
		return -EINVAL;

	reg = arm_spe_event_to_pmsfcr(event);
	if ((FIELD_GET(PMSFCR_EL1_FE, reg)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
		return -EOPNOTSUPP;

	if ((FIELD_GET(PMSFCR_EL1_FnE, reg)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
		return -EOPNOTSUPP;

	if ((FIELD_GET(PMSFCR_EL1_FT, reg)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
		return -EOPNOTSUPP;

	if ((FIELD_GET(PMSFCR_EL1_FL, reg)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
		return -EOPNOTSUPP;

	set_spe_event_has_cx(event);
	reg = arm_spe_event_to_pmscr(event);
	if (!perfmon_capable() &&
	    (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
		return -EACCES;

	return 0;
}

static void arm_spe_pmu_start(struct perf_event *event, int flags)
{
	u64 reg;
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

	hwc->state = 0;
	arm_spe_perf_aux_output_begin(handle, event);
	if (hwc->state)
		return;

	reg = arm_spe_event_to_pmsfcr(event);
	write_sysreg_s(reg, SYS_PMSFCR_EL1);

	reg = arm_spe_event_to_pmsevfr(event);
	write_sysreg_s(reg, SYS_PMSEVFR_EL1);

	if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) {
		reg = arm_spe_event_to_pmsnevfr(event);
		write_sysreg_s(reg, SYS_PMSNEVFR_EL1);
	}

	reg = arm_spe_event_to_pmslatfr(event);
	write_sysreg_s(reg, SYS_PMSLATFR_EL1);

	if (flags & PERF_EF_RELOAD) {
		reg = arm_spe_event_to_pmsirr(event);
		write_sysreg_s(reg, SYS_PMSIRR_EL1);

		reg = local64_read(&hwc->period_left);
		write_sysreg_s(reg, SYS_PMSICR_EL1);
	}

	reg = arm_spe_event_to_pmscr(event);
	isb();
	write_sysreg_s(reg, SYS_PMSCR_EL1);
}

static void arm_spe_pmu_stop(struct perf_event *event, int flags)
{
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

	/* If we're already stopped, then nothing to do */
	if (hwc->state & PERF_HES_STOPPED)
		return;

	/* Stop all trace generation */
	arm_spe_pmu_disable_and_drain_local();

	if (flags & PERF_EF_UPDATE) {
		/*
		 * If there's a fault pending then ensure we contain it
		 * to this buffer, since we might be on the context-switch
		 * path.
		 */
		if (perf_get_aux(handle)) {
			enum arm_spe_pmu_buf_fault_action act;

			act = arm_spe_pmu_buf_get_fault_act(handle);
			if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
				arm_spe_perf_aux_output_end(handle);
			else
				write_sysreg_s(0, SYS_PMBSR_EL1);
		}

		/*
		 * This may also contain ECOUNT, but nobody else should
		 * be looking at period_left, since we forbid frequency
		 * based sampling.
		 */
		local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
		hwc->state |= PERF_HES_UPTODATE;
	}

	hwc->state |= PERF_HES_STOPPED;
}

static int arm_spe_pmu_add(struct perf_event *event, int flags)
{
	int ret = 0;
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;

	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
		return -ENOENT;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START) {
		arm_spe_pmu_start(event, PERF_EF_RELOAD);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	}

	return ret;
}

static void arm_spe_pmu_del(struct perf_event *event, int flags)
{
	arm_spe_pmu_stop(event, PERF_EF_UPDATE);
}

static void arm_spe_pmu_read(struct perf_event *event)
{
}

static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
				   int nr_pages, bool snapshot)
{
	int i, cpu = event->cpu;
	struct page **pglist;
	struct arm_spe_pmu_buf *buf;

	/* We need at least two pages for this to work. */
	if (nr_pages < 2)
		return NULL;

	/*
	 * We require an even number of pages for snapshot mode, so that
	 * we can effectively treat the buffer as consisting of two equal
	 * parts and give userspace a fighting chance of getting some
	 * useful data out of it.
	 */
	if (snapshot && (nr_pages & 1))
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();

	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
	if (!buf)
		return NULL;

	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
	if (!pglist)
		goto out_free_buf;

	for (i = 0; i < nr_pages; ++i)
		pglist[i] = virt_to_page(pages[i]);

	buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf->base)
		goto out_free_pglist;

	buf->nr_pages	= nr_pages;
	buf->snapshot	= snapshot;

	kfree(pglist);
	return buf;

out_free_pglist:
	kfree(pglist);
out_free_buf:
	kfree(buf);
	return NULL;
}

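/*
 * Note: the perf core hands us the AUX ring as an array of per-page kernel
 * addresses; vmap() above stitches them into one virtually-contiguous
 * region, so PMBPTR_EL1/PMBLIMITR_EL1 can be programmed with plain pointers
 * into buf->base and arm_spe_pmu_pad_buf() can memset() across page
 * boundaries. The matching vunmap() lives in arm_spe_pmu_free_aux() below.
 */
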
static void arm_spe_pmu_free_aux(void *aux)
{
	struct arm_spe_pmu_buf *buf = aux;

	vunmap(buf->base);
	kfree(buf);
}

/* Initialisation and teardown functions */
static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
{
	static atomic_t pmu_idx = ATOMIC_INIT(-1);

	int idx;
	char *name;
	struct device *dev = &spe_pmu->pdev->dev;

	spe_pmu->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.capabilities	= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
		.attr_groups	= arm_spe_pmu_attr_groups,
		/*
		 * We hitch a ride on the software context here, so that
		 * we can support per-task profiling (which is not possible
		 * with the invalid context as it doesn't get sched callbacks).
		 * This requires that userspace either uses a dummy event for
		 * perf_event_open, since the aux buffer is not set up until
		 * a subsequent mmap, or creates the profiling event in a
		 * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
		 * once the buffer has been created.
		 */
		.task_ctx_nr	= perf_sw_context,
		.event_init	= arm_spe_pmu_event_init,
		.add		= arm_spe_pmu_add,
		.del		= arm_spe_pmu_del,
		.start		= arm_spe_pmu_start,
		.stop		= arm_spe_pmu_stop,
		.read		= arm_spe_pmu_read,
		.setup_aux	= arm_spe_pmu_setup_aux,
		.free_aux	= arm_spe_pmu_free_aux,
	};

	idx = atomic_inc_return(&pmu_idx);
	name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
	if (!name) {
		dev_err(dev, "failed to allocate name for pmu %d\n", idx);
		return -ENOMEM;
	}

	return perf_pmu_register(&spe_pmu->pmu, name, -1);
}

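/*
 * Illustrative userspace flow (not part of the driver) matching the comment
 * above: create the event disabled, mmap the data and AUX areas, then enable
 * it. The "arm_spe_0" name and the sizes are examples only.
 *
 *	attr.type = <read from /sys/bus/event_source/devices/arm_spe_0/type>;
 *	attr.disabled = 1;
 *	fd = perf_event_open(&attr, pid, cpu, -1, 0);
 *	base = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *	pc = base;				// struct perf_event_mmap_page
 *	pc->aux_offset = 3 * page_size;
 *	pc->aux_size = 16 * page_size;
 *	aux = mmap(NULL, pc->aux_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, pc->aux_offset);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 */
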
static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
{
	perf_pmu_unregister(&spe_pmu->pmu);
}

static void __arm_spe_pmu_dev_probe(void *info)
{
	int fld;
	u64 reg;
	struct arm_spe_pmu *spe_pmu = info;
	struct device *dev = &spe_pmu->pdev->dev;

	fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
						   ID_AA64DFR0_EL1_PMSVer_SHIFT);
	if (!fld) {
		dev_err(dev,
			"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
			fld, smp_processor_id());
		return;
	}
	spe_pmu->pmsver = (u16)fld;

	/* Read PMBIDR first to determine whether or not we have access */
	reg = read_sysreg_s(SYS_PMBIDR_EL1);
	if (FIELD_GET(PMBIDR_EL1_P, reg)) {
		dev_err(dev,
			"profiling buffer owned by higher exception level\n");
		return;
	}

	/* Minimum alignment. If it's out-of-range, then fail the probe */
	fld = FIELD_GET(PMBIDR_EL1_ALIGN, reg);
	spe_pmu->align = 1 << fld;
	if (spe_pmu->align > SZ_2K) {
		dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
			fld, smp_processor_id());
		return;
	}

	/* It's now safe to read PMSIDR and figure out what we've got */
	reg = read_sysreg_s(SYS_PMSIDR_EL1);
	if (FIELD_GET(PMSIDR_EL1_FE, reg))
		spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;

	if (FIELD_GET(PMSIDR_EL1_FnE, reg))
		spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT;

	if (FIELD_GET(PMSIDR_EL1_FT, reg))
		spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;

	if (FIELD_GET(PMSIDR_EL1_FL, reg))
		spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;

	if (FIELD_GET(PMSIDR_EL1_ARCHINST, reg))
		spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;

	if (FIELD_GET(PMSIDR_EL1_LDS, reg))
		spe_pmu->features |= SPE_PMU_FEAT_LDS;

	if (FIELD_GET(PMSIDR_EL1_ERND, reg))
		spe_pmu->features |= SPE_PMU_FEAT_ERND;

	/* This field has a spaced out encoding, so just use a look-up */
	fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
	switch (fld) {
	case PMSIDR_EL1_INTERVAL_256:
		spe_pmu->min_period = 256;
		break;
	case PMSIDR_EL1_INTERVAL_512:
		spe_pmu->min_period = 512;
		break;
	case PMSIDR_EL1_INTERVAL_768:
		spe_pmu->min_period = 768;
		break;
	case PMSIDR_EL1_INTERVAL_1024:
		spe_pmu->min_period = 1024;
		break;
	case PMSIDR_EL1_INTERVAL_1536:
		spe_pmu->min_period = 1536;
		break;
	case PMSIDR_EL1_INTERVAL_2048:
		spe_pmu->min_period = 2048;
		break;
	case PMSIDR_EL1_INTERVAL_3072:
		spe_pmu->min_period = 3072;
		break;
	default:
		dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
			 fld);
		fallthrough;
	case PMSIDR_EL1_INTERVAL_4096:
		spe_pmu->min_period = 4096;
	}

	/* Maximum record size. If it's out-of-range, then fail the probe */
	fld = FIELD_GET(PMSIDR_EL1_MAXSIZE, reg);
	spe_pmu->max_record_sz = 1 << fld;
	if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
		dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
			fld, smp_processor_id());
		return;
	}

	fld = FIELD_GET(PMSIDR_EL1_COUNTSIZE, reg);
	switch (fld) {
	default:
		dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
			 fld);
		fallthrough;
	case PMSIDR_EL1_COUNTSIZE_12_BIT_SAT:
		spe_pmu->counter_sz = 12;
		break;
	case PMSIDR_EL1_COUNTSIZE_16_BIT_SAT:
		spe_pmu->counter_sz = 16;
	}

	dev_info(dev,
		 "probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
		 spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
		 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);

	spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
}

static void __arm_spe_pmu_reset_local(void)
{
	/*
	 * This is probably overkill, as we have no idea where we're
	 * draining any buffered data to...
	 */
	arm_spe_pmu_disable_and_drain_local();

	/* Reset the buffer base pointer */
	write_sysreg_s(0, SYS_PMBPTR_EL1);
	isb();

	/* Clear any pending management interrupts */
	write_sysreg_s(0, SYS_PMBSR_EL1);
	isb();
}

static void __arm_spe_pmu_setup_one(void *info)
{
	struct arm_spe_pmu *spe_pmu = info;

	__arm_spe_pmu_reset_local();
	enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
}

static void __arm_spe_pmu_stop_one(void *info)
{
	struct arm_spe_pmu *spe_pmu = info;

	disable_percpu_irq(spe_pmu->irq);
	__arm_spe_pmu_reset_local();
}

static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
	struct arm_spe_pmu *spe_pmu;

	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
		return 0;

	__arm_spe_pmu_setup_one(spe_pmu);
	return 0;
}

static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	struct arm_spe_pmu *spe_pmu;

	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
		return 0;

	__arm_spe_pmu_stop_one(spe_pmu);
	return 0;
}

static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
{
	int ret;
	cpumask_t *mask = &spe_pmu->supported_cpus;

	/* Make sure we probe the hardware on a relevant CPU */
	ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
	if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
		return -ENXIO;

	/* Request our PPIs (note that the IRQ is still disabled) */
	ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
				 spe_pmu->handle);
	if (ret)
		return ret;

	/*
	 * Register our hotplug notifier now so we don't miss any events.
	 * This will enable the IRQ for any supported CPUs that are already
	 * up.
	 */
	ret = cpuhp_state_add_instance(arm_spe_pmu_online,
				       &spe_pmu->hotplug_node);
	if (ret)
		free_percpu_irq(spe_pmu->irq, spe_pmu->handle);

	return ret;
}

static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
{
	cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
	free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
}

/* Driver and device probing */
static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
{
	struct platform_device *pdev = spe_pmu->pdev;
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return -ENXIO;

	if (!irq_is_percpu(irq)) {
		dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
		return -EINVAL;
	}

	if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
		dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
		return -EINVAL;
	}

	spe_pmu->irq = irq;
	return 0;
}

static const struct of_device_id arm_spe_pmu_of_match[] = {
	{ .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);

static const struct platform_device_id arm_spe_match[] = {
	{ ARMV8_SPE_PDEV_NAME, 0},
	{ }
};
MODULE_DEVICE_TABLE(platform, arm_spe_match);

static int arm_spe_pmu_device_probe(struct platform_device *pdev)
{
	int ret;
	struct arm_spe_pmu *spe_pmu;
	struct device *dev = &pdev->dev;

	/*
	 * If kernelspace is unmapped when running at EL0, then the SPE
	 * buffer will fault and prematurely terminate the AUX session.
	 */
	if (arm64_kernel_unmapped_at_el0()) {
		dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
		return -EPERM;
	}

	spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
	if (!spe_pmu)
		return -ENOMEM;

	spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
	if (!spe_pmu->handle)
		return -ENOMEM;

	spe_pmu->pdev = pdev;
	platform_set_drvdata(pdev, spe_pmu);

	ret = arm_spe_pmu_irq_probe(spe_pmu);
	if (ret)
		goto out_free_handle;

	ret = arm_spe_pmu_dev_init(spe_pmu);
	if (ret)
		goto out_free_handle;

	ret = arm_spe_pmu_perf_init(spe_pmu);
	if (ret)
		goto out_teardown_dev;

	return 0;

out_teardown_dev:
	arm_spe_pmu_dev_teardown(spe_pmu);
out_free_handle:
	free_percpu(spe_pmu->handle);
	return ret;
}

static int arm_spe_pmu_device_remove(struct platform_device *pdev)
{
	struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);

	arm_spe_pmu_perf_destroy(spe_pmu);
	arm_spe_pmu_dev_teardown(spe_pmu);
	free_percpu(spe_pmu->handle);
	return 0;
}

static struct platform_driver arm_spe_pmu_driver = {
	.id_table = arm_spe_match,
	.driver	= {
		.name		= DRVNAME,
		.of_match_table	= of_match_ptr(arm_spe_pmu_of_match),
		.suppress_bind_attrs = true,
	},
	.probe	= arm_spe_pmu_device_probe,
	.remove	= arm_spe_pmu_device_remove,
};

static int __init arm_spe_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
				      arm_spe_pmu_cpu_startup,
				      arm_spe_pmu_cpu_teardown);
	if (ret < 0)
		return ret;
	arm_spe_pmu_online = ret;

	ret = platform_driver_register(&arm_spe_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(arm_spe_pmu_online);

	return ret;
}

static void __exit arm_spe_pmu_exit(void)
{
	platform_driver_unregister(&arm_spe_pmu_driver);
	cpuhp_remove_multi_state(arm_spe_pmu_online);
}

module_init(arm_spe_pmu_init);
module_exit(arm_spe_pmu_exit);

MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");