 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
/*
 * ARMv8 supports a maximum of 32 events.
 * The cycle counter is included in this total.
 */
#define ARMPMU_MAX_HWEVENTS 32

static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

armpmu_get_max_events(void)
max_events = cpu_pmu->num_events;
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
return armpmu_get_max_events();
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED 0xFFFF
PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED 0xFFFF

armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX],
unsigned int cache_type, cache_op, cache_result, ret;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
if (ret == CACHE_OP_UNSUPPORTED)
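/*
 * Worked example of the config layout decoded above (a sketch based on the
 * generic perf cache-event encoding, where the cache type lives in bits
 * [7:0], the op in bits [15:8] and the result in bits [23:16]): an L1
 * data-cache read miss is PERF_COUNT_HW_CACHE_L1D (0), op READ (0) << 8,
 * result MISS (1) << 16, i.e. config == 0x10000, which ends up indexing
 * (*cache_map)[0][0][1].
 */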
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
int mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;

armpmu_map_raw_event(u32 raw_event_mask, u64 config)
return (int)(config & raw_event_mask);

static int map_cpu_event(struct perf_event *event,
const unsigned (*event_map)[PERF_COUNT_HW_MAX],
const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u64 config = event->attr.config;

switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
return armpmu_map_event(event_map, config);
case PERF_TYPE_HW_CACHE:
return armpmu_map_cache_event(cache_map, config);
return armpmu_map_raw_event(raw_event_mask, config);
armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;

if (unlikely(left <= -period)) {
local64_set(&hwc->period_left, left);
hwc->last_period = period;

if (unlikely(left <= 0)) {
local64_set(&hwc->period_left, left);
hwc->last_period = period;

if (left > (s64)armpmu->max_period)
left = armpmu->max_period;

local64_set(&hwc->prev_count, (u64)-left);
armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

perf_event_update_userpage(event);
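/*
 * Illustrative arithmetic for the programming above: with a 32-bit counter
 * (max_period == 0xffffffff), a remaining period of, say, left == 1000
 * means the counter is written with (u64)(-1000) & 0xffffffff ==
 * 0xfffffc18, so it overflows and raises the overflow interrupt after
 * exactly 1000 further increments.
 */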
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
u64 delta, prev_raw_count, new_raw_count;

prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = armpmu->read_counter(idx);

if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)

delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);

return new_raw_count;
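/*
 * The masking with max_period above makes the delta wrap-safe. For example,
 * with the 32-bit max_period of 0xffffffff used by this driver, if
 * prev_raw_count == 0xfffffff0 and the counter has since wrapped to
 * new_raw_count == 0x10, then (0x10 - 0xfffffff0) & 0xffffffff == 0x20,
 * i.e. the 32 events that actually occurred.
 */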
armpmu_read(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;

/* Don't read disabled counters! */
armpmu_event_update(event, hwc, hwc->idx);

armpmu_stop(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;

/*
 * ARM pmu always has to update the counter, so ignore
 * PERF_EF_UPDATE, see comments in armpmu_start().
 */
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
armpmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;

armpmu_start(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;

/*
 * ARM pmu always has to reprogram the period, so ignore
 * PERF_EF_RELOAD, see the comment below.
 */
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

/*
 * Set the period again. Some counters can't be stopped, so when we
 * were stopped we simply disabled the IRQ source and the counter
 * may have been left counting. If we don't do this step then we may
 * get an interrupt too soon or *way* too late if the overflow has
 * happened since disabling.
 */
armpmu_event_set_period(event, hwc, hwc->idx);
armpmu->enable(hwc, hwc->idx);

armpmu_del(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *hw_events = armpmu->get_hw_events();
struct hw_perf_event *hwc = &event->hw;

armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);

perf_event_update_userpage(event);

armpmu_add(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *hw_events = armpmu->get_hw_events();
struct hw_perf_event *hwc = &event->hw;

perf_pmu_disable(event->pmu);

/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(hw_events, hwc);

/*
 * If there is an event in the counter we are going to use then make
 * sure it is disabled.
 */
armpmu->disable(hwc, idx);
hw_events->events[idx] = event;

hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
armpmu_start(event, PERF_EF_RELOAD);

/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);

perf_pmu_enable(event->pmu);
validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;

if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)

return armpmu->get_event_idx(hw_events, &fake_event) >= 0;

validate_group(struct perf_event *event)
struct perf_event *sibling, *leader = event->group_leader;
struct pmu_hw_events fake_pmu;
DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

/*
 * Initialise the fake PMU. We only need to populate the
 * used_mask for the purposes of validation.
 */
memset(fake_used_mask, 0, sizeof(fake_used_mask));
fake_pmu.used_mask = fake_used_mask;

if (!validate_event(&fake_pmu, leader))

list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_pmu, sibling))

if (!validate_event(&fake_pmu, event))

armpmu_release_hardware(struct arm_pmu *armpmu)
struct platform_device *pmu_device = armpmu->plat_device;

irqs = min(pmu_device->num_resources, num_possible_cpus());

for (i = 0; i < irqs; ++i) {
if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
irq = platform_get_irq(pmu_device, i);
free_irq(irq, armpmu);

armpmu_reserve_hardware(struct arm_pmu *armpmu)
int i, err, irq, irqs;
struct platform_device *pmu_device = armpmu->plat_device;

pr_err("no PMU device registered\n");

irqs = min(pmu_device->num_resources, num_possible_cpus());

pr_err("no irqs for PMUs defined\n");

for (i = 0; i < irqs; ++i) {
irq = platform_get_irq(pmu_device, i);

/*
 * If we have a single PMU interrupt that we can't shift,
 * assume that we're running on a uniprocessor machine and
 * continue. Otherwise, continue without this interrupt.
 */
if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",

err = request_irq(irq, armpmu->handle_irq,
pr_err("unable to request IRQ%d for ARM PMU counters\n",
armpmu_release_hardware(armpmu);

cpumask_set_cpu(i, &armpmu->active_irqs);

hw_perf_event_destroy(struct perf_event *event)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
atomic_t *active_events = &armpmu->active_events;
struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
armpmu_release_hardware(armpmu);
mutex_unlock(pmu_reserve_mutex);

event_requires_mode_exclusion(struct perf_event_attr *attr)
return attr->exclude_idle || attr->exclude_user ||
attr->exclude_kernel || attr->exclude_hv;

__hw_perf_event_init(struct perf_event *event)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;

mapping = armpmu->map_event(event);

pr_debug("event %x:%llx not supported\n", event->attr.type,
/*
 * We don't assign an index until we actually place the event onto
 * hardware. Use -1 to signify that we haven't decided where to put it
 * yet. For SMP systems, each core has its own PMU so we can't do any
 * clever allocation or constraints checking at this point.
 */
hwc->config_base = 0;

/*
 * Check whether we need to exclude the counter from certain modes.
 */
if ((!armpmu->set_event_filter ||
armpmu->set_event_filter(hwc, &event->attr)) &&
event_requires_mode_exclusion(&event->attr)) {
pr_debug("ARM performance counters do not support mode exclusion\n");

/*
 * Store the event encoding into the config_base field.
 */
hwc->config_base |= (unsigned long)mapping;

if (!hwc->sample_period) {
/*
 * For non-sampling runs, limit the sample_period to half
 * of the counter width. That way, the new counter value
 * is far less likely to overtake the previous one unless
 * you have some serious IRQ latency issues.
 */
hwc->sample_period = armpmu->max_period >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
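/*
 * Concretely, with the 32-bit max_period used by this driver the default
 * non-sampling period above works out to 0xffffffff >> 1 == 0x7fffffff,
 * i.e. roughly 2^31 events between overflow interrupts.
 */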
if (event->group_leader != event) {
err = validate_group(event);

static int armpmu_event_init(struct perf_event *event)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
atomic_t *active_events = &armpmu->active_events;

if (armpmu->map_event(event) == -ENOENT)

event->destroy = hw_perf_event_destroy;

if (!atomic_inc_not_zero(active_events)) {
mutex_lock(&armpmu->reserve_mutex);
if (atomic_read(active_events) == 0)
err = armpmu_reserve_hardware(armpmu);

atomic_inc(active_events);
mutex_unlock(&armpmu->reserve_mutex);

err = __hw_perf_event_init(event);
hw_perf_event_destroy(event);

static void armpmu_enable(struct pmu *pmu)
struct arm_pmu *armpmu = to_arm_pmu(pmu);
struct pmu_hw_events *hw_events = armpmu->get_hw_events();
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

static void armpmu_disable(struct pmu *pmu)
struct arm_pmu *armpmu = to_arm_pmu(pmu);

static void __init armpmu_init(struct arm_pmu *armpmu)
atomic_set(&armpmu->active_events, 0);
mutex_init(&armpmu->reserve_mutex);

armpmu->pmu = (struct pmu) {
.pmu_enable = armpmu_enable,
.pmu_disable = armpmu_disable,
.event_init = armpmu_event_init,
.start = armpmu_start,

int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
return perf_pmu_register(&armpmu->pmu, name, type);
/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */
enum armv8_pmuv3_perf_types {
/* Required events. */
ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,

/* At least one of the following is required. */
ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,

/* Common architectural events. */
ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,

/* Common microarchitectural events. */
ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,

/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS 32
#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
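/*
 * For example, ARMV8_IDX_COUNTER0 (1) maps to hardware event counter 0 and
 * index 31 maps to event counter 30, while the cycle counter index (0)
 * wraps around to 31, which is the bit position the architecture reserves
 * for the cycle counter in the counter enable, interrupt enable and
 * overflow status registers.
 */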
/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK 0x1f
#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1 (1 << 31)
#define ARMV8_EXCLUDE_EL0 (1 << 30)
#define ARMV8_INCLUDE_EL2 (1 << 27)

static inline u32 armv8pmu_pmcr_read(void)
asm volatile("mrs %0, pmcr_el0" : "=r" (val));

static inline void armv8pmu_pmcr_write(u32 val)
val &= ARMV8_PMCR_MASK;
asm volatile("msr pmcr_el0, %0" :: "r" (val));

static inline int armv8pmu_has_overflowed(u32 pmovsr)
return pmovsr & ARMV8_OVERFLOWED_MASK;

static inline int armv8pmu_counter_valid(int idx)
return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST;

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
if (!armv8pmu_counter_valid(idx)) {
pr_err("CPU%u checking wrong counter %d overflow status\n",
smp_processor_id(), idx);
counter = ARMV8_IDX_TO_COUNTER(idx);
ret = pmnc & BIT(counter);

static inline int armv8pmu_select_counter(int idx)
if (!armv8pmu_counter_valid(idx)) {
pr_err("CPU%u selecting wrong PMNC counter %d\n",
smp_processor_id(), idx);
counter = ARMV8_IDX_TO_COUNTER(idx);
asm volatile("msr pmselr_el0, %0" :: "r" (counter));

static inline u32 armv8pmu_read_counter(int idx)
if (!armv8pmu_counter_valid(idx))
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV8_IDX_CYCLE_COUNTER)
asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
else if (armv8pmu_select_counter(idx) == idx)
asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

static inline void armv8pmu_write_counter(int idx, u32 value)
if (!armv8pmu_counter_valid(idx))
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV8_IDX_CYCLE_COUNTER)
asm volatile("msr pmccntr_el0, %0" :: "r" (value));
else if (armv8pmu_select_counter(idx) == idx)
asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
static inline void armv8pmu_write_evtype(int idx, u32 val)
if (armv8pmu_select_counter(idx) == idx) {
val &= ARMV8_EVTYPE_MASK;
asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));

static inline int armv8pmu_enable_counter(int idx)
if (!armv8pmu_counter_valid(idx)) {
pr_err("CPU%u enabling wrong PMNC counter %d\n",
smp_processor_id(), idx);
counter = ARMV8_IDX_TO_COUNTER(idx);
asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));

static inline int armv8pmu_disable_counter(int idx)
if (!armv8pmu_counter_valid(idx)) {
pr_err("CPU%u disabling wrong PMNC counter %d\n",
smp_processor_id(), idx);
counter = ARMV8_IDX_TO_COUNTER(idx);
asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));

static inline int armv8pmu_enable_intens(int idx)
if (!armv8pmu_counter_valid(idx)) {
pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
smp_processor_id(), idx);
counter = ARMV8_IDX_TO_COUNTER(idx);
asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));

static inline int armv8pmu_disable_intens(int idx)
if (!armv8pmu_counter_valid(idx)) {
pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
smp_processor_id(), idx);
counter = ARMV8_IDX_TO_COUNTER(idx);
asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
/* Clear the overflow flag in case an interrupt is pending. */
asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));

static inline u32 armv8pmu_getreset_flags(void)
asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

/* Write to clear flags */
value &= ARMV8_OVSR_MASK;
asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
struct pmu_hw_events *events = cpu_pmu->get_hw_events();

/*
 * Enable counter and interrupt, and set the counter to count
 * the event that we're interested in.
 */
raw_spin_lock_irqsave(&events->pmu_lock, flags);

armv8pmu_disable_counter(idx);

/*
 * Set event (if destined for PMNx counters).
 */
armv8pmu_write_evtype(idx, hwc->config_base);

/*
 * Enable interrupt for this counter
 */
armv8pmu_enable_intens(idx);

armv8pmu_enable_counter(idx);

raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
struct pmu_hw_events *events = cpu_pmu->get_hw_events();

/*
 * Disable counter and interrupt
 */
raw_spin_lock_irqsave(&events->pmu_lock, flags);

armv8pmu_disable_counter(idx);

/*
 * Disable interrupt for this counter
 */
armv8pmu_disable_intens(idx);

raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
struct perf_sample_data data;
struct pmu_hw_events *cpuc;
struct pt_regs *regs;

/*
 * Get and reset the IRQ flags
 */
pmovsr = armv8pmu_getreset_flags();

/*
 * Did an overflow occur?
 */
if (!armv8pmu_has_overflowed(pmovsr))

/*
 * Handle the counter(s) overflow(s)
 */
regs = get_irq_regs();

cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;

/* Ignore if we don't have an event. */

/*
 * We have a single interrupt for all counters. Check that
 * each counter has overflowed before we process it.
 */
if (!armv8pmu_counter_has_overflowed(pmovsr, idx))

armpmu_event_update(event, hwc, idx);
perf_sample_data_init(&data, 0, hwc->last_period);
if (!armpmu_event_set_period(event, hwc, idx))

if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(hwc, idx);
/*
 * Handle the pending perf events.
 *
 * Note: this call *must* be run with interrupts disabled. For
 * platforms that can have the PMU interrupts raised as an NMI, this
 * will not work.
 */
static void armv8pmu_start(void)
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();

raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

static void armv8pmu_stop(void)
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();

raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;

/* Always place a cycle-counter event into the dedicated cycle counter. */
if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
return ARMV8_IDX_CYCLE_COUNTER;

/*
 * For anything other than a cycle counter, try to use
 * the event counters.
 */
for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
if (!test_and_set_bit(idx, cpuc->used_mask))

/* The counters are all in use. */
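/*
 * Example of the allocation policy above: an event whose hardware type is
 * ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES (e.g. PERF_COUNT_HW_CPU_CYCLES) always
 * claims the dedicated cycle counter at index 0, while any other event
 * takes the lowest free programmable counter starting from
 * ARMV8_IDX_COUNTER0; if none is free the allocation fails and the event
 * cannot be scheduled on this PMU for now.
 */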
/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
unsigned long config_base = 0;

if (attr->exclude_idle)
if (attr->exclude_user)
config_base |= ARMV8_EXCLUDE_EL0;
if (attr->exclude_kernel)
config_base |= ARMV8_EXCLUDE_EL1;
if (!attr->exclude_hv)
config_base |= ARMV8_INCLUDE_EL2;

/*
 * Install the filter into config_base as this is used to
 * construct the event type.
 */
event->config_base = config_base;
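/*
 * As an example of the mapping above, a user-space-only event
 * (attr->exclude_kernel set, attr->exclude_user clear) ends up with
 * ARMV8_EXCLUDE_EL1 in its config_base, so the filter bits written into the
 * event type register stop the counter from advancing while the CPU is
 * executing at EL1.
 */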
static void armv8pmu_reset(void *info)
u32 idx, nb_cnt = cpu_pmu->num_events;

/* The counter and interrupt enable registers are unknown at reset. */
for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
armv8pmu_disable_event(NULL, idx);

/* Initialize & Reset PMNC: C and P bits. */
armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);

/* Disable access from userspace. */
asm volatile("msr pmuserenr_el0, %0" :: "r" (0));

static int armv8_pmuv3_map_event(struct perf_event *event)
return map_cpu_event(event, &armv8_pmuv3_perf_map,
&armv8_pmuv3_perf_cache_map, 0xFF);

static struct arm_pmu armv8pmu = {
.handle_irq = armv8pmu_handle_irq,
.enable = armv8pmu_enable_event,
.disable = armv8pmu_disable_event,
.read_counter = armv8pmu_read_counter,
.write_counter = armv8pmu_write_counter,
.get_event_idx = armv8pmu_get_event_idx,
.start = armv8pmu_start,
.stop = armv8pmu_stop,
.reset = armv8pmu_reset,
.max_period = (1LLU << 32) - 1,

static u32 __init armv8pmu_read_num_pmnc_events(void)
/* Read the nb of CNTx counters supported from PMNC */
nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

/* Add the CPU cycles counter and return */
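/*
 * E.g. a PMU implementing six event counters reports PMCR_EL0.N == 6 in
 * bits [15:11], so after the dedicated cycle counter is added on top (per
 * the comment above) the driver ends up with seven usable counters.
 */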
static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
armv8pmu.name = "arm/armv8-pmuv3";
armv8pmu.map_event = armv8_pmuv3_map_event;
armv8pmu.num_events = armv8pmu_read_num_pmnc_events();
armv8pmu.set_event_filter = armv8pmu_set_event_filter;

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
if (cpu_pmu && cpu_pmu->reset)
return on_each_cpu(cpu_pmu->reset, NULL, 1);
arch_initcall(cpu_pmu_reset);

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3"},

static int armpmu_device_probe(struct platform_device *pdev)
cpu_pmu->plat_device = pdev;

static struct platform_driver armpmu_driver = {
.of_match_table = armpmu_of_device_ids,
.probe = armpmu_device_probe,

static int __init register_pmu_driver(void)
return platform_driver_register(&armpmu_driver);
device_initcall(register_pmu_driver);

static struct pmu_hw_events *armpmu_get_cpu_events(void)
return &__get_cpu_var(cpu_hw_events);

static void __init cpu_pmu_init(struct arm_pmu *armpmu)
for_each_possible_cpu(cpu) {
struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
events->events = per_cpu(hw_events, cpu);
events->used_mask = per_cpu(used_mask, cpu);
raw_spin_lock_init(&events->pmu_lock);
armpmu->get_hw_events = armpmu_get_cpu_events;

static int __init init_hw_perf_events(void)
u64 dfr = read_cpuid(ID_AA64DFR0_EL1);

switch ((dfr >> 8) & 0xf) {
case 0x1: /* PMUv3 */
cpu_pmu = armv8_pmuv3_pmu_init();

pr_info("enabled with %s PMU driver, %d counters available\n",
cpu_pmu->name, cpu_pmu->num_events);
cpu_pmu_init(cpu_pmu);
armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
pr_info("no hardware support available\n");
early_initcall(init_hw_perf_events);

/*
 * Callchain handling code.
 */
struct frame_tail __user *fp;
} __attribute__((packed));
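/*
 * A sketch of the AArch64 frame record this struct mirrors: the frame
 * pointer (x29) points at a pair of 64-bit values, the saved caller frame
 * pointer followed by the saved link register (x30), so a user-space
 * backtrace can be taken by repeatedly copying this pair in from the
 * address held in fp, recording lr, and then following the saved fp to the
 * next (higher-addressed) frame.
 */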
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
struct perf_callchain_entry *entry)
struct frame_tail buftail;

/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))

pagefault_disable();
err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));

perf_callchain_store(entry, buftail.lr);

/*
 * Frame pointers should strictly progress back up the stack
 * (towards higher addresses).
 */
if (tail >= buftail.fp)

void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
struct frame_tail __user *tail;

if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */

perf_callchain_store(entry, regs->pc);
tail = (struct frame_tail __user *)regs->regs[29];

while (entry->nr < PERF_MAX_STACK_DEPTH &&
tail && !((unsigned long)tail & 0xf))
tail = user_backtrace(tail, entry);
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
struct perf_callchain_entry *entry = data;
perf_callchain_store(entry, frame->pc);

void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
struct stackframe frame;

if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */

frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;

walk_stackframe(&frame, callchain_trace, entry);

unsigned long perf_instruction_pointer(struct pt_regs *regs)
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
return perf_guest_cbs->get_guest_ip();

return instruction_pointer(regs);

unsigned long perf_misc_flags(struct pt_regs *regs)
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
if (perf_guest_cbs->is_user_mode())
misc |= PERF_RECORD_MISC_GUEST_USER;
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
misc |= PERF_RECORD_MISC_KERNEL;