// SPDX-License-Identifier: GPL-2.0
/*
 * This driver adds support for perf events to use the Performance
 * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
 * to monitor that node.
 *
 * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
 * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
 * to a 4K boundary. For example, the PMCG at 0xff88840000 is named
 * smmuv3_pmcg_ff88840.
 *
 * Filtering by stream id is done by specifying filtering parameters
 * with the event. Options are:
 *   filter_enable    - 0 = no filtering, 1 = filtering enabled
 *   filter_span      - 0 = exact match, 1 = pattern match
 *   filter_stream_id - pattern to filter against
 *
 * To match a partial StreamID where the X most-significant bits must match
 * but the Y least-significant bits might differ, STREAMID is programmed
 * with a value that contains:
 *   STREAMID[Y - 1] == 0.
 *   STREAMID[Y - 2:0] == 1 (where Y > 1).
 * The remaining implemented bits of STREAMID (X bits, from bit Y upwards)
 * contain a value to match against the corresponding bits of the event's
 * StreamID.
 *
 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
 *                    filter_span=1,filter_stream_id=0x42/ -a netperf
 * This applies filter pattern 0x42 to transaction events, which means events
 * matching stream ids 0x42 and 0x43 are counted. Further filtering
 * information is available in the SMMU documentation.
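 *
 * A further worked example, derived from the span-matching rules above:
 * to count the eight stream ids 0x40-0x47 (Y = 3 low bits may differ),
 * use filter_span=1 and filter_stream_id=0x43, i.e. bit 2 clear,
 * bits [1:0] set, and the remaining bits taken from 0x40.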
 *
 * SMMU events are not attributable to a CPU, so task mode and sampling
 * are not supported.
 */
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#define SMMU_PMCG_EVCNTR0               0x0
#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
#define SMMU_PMCG_EVTYPER0              0x400
#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
#define SMMU_PMCG_SID_SPAN_SHIFT        29
#define SMMU_PMCG_SMR0                  0xA00
#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
#define SMMU_PMCG_CNTENSET0             0xC00
#define SMMU_PMCG_CNTENCLR0             0xC20
#define SMMU_PMCG_INTENSET0             0xC40
#define SMMU_PMCG_INTENCLR0             0xC60
#define SMMU_PMCG_OVSCLR0               0xC80
#define SMMU_PMCG_OVSSET0               0xCC0
#define SMMU_PMCG_CFGR                  0xE00
#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
#define SMMU_PMCG_CFGR_MSI              BIT(21)
#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
#define SMMU_PMCG_CFGR_SIZE             GENMASK(13, 8)
#define SMMU_PMCG_CFGR_NCTR             GENMASK(5, 0)
#define SMMU_PMCG_CR                    0xE04
#define SMMU_PMCG_CR_ENABLE             BIT(0)
#define SMMU_PMCG_IIDR                  0xE08
#define SMMU_PMCG_IIDR_PRODUCTID        GENMASK(31, 20)
#define SMMU_PMCG_IIDR_VARIANT          GENMASK(19, 16)
#define SMMU_PMCG_IIDR_REVISION         GENMASK(15, 12)
#define SMMU_PMCG_IIDR_IMPLEMENTER      GENMASK(11, 0)
#define SMMU_PMCG_CEID0                 0xE20
#define SMMU_PMCG_CEID1                 0xE28
#define SMMU_PMCG_IRQ_CTRL              0xE50
#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
#define SMMU_PMCG_IRQ_CFG0              0xE58
#define SMMU_PMCG_IRQ_CFG1              0xE60
#define SMMU_PMCG_IRQ_CFG2              0xE64

/* IMP-DEF ID registers */
#define SMMU_PMCG_PIDR0                 0xFE0
#define SMMU_PMCG_PIDR0_PART_0          GENMASK(7, 0)
#define SMMU_PMCG_PIDR1                 0xFE4
#define SMMU_PMCG_PIDR1_DES_0           GENMASK(7, 4)
#define SMMU_PMCG_PIDR1_PART_1          GENMASK(3, 0)
#define SMMU_PMCG_PIDR2                 0xFE8
#define SMMU_PMCG_PIDR2_REVISION        GENMASK(7, 4)
#define SMMU_PMCG_PIDR2_DES_1           GENMASK(2, 0)
#define SMMU_PMCG_PIDR3                 0xFEC
#define SMMU_PMCG_PIDR3_REVAND          GENMASK(7, 4)
#define SMMU_PMCG_PIDR4                 0xFD0
#define SMMU_PMCG_PIDR4_DES_2           GENMASK(3, 0)

/* MSI config fields */
#define MSI_CFG0_ADDR_MASK              GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE   0x1

#define SMMU_PMCG_DEFAULT_FILTER_SPAN   1
#define SMMU_PMCG_DEFAULT_FILTER_SID    GENMASK(31, 0)

#define SMMU_PMCG_MAX_COUNTERS          64
#define SMMU_PMCG_ARCH_MAX_EVENTS       128

#define SMMU_PMCG_PA_SHIFT              12
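
/* Driver-internal quirk flags (see smmu_pmu_get_acpi_options()) */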
#define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)
#define SMMU_PMCG_HARDEN_DISABLE        BIT(1)
static int cpuhp_state_num;

        struct hlist_node node;
        struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
        DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
        DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
        unsigned int num_counters;
        void __iomem *reg_base;
        void __iomem *reloc_base;

#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)    \
        static inline u32 get_##_name(struct perf_event *event)        \
        {                                                               \
                return FIELD_GET(GENMASK_ULL(_end, _start),             \
                                 event->attr._config);                  \
        }

SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
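
/*
 * For illustration: a perf invocation using
 * filter_enable=1,filter_span=1,filter_stream_id=0x42 (as in the header
 * example) ends up with attr.config1 == 0x300000042, so the extractors
 * above yield filter_enable = 1, filter_span = 1 and
 * filter_stream_id = 0x42.
 */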
static inline void smmu_pmu_enable(struct pmu *pmu)
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

        writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
               smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
        writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);

static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
                                       struct perf_event *event, int idx);

static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

        for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
                smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);

        smmu_pmu_enable(pmu);

static inline void smmu_pmu_disable(struct pmu *pmu)
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

        writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
        writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

        /*
         * The global disable of the PMU sometimes fails to stop the counting.
         * Harden this by writing an invalid event type to each used counter
         * to forcibly stop counting.
         */
        for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
                writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));

        smmu_pmu_disable(pmu);
static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
        if (smmu_pmu->counter_mask & BIT(32))
                writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
                writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
        if (smmu_pmu->counter_mask & BIT(32))
                value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
                value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
        writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);

static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
        writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);

static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
        writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);

static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
        writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);

static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
        writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));

static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
        writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
static void smmu_pmu_event_update(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
        u64 delta, prev, now;

                prev = local64_read(&hwc->prev_count);
                now = smmu_pmu_counter_get_value(smmu_pmu, idx);
        } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

        /* handle overflow. */
        delta &= smmu_pmu->counter_mask;

        local64_add(delta, &event->count);
static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
                                struct hw_perf_event *hwc)
        if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
                /*
                 * On platforms that require this quirk, if the counter starts
                 * at a value below half of the counter range and wraps, the
                 * current overflow-handling logic may not work. Such platforms
                 * are expected to implement the full 64 counter bits, so that
                 * this possibility is remote (e.g. HiSilicon HIP08).
                 */
                new = smmu_pmu_counter_get_value(smmu_pmu, idx);
                /*
                 * We limit the max period to half of the counter's maximum
                 * value, so that even in the case of extreme interrupt latency
                 * the counter will (hopefully) not wrap past its initial
                 * value.
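                 * For example, a 32-bit counter (counter_mask == 0xffffffff)
                 * is preset to 0x7fffffff here.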
                 */
                new = smmu_pmu->counter_mask >> 1;
                smmu_pmu_counter_set_value(smmu_pmu, idx, new);

        local64_set(&hwc->prev_count, new);
static void smmu_pmu_set_event_filter(struct perf_event *event,
                                      int idx, u32 span, u32 sid)
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

        evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
        smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
        smmu_pmu_set_smr(smmu_pmu, idx, sid);

static bool smmu_pmu_check_global_filter(struct perf_event *curr,
                                         struct perf_event *new)
        if (get_filter_enable(new) != get_filter_enable(curr))

        if (!get_filter_enable(new))

        return get_filter_span(new) == get_filter_span(curr) &&
               get_filter_stream_id(new) == get_filter_stream_id(curr);
static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
                                       struct perf_event *event, int idx)
        unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
        bool filter_en = !!get_filter_enable(event);

        span = filter_en ? get_filter_span(event) :
                           SMMU_PMCG_DEFAULT_FILTER_SPAN;
        sid = filter_en ? get_filter_stream_id(event) :
                          SMMU_PMCG_DEFAULT_FILTER_SID;

        cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
        /*
         * Per-counter filtering, or scheduling the first globally-filtered
         * event into an empty PMU so idx == 0 and it works out equivalent.
         */
        if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
                smmu_pmu_set_event_filter(event, idx, span, sid);

        /* Otherwise, must match whatever's currently scheduled */
        if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
                smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
                                  struct perf_event *event)
        unsigned int num_ctrs = smmu_pmu->num_counters;

        idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
                /* The counters are all in use. */

        err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);

        set_bit(idx, smmu_pmu->used_counters);

static bool smmu_pmu_events_compatible(struct perf_event *curr,
                                       struct perf_event *new)
        if (new->pmu != curr->pmu)

        if (to_smmu_pmu(new->pmu)->global_filter &&
            !smmu_pmu_check_global_filter(curr, new))
/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static int smmu_pmu_event_init(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
        struct device *dev = smmu_pmu->dev;
        struct perf_event *sibling;
        int group_num_events = 1;

        if (event->attr.type != event->pmu->type)

        if (hwc->sample_period) {
                dev_dbg(dev, "Sampling not supported\n");

        if (event->cpu < 0) {
                dev_dbg(dev, "Per-task mode not supported\n");

        /* Verify specified event is supported on this PMU */
        event_id = get_event(event);
        if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
            (!test_bit(event_id, smmu_pmu->supported_events))) {
                dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);

        /* Don't allow groups with mixed PMUs, except for s/w events */
        if (!is_software_event(event->group_leader)) {
                if (!smmu_pmu_events_compatible(event->group_leader, event))

                if (++group_num_events > smmu_pmu->num_counters)

        for_each_sibling_event(sibling, event->group_leader) {
                if (is_software_event(sibling))

                if (!smmu_pmu_events_compatible(sibling, event))

                if (++group_num_events > smmu_pmu->num_counters)

        /*
         * Ensure all events are on the same cpu so all events are in the
         * same cpu context, to avoid races on pmu_enable etc.
         */
        event->cpu = smmu_pmu->on_cpu;
static void smmu_pmu_event_start(struct perf_event *event, int flags)
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        smmu_pmu_set_period(smmu_pmu, hwc);

        smmu_pmu_counter_enable(smmu_pmu, idx);

static void smmu_pmu_event_stop(struct perf_event *event, int flags)
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->state & PERF_HES_STOPPED)

        smmu_pmu_counter_disable(smmu_pmu, idx);
        /* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
        smmu_pmu_event_update(event);
        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;

static int smmu_pmu_event_add(struct perf_event *event, int flags)
        struct hw_perf_event *hwc = &event->hw;
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

        idx = smmu_pmu_get_event_idx(smmu_pmu, event);

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        smmu_pmu->events[idx] = event;
        local64_set(&hwc->prev_count, 0);

        smmu_pmu_interrupt_enable(smmu_pmu, idx);

        if (flags & PERF_EF_START)
                smmu_pmu_event_start(event, flags);

        /* Propagate changes to the userspace mapping. */
        perf_event_update_userpage(event);

static void smmu_pmu_event_del(struct perf_event *event, int flags)
        struct hw_perf_event *hwc = &event->hw;
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

        smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
        smmu_pmu_interrupt_disable(smmu_pmu, idx);
        smmu_pmu->events[idx] = NULL;
        clear_bit(idx, smmu_pmu->used_counters);

        perf_event_update_userpage(event);

static void smmu_pmu_event_read(struct perf_event *event)
        smmu_pmu_event_update(event);
static ssize_t smmu_pmu_cpumask_show(struct device *dev,
                                     struct device_attribute *attr,
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));

static struct device_attribute smmu_pmu_cpumask_attr =
        __ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);

static struct attribute *smmu_pmu_cpumask_attrs[] = {
        &smmu_pmu_cpumask_attr.attr,

static const struct attribute_group smmu_pmu_cpumask_group = {
        .attrs = smmu_pmu_cpumask_attrs,

static ssize_t smmu_pmu_event_show(struct device *dev,
                                   struct device_attribute *attr, char *page)
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

        return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);

#define SMMU_EVENT_ATTR(name, config)                   \
        PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config)

static struct attribute *smmu_pmu_events[] = {
        SMMU_EVENT_ATTR(cycles, 0),
        SMMU_EVENT_ATTR(transaction, 1),
        SMMU_EVENT_ATTR(tlb_miss, 2),
        SMMU_EVENT_ATTR(config_cache_miss, 3),
        SMMU_EVENT_ATTR(trans_table_walk_access, 4),
        SMMU_EVENT_ATTR(config_struct_access, 5),
        SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
        SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
                                         struct attribute *attr, int unused)
        struct device *dev = kobj_to_dev(kobj);
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

        if (test_bit(pmu_attr->id, smmu_pmu->supported_events))

static const struct attribute_group smmu_pmu_events_group = {
        .attrs = smmu_pmu_events,
        .is_visible = smmu_pmu_event_is_visible,

static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
                                             struct device_attribute *attr,
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

        return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);

static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
                                                struct attribute *attr,
        struct device *dev = kobj_to_dev(kobj);
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

static struct device_attribute smmu_pmu_identifier_attr =
        __ATTR(identifier, 0444, smmu_pmu_identifier_attr_show, NULL);

static struct attribute *smmu_pmu_identifier_attrs[] = {
        &smmu_pmu_identifier_attr.attr,

static const struct attribute_group smmu_pmu_identifier_group = {
        .attrs = smmu_pmu_identifier_attrs,
        .is_visible = smmu_pmu_identifier_attr_visible,

PMU_FORMAT_ATTR(event,            "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
PMU_FORMAT_ATTR(filter_span,      "config1:32");
PMU_FORMAT_ATTR(filter_enable,    "config1:33");
static struct attribute *smmu_pmu_formats[] = {
        &format_attr_event.attr,
        &format_attr_filter_stream_id.attr,
        &format_attr_filter_span.attr,
        &format_attr_filter_enable.attr,

static const struct attribute_group smmu_pmu_format_group = {
        .attrs = smmu_pmu_formats,

static const struct attribute_group *smmu_pmu_attr_grps[] = {
        &smmu_pmu_cpumask_group,
        &smmu_pmu_events_group,
        &smmu_pmu_format_group,
        &smmu_pmu_identifier_group,
/*
 * Generic device handlers
 */

static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
        struct smmu_pmu *smmu_pmu;

        smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
        if (cpu != smmu_pmu->on_cpu)

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)

        perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
        smmu_pmu->on_cpu = target;
        WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));

static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
        struct smmu_pmu *smmu_pmu = data;
        DECLARE_BITMAP(ovs, BITS_PER_TYPE(u64));

        ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);

        writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

        bitmap_from_u64(ovs, ovsr);
        for_each_set_bit(idx, ovs, smmu_pmu->num_counters) {
                struct perf_event *event = smmu_pmu->events[idx];
                struct hw_perf_event *hwc;

                if (WARN_ON_ONCE(!event))

                smmu_pmu_event_update(event);
                smmu_pmu_set_period(smmu_pmu, hwc);
static void smmu_pmu_free_msis(void *data)
        struct device *dev = data;

        platform_msi_domain_free_irqs(dev);

static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
        phys_addr_t doorbell;
        struct device *dev = msi_desc_to_dev(desc);
        struct smmu_pmu *pmu = dev_get_drvdata(dev);

        doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
        doorbell &= MSI_CFG0_ADDR_MASK;

        writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
        writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
        writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
                       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);

static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
        struct device *dev = pmu->dev;

        /* Clear MSI address reg */
        writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);

        /* MSI supported or not */
        if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))

        ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
                dev_warn(dev, "failed to allocate MSIs\n");

        pmu->irq = msi_get_virq(dev, 0);

        /* Add callback to free MSIs on teardown */
        devm_add_action(dev, smmu_pmu_free_msis, dev);
static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
        unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
        int irq, ret = -ENXIO;

        smmu_pmu_setup_msi(pmu);

        ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
                               flags, "smmuv3-pmu", pmu);

static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
        u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

        smmu_pmu_disable(&smmu_pmu->pmu);

        /* Disable counter and interrupt */
        writeq_relaxed(counter_present_mask,
                       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
        writeq_relaxed(counter_present_mask,
                       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
        writeq_relaxed(counter_present_mask,
                       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
        model = *(u32 *)dev_get_platdata(smmu_pmu->dev);

        case IORT_SMMU_V3_PMCG_HISI_HIP08:
                /* HiSilicon Erratum 162001800 */
                smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
        case IORT_SMMU_V3_PMCG_HISI_HIP09:
                smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;

        dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
static bool smmu_pmu_coresight_id_regs(struct smmu_pmu *smmu_pmu)
        return of_device_is_compatible(smmu_pmu->dev->of_node,

static void smmu_pmu_get_iidr(struct smmu_pmu *smmu_pmu)
        u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);

        if (!iidr && smmu_pmu_coresight_id_regs(smmu_pmu)) {
                u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0);
                u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1);
                u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2);
                u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3);
                u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4);

                u32 productid = FIELD_GET(SMMU_PMCG_PIDR0_PART_0, pidr0) |
                                (FIELD_GET(SMMU_PMCG_PIDR1_PART_1, pidr1) << 8);
                u32 variant = FIELD_GET(SMMU_PMCG_PIDR2_REVISION, pidr2);
                u32 revision = FIELD_GET(SMMU_PMCG_PIDR3_REVAND, pidr3);
                                  FIELD_GET(SMMU_PMCG_PIDR1_DES_0, pidr1) |
                                  (FIELD_GET(SMMU_PMCG_PIDR2_DES_1, pidr2) << 4) |
                                  (FIELD_GET(SMMU_PMCG_PIDR4_DES_2, pidr4) << 8);

                iidr = FIELD_PREP(SMMU_PMCG_IIDR_PRODUCTID, productid) |
                       FIELD_PREP(SMMU_PMCG_IIDR_VARIANT, variant) |
                       FIELD_PREP(SMMU_PMCG_IIDR_REVISION, revision) |
                       FIELD_PREP(SMMU_PMCG_IIDR_IMPLEMENTER, implementer);

        smmu_pmu->iidr = iidr;
static int smmu_pmu_probe(struct platform_device *pdev)
        struct smmu_pmu *smmu_pmu;
        struct resource *res_0;
        struct device *dev = &pdev->dev;

        smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);

        platform_set_drvdata(pdev, smmu_pmu);
        smmu_pmu->pmu = (struct pmu) {
                .module         = THIS_MODULE,
                .task_ctx_nr    = perf_invalid_context,
                .pmu_enable     = smmu_pmu_enable,
                .pmu_disable    = smmu_pmu_disable,
                .event_init     = smmu_pmu_event_init,
                .add            = smmu_pmu_event_add,
                .del            = smmu_pmu_event_del,
                .start          = smmu_pmu_event_start,
                .stop           = smmu_pmu_event_stop,
                .read           = smmu_pmu_event_read,
                .attr_groups    = smmu_pmu_attr_grps,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,

        smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
        if (IS_ERR(smmu_pmu->reg_base))
                return PTR_ERR(smmu_pmu->reg_base);

        cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

        /* Determine if page 1 is present */
        if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
                smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
                if (IS_ERR(smmu_pmu->reloc_base))
                        return PTR_ERR(smmu_pmu->reloc_base);
                smmu_pmu->reloc_base = smmu_pmu->reg_base;
        irq = platform_get_irq_optional(pdev, 0);

        ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
        ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
        bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
                          SMMU_PMCG_ARCH_MAX_EVENTS);

        smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

        smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);

        reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
        smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);

        smmu_pmu_reset(smmu_pmu);

        err = smmu_pmu_setup_irq(smmu_pmu);
                dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);

        smmu_pmu_get_iidr(smmu_pmu);
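
        /*
         * The PMU name below follows the convention described in the header
         * comment: e.g. a PMCG whose page 0 is at physical address
         * 0xff88840000 is registered as "smmuv3_pmcg_ff88840".
         */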
        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
                              (res_0->start) >> SMMU_PMCG_PA_SHIFT);
                dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
        smmu_pmu_get_acpi_options(smmu_pmu);

        /*
         * For platforms that suffer from this quirk, disabling the PMU
         * sometimes fails to stop the counters, which leads to inaccurate
         * or erroneous counts. Forcibly disable the counters with the
         * quirk handlers below.
         */
        if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
                smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
                smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
        /* Pick one CPU to be the preferred one to use */
        smmu_pmu->on_cpu = raw_smp_processor_id();
        WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));

        err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
                dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",

        err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
                dev_err(dev, "Error %d registering PMU @%pa\n",

        dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
                 &res_0->start, smmu_pmu->num_counters,
                 smmu_pmu->global_filter ? "Global(Counter0)" :

        cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
static int smmu_pmu_remove(struct platform_device *pdev)
        struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

        perf_pmu_unregister(&smmu_pmu->pmu);
        cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);

static void smmu_pmu_shutdown(struct platform_device *pdev)
        struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

        smmu_pmu_disable(&smmu_pmu->pmu);

static const struct of_device_id smmu_pmu_of_match[] = {
        { .compatible = "arm,smmu-v3-pmcg" },

MODULE_DEVICE_TABLE(of, smmu_pmu_of_match);

static struct platform_driver smmu_pmu_driver = {
                .name = "arm-smmu-v3-pmcg",
                .of_match_table = of_match_ptr(smmu_pmu_of_match),
                .suppress_bind_attrs = true,
        .probe = smmu_pmu_probe,
        .remove = smmu_pmu_remove,
        .shutdown = smmu_pmu_shutdown,
static int __init arm_smmu_pmu_init(void)
        cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                                  "perf/arm/pmcg:online",
                                                  smmu_pmu_offline_cpu);
        if (cpuhp_state_num < 0)
                return cpuhp_state_num;

        ret = platform_driver_register(&smmu_pmu_driver);
                cpuhp_remove_multi_state(cpuhp_state_num);

module_init(arm_smmu_pmu_init);

static void __exit arm_smmu_pmu_exit(void)
        platform_driver_unregister(&smmu_pmu_driver);
        cpuhp_remove_multi_state(cpuhp_state_num);

module_exit(arm_smmu_pmu_exit);

MODULE_ALIAS("platform:arm-smmu-v3-pmcg");
MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_LICENSE("GPL v2");