// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/local64.h>
#include <asm/sysreg.h>
#include <soc/qcom/kryo-l2-accessors.h>

#define MAX_L2_CTRS             9

#define L2PMCR_NUM_EV_SHIFT     11
#define L2PMCR_NUM_EV_MASK      0x1F

#define L2PMCR                  0x400
#define L2PMCNTENCLR            0x403
#define L2PMCNTENSET            0x404
#define L2PMINTENCLR            0x405
#define L2PMINTENSET            0x406
#define L2PMOVSCLR              0x407
#define L2PMOVSSET              0x408
#define L2PMCCNTCR              0x409
#define L2PMCCNTR               0x40A
#define L2PMCCNTSR              0x40C
#define L2PMRESR                0x410
#define IA_L2PMXEVCNTCR_BASE    0x420
#define IA_L2PMXEVCNTR_BASE     0x421
#define IA_L2PMXEVFILTER_BASE   0x423
#define IA_L2PMXEVTYPER_BASE    0x424

#define IA_L2_REG_OFFSET        0x10

#define L2PMXEVFILTER_SUFILTER_ALL      0x000E0000
#define L2PMXEVFILTER_ORGFILTER_IDINDEP 0x00000004
#define L2PMXEVFILTER_ORGFILTER_ALL     0x00000003

#define L2EVTYPER_REG_SHIFT     3

#define L2PMRESR_GROUP_BITS     8
#define L2PMRESR_GROUP_MASK     GENMASK(7, 0)

#define L2CYCLE_CTR_BIT         31
#define L2CYCLE_CTR_RAW_CODE    0xFE

#define L2PMCR_RESET_ALL        0x6
#define L2PMCR_COUNTERS_ENABLE  0x1
#define L2PMCR_COUNTERS_DISABLE 0x0

#define L2PMRESR_EN             BIT_ULL(63)

#define L2_EVT_MASK             0x00000FFF
#define L2_EVT_CODE_MASK        0x00000FF0
#define L2_EVT_GRP_MASK         0x0000000F
#define L2_EVT_CODE_SHIFT       4
#define L2_EVT_GRP_SHIFT        0

#define L2_EVT_CODE(event)   (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT)
#define L2_EVT_GROUP(event)  (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT)

#define L2_EVT_GROUP_MAX        7

#define L2_COUNTER_RELOAD       BIT_ULL(31)
#define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63)

#define reg_idx(reg, i)         (((i) * IA_L2_REG_OFFSET) + reg##_BASE)

/*
 * Events
 */
#define L2_EVENT_CYCLES                    0xfe
#define L2_EVENT_DCACHE_OPS                0x400
#define L2_EVENT_ICACHE_OPS                0x401
#define L2_EVENT_TLBI                      0x402
#define L2_EVENT_BARRIERS                  0x403
#define L2_EVENT_TOTAL_READS               0x405
#define L2_EVENT_TOTAL_WRITES              0x406
#define L2_EVENT_TOTAL_REQUESTS            0x407
#define L2_EVENT_LDREX                     0x420
#define L2_EVENT_STREX                     0x421
#define L2_EVENT_CLREX                     0x422

struct cluster_pmu;

/*
 * Aggregate PMU. Implements the core pmu functions and manages
 * the hardware PMUs.
 */
struct l2cache_pmu {
	struct hlist_node node;
	u32 num_pmus;
	struct pmu pmu;
	int num_counters;
	cpumask_t cpumask;
	struct platform_device *pdev;
	struct cluster_pmu * __percpu *pmu_cluster;
	struct list_head clusters;
};

/*
 * The cache is made up of one or more clusters, each cluster has its own PMU.
 * Each cluster is associated with one or more CPUs.
 * This structure represents one of the hardware PMUs.
 *
 * Events can be envisioned as a 2-dimensional array. Each column represents
 * a group of events. There are 8 groups. Only one entry from each
 * group can be in use at a time.
 *
 * Events are specified as 0xCCG, where CC is 2 hex digits specifying
 * the code (array row) and G specifies the group (column).
 *
 * In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE
 * which is outside the above scheme.
 */
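
/*
 * For example (illustrative, derived from the encoding above): config 0x405
 * (total-reads) decodes to L2_EVT_CODE(0x405) = 0x40 and
 * L2_EVT_GROUP(0x405) = 0x5, i.e. row 0x40, column 5. Any other event whose
 * low nibble is 5 lives in the same column and cannot be counted at the same
 * time on this cluster.
 */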
struct cluster_pmu {
	struct list_head next;
	struct perf_event *events[MAX_L2_CTRS];
	struct l2cache_pmu *l2cache_pmu;
	DECLARE_BITMAP(used_counters, MAX_L2_CTRS);
	DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX + 1);
	int irq;
	int cluster_id;
	/* The CPU that is used for collecting events on this cluster */
	int on_cpu;
	/* All the CPUs associated with this cluster */
	cpumask_t cluster_cpus;
	spinlock_t pmu_lock;
};

#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

static u32 l2_cycle_ctr_idx;
static u32 l2_counter_present_mask;

static inline u32 idx_to_reg_bit(u32 idx)
{
	if (idx == l2_cycle_ctr_idx)
		return BIT(L2CYCLE_CTR_BIT);

	return BIT(idx);
}

static inline struct cluster_pmu *get_cluster_pmu(
	struct l2cache_pmu *l2cache_pmu, int cpu)
{
	return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}

static void cluster_pmu_reset(void)
{
	/* Reset all counters */
	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
	kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
	kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
	kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
}

static inline void cluster_pmu_enable(void)
{
	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
}

static inline void cluster_pmu_disable(void)
{
	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
}

static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
{
	if (idx == l2_cycle_ctr_idx)
		kryo_l2_set_indirect_reg(L2PMCCNTR, value);
	else
		kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
}

static inline u64 cluster_pmu_counter_get_value(u32 idx)
{
	u64 value;

	if (idx == l2_cycle_ctr_idx)
		value = kryo_l2_get_indirect_reg(L2PMCCNTR);
	else
		value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));

	return value;
}

static inline void cluster_pmu_counter_enable(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_set_evccntcr(u32 val)
{
	kryo_l2_set_indirect_reg(L2PMCCNTCR, val);
}

static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
{
	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
}

static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
{
	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
}

static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
				 u32 event_group, u32 event_cc)
{
	u64 field;
	u64 resr_val;
	u32 shift;
	unsigned long flags;

	shift = L2PMRESR_GROUP_BITS * event_group;
	field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift);

	spin_lock_irqsave(&cluster->pmu_lock, flags);

	resr_val = kryo_l2_get_indirect_reg(L2PMRESR);
	resr_val &= ~(L2PMRESR_GROUP_MASK << shift);
	resr_val |= field;
	resr_val |= L2PMRESR_EN;
	kryo_l2_set_indirect_reg(L2PMRESR, resr_val);

	spin_unlock_irqrestore(&cluster->pmu_lock, flags);
}
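
/*
 * Illustrative RESR layout, assuming the 8-bits-per-group packing above:
 * programming group 5 with code 0x40 computes shift = 8 * 5 = 40 and writes
 * 0x40 into L2PMRESR[47:40], then sets L2PMRESR_EN (bit 63) so resource
 * counting is enabled.
 */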

/*
 * Hardware allows filtering of events based on the originating
 * CPU. Turn this off by setting filter bits to allow events from
 * all CPUS, subunits and ID independent events in this cluster.
 */
static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr)
{
	u32 val = L2PMXEVFILTER_SUFILTER_ALL |
		  L2PMXEVFILTER_ORGFILTER_IDINDEP |
		  L2PMXEVFILTER_ORGFILTER_ALL;

	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
}

static inline u32 cluster_pmu_getreset_ovsr(void)
{
	u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET);

	kryo_l2_set_indirect_reg(L2PMOVSCLR, result);
	return result;
}

static inline bool cluster_pmu_has_overflowed(u32 ovsr)
{
	return !!(ovsr & l2_counter_present_mask);
}

static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx)
{
	return !!(ovsr & idx_to_reg_bit(idx));
}

static void l2_cache_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev, now;
	u32 idx = hwc->idx;

	do {
		prev = local64_read(&hwc->prev_count);
		now = cluster_pmu_counter_get_value(idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/*
	 * The cycle counter is 64-bit, but all other counters are
	 * 32-bit, and we must handle 32-bit overflow explicitly.
	 */
	delta = now - prev;
	if (idx != l2_cycle_ctr_idx)
		delta &= 0xffffffff;

	local64_add(delta, &event->count);
}
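
/*
 * Worked example of the 32-bit wrap handling above: if prev_count was
 * 0xFFFFFFF0 and the counter now reads 0x10, then now - prev is
 * 0xFFFFFFFF00000020 as a u64; masking to 32 bits yields the correct
 * delta of 0x20 events.
 */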

static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
					struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 new;

	/*
	 * We limit the max period to half the max counter value so
	 * that even in the case of extreme interrupt latency the
	 * counter will (hopefully) not wrap past its initial value.
	 */
	if (idx == l2_cycle_ctr_idx)
		new = L2_CYCLE_COUNTER_RELOAD;
	else
		new = L2_COUNTER_RELOAD;

	local64_set(&hwc->prev_count, new);
	cluster_pmu_counter_set_value(idx, new);
}
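
/*
 * Illustrative: L2_COUNTER_RELOAD is BIT(31), so a 32-bit event counter is
 * started at 0x80000000 and can count 2^31 events before it overflows and
 * raises the interrupt; the 64-bit cycle counter is similarly started at
 * BIT(63).
 */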

static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
	unsigned int group;

	if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
		if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
			return -EAGAIN;

		return l2_cycle_ctr_idx;
	}

	idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
	if (idx == num_ctrs) {
		/* The counters are all in use. */
		return -EAGAIN;
	}

	/*
	 * Check for column exclusion: event column already in use by another
	 * event. This is for events which are not in the same group.
	 * Conflicting events in the same group are detected in event_init.
	 */
	group = L2_EVT_GROUP(hwc->config_base);
	if (test_bit(group, cluster->used_groups))
		return -EAGAIN;

	set_bit(idx, cluster->used_counters);
	set_bit(group, cluster->used_groups);

	return idx;
}
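
/*
 * Illustrative counter layout, assuming the hardware reports eight event
 * counters plus the cycle counter (num_counters = 9): event counters use
 * idx 0..7 (bits 0..7 in the enable/overflow registers), while the cycle
 * counter uses idx 8 and bit L2CYCLE_CTR_BIT (31) via idx_to_reg_bit().
 */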

static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
				     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	clear_bit(idx, cluster->used_counters);
	if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)
		clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
}

static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
{
	struct cluster_pmu *cluster = data;
	int num_counters = cluster->l2cache_pmu->num_counters;
	u32 ovsr;
	int idx;

	ovsr = cluster_pmu_getreset_ovsr();
	if (!cluster_pmu_has_overflowed(ovsr))
		return IRQ_NONE;

	for_each_set_bit(idx, cluster->used_counters, num_counters) {
		struct perf_event *event = cluster->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		if (!cluster_pmu_counter_has_overflowed(ovsr, idx))
			continue;

		l2_cache_event_update(event);
		hwc = &event->hw;

		l2_cache_cluster_set_period(cluster, hwc);
	}

	return IRQ_HANDLED;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static void l2_cache_pmu_enable(struct pmu *pmu)
{
	/*
	 * Although there is only one PMU (per socket) controlling multiple
	 * physical PMUs (per cluster), because we do not support per-task mode
	 * each event is associated with a CPU. Each event has pmu_enable
	 * called on its CPU, so here it is only necessary to enable the
	 * counters for the current CPU.
	 */

	cluster_pmu_enable();
}

static void l2_cache_pmu_disable(struct pmu *pmu)
{
	cluster_pmu_disable();
}

static int l2_cache_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cluster_pmu *cluster;
	struct perf_event *sibling;
	struct l2cache_pmu *l2cache_pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	l2cache_pmu = to_l2cache_pmu(event->pmu);

	if (hwc->sample_period) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Sampling not supported\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Per-task mode not supported\n");
		return -EOPNOTSUPP;
	}

	if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
	     ((event->attr.config & ~L2_EVT_MASK) != 0)) &&
	    (event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Invalid config %llx\n",
				    event->attr.config);
		return -EINVAL;
	}

	/* Don't allow groups with mixed PMUs, except for s/w events */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader)) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
				    "Can't create mixed PMU group\n");
		return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling)) {
			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
					    "Can't create mixed PMU group\n");
			return -EINVAL;
		}
	}

	cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
	if (!cluster) {
		/* CPU has not been initialised */
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
			"CPU%d not associated with L2 cluster\n", event->cpu);
		return -EINVAL;
	}

	/* Ensure all events in a group are on the same cpu */
	if ((event->group_leader != event) &&
	    (cluster->on_cpu != event->group_leader->cpu)) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
			 "Can't create group on CPUs %d and %d",
			 event->cpu, event->group_leader->cpu);
		return -EINVAL;
	}

	if ((event != event->group_leader) &&
	    !is_software_event(event->group_leader) &&
	    (L2_EVT_GROUP(event->group_leader->attr.config) ==
	     L2_EVT_GROUP(event->attr.config))) {
		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
			 "Column exclusion: conflicting events %llx %llx\n",
			 event->group_leader->attr.config,
			 event->attr.config);
		return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if ((sibling != event) &&
		    !is_software_event(sibling) &&
		    (L2_EVT_GROUP(sibling->attr.config) ==
		     L2_EVT_GROUP(event->attr.config))) {
			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
			     "Column exclusion: conflicting events %llx %llx\n",
			     sibling->attr.config,
			     event->attr.config);
			return -EINVAL;
		}
	}

	hwc->config_base = event->attr.config;

	/*
	 * Ensure all events are on the same cpu so all events are in the
	 * same cpu context, to avoid races on pmu_enable etc.
	 */
	event->cpu = cluster->on_cpu;

	return 0;
}
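
/*
 * Illustrative: opening a group with events 0x400 (dcache-ops, group 0) and
 * 0x401 (icache-ops, group 1) passes the checks above, while pairing 0x400
 * with 0x410 would fail column exclusion because both decode to group 0.
 */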

static void l2_cache_event_start(struct perf_event *event, int flags)
{
	struct cluster_pmu *cluster;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 config;
	u32 event_cc, event_group;

	hwc->state = 0;

	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

	l2_cache_cluster_set_period(cluster, hwc);

	if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
		cluster_pmu_set_evccntcr(0);
	} else {
		config = hwc->config_base;
		event_cc = L2_EVT_CODE(config);
		event_group = L2_EVT_GROUP(config);

		cluster_pmu_set_evcntcr(idx, 0);
		cluster_pmu_set_evtyper(idx, event_group);
		cluster_pmu_set_resr(cluster, event_group, event_cc);
		cluster_pmu_set_evfilter_sys_mode(idx);
	}

	cluster_pmu_counter_enable_interrupt(idx);
	cluster_pmu_counter_enable(idx);
}

static void l2_cache_event_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	cluster_pmu_counter_disable_interrupt(idx);
	cluster_pmu_counter_disable(idx);

	if (flags & PERF_EF_UPDATE)
		l2_cache_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int l2_cache_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	struct cluster_pmu *cluster;

	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

	idx = l2_cache_get_event_idx(cluster, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	cluster->events[idx] = event;
	local64_set(&hwc->prev_count, 0);

	if (flags & PERF_EF_START)
		l2_cache_event_start(event, flags);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void l2_cache_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cluster_pmu *cluster;
	int idx = hwc->idx;

	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

	l2_cache_event_stop(event, flags | PERF_EF_UPDATE);
	cluster->events[idx] = NULL;
	l2_cache_clear_event_idx(cluster, event);

	perf_event_update_userpage(event);
}

static void l2_cache_event_read(struct perf_event *event)
{
	l2_cache_event_update(event);
}

static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}

static struct device_attribute l2_cache_pmu_cpumask_attr =
		__ATTR(cpumask, S_IRUGO, l2_cache_pmu_cpumask_show, NULL);

static struct attribute *l2_cache_pmu_cpumask_attrs[] = {
	&l2_cache_pmu_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group l2_cache_pmu_cpumask_group = {
	.attrs = l2_cache_pmu_cpumask_attrs,
};

/* CCG format for perf RAW codes. */
PMU_FORMAT_ATTR(l2_code, "config:4-11");
PMU_FORMAT_ATTR(l2_group, "config:0-3");
PMU_FORMAT_ATTR(event, "config:0-11");

static struct attribute *l2_cache_pmu_formats[] = {
	&format_attr_l2_code.attr,
	&format_attr_l2_group.attr,
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group l2_cache_pmu_format_group = {
	.name = "format",
	.attrs = l2_cache_pmu_formats,
};

static ssize_t l2cache_pmu_event_show(struct device *dev,
				      struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define L2CACHE_EVENT_ATTR(_name, _id)					\
	PMU_EVENT_ATTR_ID(_name, l2cache_pmu_event_show, _id)

static struct attribute *l2_cache_pmu_events[] = {
	L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
	L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
	L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
	L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI),
	L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS),
	L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
	L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
	L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
	L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX),
	L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX),
	L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX),
	NULL
};

static const struct attribute_group l2_cache_pmu_events_group = {
	.name = "events",
	.attrs = l2_cache_pmu_events,
};

static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
	&l2_cache_pmu_format_group,
	&l2_cache_pmu_cpumask_group,
	&l2_cache_pmu_events_group,
	NULL,
};
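
/*
 * Illustrative usage from userspace, assuming this PMU registers as
 * "l2cache_0" (see l2_cache_pmu_probe() below):
 *
 *   perf stat -a -e l2cache_0/event=0x405/ sleep 1               # total-reads
 *   perf stat -a -e l2cache_0/l2_code=0x40,l2_group=0x5/ sleep 1 # same event
 *   perf stat -a -e l2cache_0/cycles/ sleep 1                    # cycle counter
 */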

/*
 * Generic device handlers
 */

static const struct acpi_device_id l2_cache_pmu_acpi_match[] = {
	{ "QCOM8130", },
	{ }
};

static int get_num_counters(void)
{
	int val;

	val = kryo_l2_get_indirect_reg(L2PMCR);

	/*
	 * Read number of counters from L2PMCR and add 1
	 * for the cycle counter.
	 */
	return ((val >> L2PMCR_NUM_EV_SHIFT) & L2PMCR_NUM_EV_MASK) + 1;
}
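
/*
 * For example, if L2PMCR[15:11] reads 8, the cluster has eight configurable
 * event counters and this returns 9, the extra slot being the dedicated
 * cycle counter.
 */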

static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
	struct l2cache_pmu *l2cache_pmu, int cpu)
{
	u64 mpidr;
	int cpu_cluster_id;
	struct cluster_pmu *cluster;

	/*
	 * This assumes that the cluster_id is in MPIDR[aff1] for
	 * single-threaded cores, and MPIDR[aff2] for multi-threaded
	 * cores. This logic will have to be updated if this changes.
	 */
	mpidr = read_cpuid_mpidr();
	if (mpidr & MPIDR_MT_BITMASK)
		cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	else
		cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
		if (cluster->cluster_id != cpu_cluster_id)
			continue;

		dev_info(&l2cache_pmu->pdev->dev,
			 "CPU%d associated with cluster %d\n", cpu,
			 cluster->cluster_id);
		cpumask_set_cpu(cpu, &cluster->cluster_cpus);
		*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
		return cluster;
	}

	return NULL;
}
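
/*
 * Illustrative: on a non-multithreaded part, a CPU whose MPIDR has Aff1 = 1
 * is matched against the cluster whose firmware (ACPI _UID) cluster id is 1;
 * the association is then cached in the per-cpu pmu_cluster pointer so later
 * lookups are a single dereference.
 */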

static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct cluster_pmu *cluster;
	struct l2cache_pmu *l2cache_pmu;

	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
	cluster = get_cluster_pmu(l2cache_pmu, cpu);
	if (!cluster) {
		/* First time this CPU has come online */
		cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
		if (!cluster) {
			/* Only if broken firmware doesn't list every cluster */
			WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
			return 0;
		}
	}

	/* If another CPU is managing this cluster, we're done */
	if (cluster->on_cpu != -1)
		return 0;

	/*
	 * All CPUs on this cluster were down, use this one.
	 * Reset to put it into sane state.
	 */
	cluster->on_cpu = cpu;
	cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
	cluster_pmu_reset();

	WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
	enable_irq(cluster->irq);

	return 0;
}

static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct cluster_pmu *cluster;
	struct l2cache_pmu *l2cache_pmu;
	cpumask_t cluster_online_cpus;
	unsigned int target;

	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
	cluster = get_cluster_pmu(l2cache_pmu, cpu);
	if (!cluster)
		return 0;

	/* If this CPU is not managing the cluster, we're done */
	if (cluster->on_cpu != cpu)
		return 0;

	/* Give up ownership of cluster */
	cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
	cluster->on_cpu = -1;

	/* Any other CPU for this cluster which is still online */
	cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
		    cpu_online_mask);
	target = cpumask_any_but(&cluster_online_cpus, cpu);
	if (target >= nr_cpu_ids) {
		disable_irq(cluster->irq);
		return 0;
	}

	perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
	cluster->on_cpu = target;
	cpumask_set_cpu(target, &l2cache_pmu->cpumask);
	WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));

	return 0;
}

static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev->parent);
	struct platform_device *sdev = to_platform_device(dev);
	struct l2cache_pmu *l2cache_pmu = data;
	struct cluster_pmu *cluster;
	u64 fw_cluster_id;
	int err;
	int irq;

	err = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &fw_cluster_id);
	if (err) {
		dev_err(&pdev->dev, "unable to read ACPI uid\n");
		return err;
	}

	cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
	if (!cluster)
		return -ENOMEM;

	INIT_LIST_HEAD(&cluster->next);
	cluster->cluster_id = fw_cluster_id;

	irq = platform_get_irq(sdev, 0);
	if (irq < 0)
		return irq;
	cluster->irq = irq;

	cluster->l2cache_pmu = l2cache_pmu;
	cluster->on_cpu = -1;

	err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
			       IRQF_NOBALANCING | IRQF_NO_THREAD |
			       IRQF_NO_AUTOEN,
			       "l2-cache-pmu", cluster);
	if (err) {
		dev_err(&pdev->dev,
			"Unable to request IRQ%d for L2 PMU counters\n", irq);
		return err;
	}

	dev_info(&pdev->dev,
		 "Registered L2 cache PMU cluster %lld\n", fw_cluster_id);

	spin_lock_init(&cluster->pmu_lock);

	list_add(&cluster->next, &l2cache_pmu->clusters);
	l2cache_pmu->num_pmus++;

	return 0;
}

static int l2_cache_pmu_probe(struct platform_device *pdev)
{
	int err;
	struct l2cache_pmu *l2cache_pmu;

	l2cache_pmu =
		devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
	if (!l2cache_pmu)
		return -ENOMEM;

	INIT_LIST_HEAD(&l2cache_pmu->clusters);

	platform_set_drvdata(pdev, l2cache_pmu);
	l2cache_pmu->pmu = (struct pmu) {
		/* suffix is instance id for future use with multiple sockets */
		.name		= "l2cache_0",
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= l2_cache_pmu_enable,
		.pmu_disable	= l2_cache_pmu_disable,
		.event_init	= l2_cache_event_init,
		.add		= l2_cache_event_add,
		.del		= l2_cache_event_del,
		.start		= l2_cache_event_start,
		.stop		= l2_cache_event_stop,
		.read		= l2_cache_event_read,
		.attr_groups	= l2_cache_pmu_attr_grps,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	l2cache_pmu->num_counters = get_num_counters();
	l2cache_pmu->pdev = pdev;
	l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
						     struct cluster_pmu *);
	if (!l2cache_pmu->pmu_cluster)
		return -ENOMEM;

	l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
	l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
		BIT(L2CYCLE_CTR_BIT);

	cpumask_clear(&l2cache_pmu->cpumask);

	/* Read cluster info and initialize each cluster */
	err = device_for_each_child(&pdev->dev, l2cache_pmu,
				    l2_cache_pmu_probe_cluster);
	if (err)
		return err;

	if (l2cache_pmu->num_pmus == 0) {
		dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n");
		return -ENODEV;
	}

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
				       &l2cache_pmu->node);
	if (err) {
		dev_err(&pdev->dev, "Error %d registering hotplug", err);
		return err;
	}

	err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
	if (err) {
		dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err);
		goto out_unregister;
	}

	dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n",
		 l2cache_pmu->num_pmus);

	return err;

out_unregister:
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
				    &l2cache_pmu->node);
	return err;
}

static int l2_cache_pmu_remove(struct platform_device *pdev)
{
	struct l2cache_pmu *l2cache_pmu =
		to_l2cache_pmu(platform_get_drvdata(pdev));

	perf_pmu_unregister(&l2cache_pmu->pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
				    &l2cache_pmu->node);
	return 0;
}

static struct platform_driver l2_cache_pmu_driver = {
	.driver = {
		.name = "qcom-l2cache-pmu",
		.acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = l2_cache_pmu_probe,
	.remove = l2_cache_pmu_remove,
};

static int __init register_l2_cache_pmu_driver(void)
{
	int err;

	err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
				      "AP_PERF_ARM_QCOM_L2_ONLINE",
				      l2cache_pmu_online_cpu,
				      l2cache_pmu_offline_cpu);
	if (err)
		return err;

	return platform_driver_register(&l2_cache_pmu_driver);
}
device_initcall(register_l2_cache_pmu_driver);