1 // SPDX-License-Identifier: GPL-2.0
2 // CCI Cache Coherent Interconnect PMU driver
3 // Copyright (C) 2013-2018 Arm Ltd.
4 // Author: Punit Agrawal <punit.agrawal@arm.com>, Suzuki Poulose <suzuki.poulose@arm.com>
6 #include <linux/arm-cci.h>
8 #include <linux/interrupt.h>
9 #include <linux/module.h>
11 #include <linux/perf_event.h>
12 #include <linux/platform_device.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
16 #define DRIVER_NAME "ARM-CCI PMU"
18 #define CCI_PMCR 0x0100
19 #define CCI_PID2 0x0fe8
21 #define CCI_PMCR_CEN 0x00000001
22 #define CCI_PMCR_NCNT_MASK 0x0000f800
23 #define CCI_PMCR_NCNT_SHIFT 11
25 #define CCI_PID2_REV_MASK 0xf0
26 #define CCI_PID2_REV_SHIFT 4
28 #define CCI_PMU_EVT_SEL 0x000
29 #define CCI_PMU_CNTR 0x004
30 #define CCI_PMU_CNTR_CTRL 0x008
31 #define CCI_PMU_OVRFLW 0x00c
33 #define CCI_PMU_OVRFLW_FLAG 1
35 #define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size)
36 #define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model))
37 #define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)
38 #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1)
40 #define CCI_PMU_MAX_HW_CNTRS(model) \
41 ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
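/*
 * Illustrative note (not in the original source): each counter occupies its
 * own window of CCI_PMU_CNTR_SIZE bytes above the PMU base. With the CCI-5xx
 * models below (cntr_size = SZ_64K) counter 0 starts at offset 0x00000,
 * counter 1 at 0x10000, and so on, with the EVT_SEL/CNTR/CNTR_CTRL/OVRFLW
 * registers at the small offsets defined above within each window.
 */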
43 /* Types of interfaces that can generate events */
47 #ifdef CONFIG_ARM_CCI5xx_PMU
53 #define NUM_HW_CNTRS_CII_4XX 4
54 #define NUM_HW_CNTRS_CII_5XX 8
55 #define NUM_HW_CNTRS_MAX NUM_HW_CNTRS_CII_5XX
57 #define FIXED_HW_CNTRS_CII_4XX 1
58 #define FIXED_HW_CNTRS_CII_5XX 0
59 #define FIXED_HW_CNTRS_MAX FIXED_HW_CNTRS_CII_4XX
61 #define HW_CNTRS_MAX (NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)
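/* With the models in this file that is at most 8 programmable + 1 fixed = 9. */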
68 struct cci_pmu_hw_events {
69 struct perf_event **events;
70 unsigned long *used_mask;
71 raw_spinlock_t pmu_lock;
76 * struct cci_pmu_model:
77 * @fixed_hw_cntrs - Number of fixed event counters
78 * @num_hw_cntrs - Maximum number of programmable event counters
79 * @cntr_size - Size of an event counter mapping
81 struct cci_pmu_model {
86 struct attribute **format_attrs;
87 struct attribute **event_attrs;
88 struct event_range event_ranges[CCI_IF_MAX];
89 int (*validate_hw_event)(struct cci_pmu *, unsigned long);
90 int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
91 void (*write_counters)(struct cci_pmu *, unsigned long *);
94 static struct cci_pmu_model cci_pmu_models[];
98 void __iomem *ctrl_base;
103 unsigned long active_irqs;
104 const struct cci_pmu_model *model;
105 struct cci_pmu_hw_events hw_events;
106 struct platform_device *plat_device;
108 atomic_t active_events;
109 struct mutex reserve_mutex;
112 #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
114 static struct cci_pmu *g_cci_pmu;
117 #ifdef CONFIG_ARM_CCI400_PMU
121 #ifdef CONFIG_ARM_CCI5xx_PMU
128 static void pmu_write_counters(struct cci_pmu *cci_pmu,
129 unsigned long *mask);
130 static ssize_t __maybe_unused cci_pmu_format_show(struct device *dev,
131 struct device_attribute *attr, char *buf);
132 static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
133 struct device_attribute *attr, char *buf);
135 #define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
136 &((struct dev_ext_attribute[]) { \
137 { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \
140 #define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
141 CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
142 #define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
143 CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)
145 /* CCI400 PMU Specific definitions */
147 #ifdef CONFIG_ARM_CCI400_PMU
150 #define CCI400_PORT_S0 0
151 #define CCI400_PORT_S1 1
152 #define CCI400_PORT_S2 2
153 #define CCI400_PORT_S3 3
154 #define CCI400_PORT_S4 4
155 #define CCI400_PORT_M0 5
156 #define CCI400_PORT_M1 6
157 #define CCI400_PORT_M2 7
159 #define CCI400_R1_PX 5
162 * Instead of an event id to monitor CCI cycles, a dedicated counter is
163 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
164 * make use of this event in hardware.
166 enum cci400_perf_events {
167 CCI400_PMU_CYCLES = 0xff
170 #define CCI400_PMU_CYCLE_CNTR_IDX 0
171 #define CCI400_PMU_CNTR0_IDX 1
174 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
175 * ports and bits 4:0 are event codes. There are different event codes
176 * associated with each port type.
178 * Additionally, the range of events associated with the port types changed
179 * between Rev0 and Rev1.
181 * The constants below define the range of valid codes for each port type for
182 * the different revisions and are used to validate the event to be monitored.
185 #define CCI400_PMU_EVENT_MASK 0xffUL
186 #define CCI400_PMU_EVENT_SOURCE_SHIFT 5
187 #define CCI400_PMU_EVENT_SOURCE_MASK 0x7
188 #define CCI400_PMU_EVENT_CODE_SHIFT 0
189 #define CCI400_PMU_EVENT_CODE_MASK 0x1f
190 #define CCI400_PMU_EVENT_SOURCE(event) \
191 ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
192 CCI400_PMU_EVENT_SOURCE_MASK)
193 #define CCI400_PMU_EVENT_CODE(event) \
194 ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
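/*
 * Worked example (illustrative): a raw config of 0xb6 decodes as
 * source = (0xb6 >> 5) & 0x7 = 0x5 (CCI400_PORT_M0) and
 * code   =  0xb6 & 0x1f      = 0x16, which lies within the Rev0 master
 * port range 0x14-0x1a defined below.
 */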
196 #define CCI400_R0_SLAVE_PORT_MIN_EV 0x00
197 #define CCI400_R0_SLAVE_PORT_MAX_EV 0x13
198 #define CCI400_R0_MASTER_PORT_MIN_EV 0x14
199 #define CCI400_R0_MASTER_PORT_MAX_EV 0x1a
201 #define CCI400_R1_SLAVE_PORT_MIN_EV 0x00
202 #define CCI400_R1_SLAVE_PORT_MAX_EV 0x14
203 #define CCI400_R1_MASTER_PORT_MIN_EV 0x00
204 #define CCI400_R1_MASTER_PORT_MAX_EV 0x11
206 #define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
207 CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
208 (unsigned long)_config)
210 static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
211 struct device_attribute *attr, char *buf);
213 static struct attribute *cci400_pmu_format_attrs[] = {
214 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
215 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
219 static struct attribute *cci400_r0_pmu_event_attrs[] = {
221 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
222 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
223 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
224 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
225 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
226 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
227 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
228 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
229 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
230 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
231 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
232 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
233 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
234 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
235 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
236 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
237 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
238 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
239 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
240 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
242 CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
243 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
244 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
245 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
246 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
247 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
248 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
249 /* Special event for cycles counter */
250 CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
254 static struct attribute *cci400_r1_pmu_event_attrs[] = {
256 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
257 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
258 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
259 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
260 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
261 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
262 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
263 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
264 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
265 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
266 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
267 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
268 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
269 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
270 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
271 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
272 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
273 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
274 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
275 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
276 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
278 CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
279 CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
280 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
281 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
282 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
283 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
284 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
285 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
286 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
287 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
288 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
289 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
290 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
291 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
292 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
293 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
294 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
295 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
296 /* Special event for cycles counter */
297 CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
301 static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
302 struct device_attribute *attr, char *buf)
304 struct dev_ext_attribute *eattr = container_of(attr,
305 struct dev_ext_attribute, attr);
306 return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var);
309 static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
310 struct cci_pmu_hw_events *hw,
311 unsigned long cci_event)
315 /* cycles event idx is fixed */
316 if (cci_event == CCI400_PMU_CYCLES) {
317 if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
320 return CCI400_PMU_CYCLE_CNTR_IDX;
323 for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
324 if (!test_and_set_bit(idx, hw->used_mask))
327 /* No counters available */
331 static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
333 u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
334 u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
337 if (hw_event & ~CCI400_PMU_EVENT_MASK)
340 if (hw_event == CCI400_PMU_CYCLES)
349 /* Slave Interface */
350 if_type = CCI_IF_SLAVE;
355 /* Master Interface */
356 if_type = CCI_IF_MASTER;
362 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
363 ev_code <= cci_pmu->model->event_ranges[if_type].max)
369 static int probe_cci400_revision(struct cci_pmu *cci_pmu)
372 rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
373 rev >>= CCI_PID2_REV_SHIFT;
375 if (rev < CCI400_R1_PX)
381 static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
383 if (platform_has_secure_cci_access())
384 return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
387 #else /* !CONFIG_ARM_CCI400_PMU */
388 static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
392 #endif /* CONFIG_ARM_CCI400_PMU */
394 #ifdef CONFIG_ARM_CCI5xx_PMU
397 * CCI5xx PMU event id is a 9-bit value made of two parts.
398 * bits [8:5] - Source for the event
399 * bits [4:0] - Event code (specific to type of interface)
405 #define CCI5xx_PORT_S0 0x0
406 #define CCI5xx_PORT_S1 0x1
407 #define CCI5xx_PORT_S2 0x2
408 #define CCI5xx_PORT_S3 0x3
409 #define CCI5xx_PORT_S4 0x4
410 #define CCI5xx_PORT_S5 0x5
411 #define CCI5xx_PORT_S6 0x6
413 #define CCI5xx_PORT_M0 0x8
414 #define CCI5xx_PORT_M1 0x9
415 #define CCI5xx_PORT_M2 0xa
416 #define CCI5xx_PORT_M3 0xb
417 #define CCI5xx_PORT_M4 0xc
418 #define CCI5xx_PORT_M5 0xd
419 #define CCI5xx_PORT_M6 0xe
421 #define CCI5xx_PORT_GLOBAL 0xf
423 #define CCI5xx_PMU_EVENT_MASK 0x1ffUL
424 #define CCI5xx_PMU_EVENT_SOURCE_SHIFT 0x5
425 #define CCI5xx_PMU_EVENT_SOURCE_MASK 0xf
426 #define CCI5xx_PMU_EVENT_CODE_SHIFT 0x0
427 #define CCI5xx_PMU_EVENT_CODE_MASK 0x1f
429 #define CCI5xx_PMU_EVENT_SOURCE(event) \
430 ((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
431 #define CCI5xx_PMU_EVENT_CODE(event) \
432 ((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)
434 #define CCI5xx_SLAVE_PORT_MIN_EV 0x00
435 #define CCI5xx_SLAVE_PORT_MAX_EV 0x1f
436 #define CCI5xx_MASTER_PORT_MIN_EV 0x00
437 #define CCI5xx_MASTER_PORT_MAX_EV 0x06
438 #define CCI5xx_GLOBAL_PORT_MIN_EV 0x00
439 #define CCI5xx_GLOBAL_PORT_MAX_EV 0x0f
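/*
 * Worked example (illustrative): a raw config of 0x1e4 decodes as
 * source = (0x1e4 >> 5) & 0xf = 0xf (CCI5xx_PORT_GLOBAL) and
 * code   =  0x1e4 & 0x1f     = 0x4, a global event within the
 * 0x00-0x0f range above.
 */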
442 #define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
443 CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
444 (unsigned long) _config)
446 static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
447 struct device_attribute *attr, char *buf);
449 static struct attribute *cci5xx_pmu_format_attrs[] = {
450 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
451 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
455 static struct attribute *cci5xx_pmu_event_attrs[] = {
457 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
458 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
459 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
460 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
461 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
462 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
463 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
464 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
465 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
466 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
467 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
468 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
469 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
470 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
471 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
472 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
473 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
474 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
475 CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
476 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
477 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
478 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
479 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
480 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
481 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
482 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
483 CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
484 CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
485 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
486 CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
487 CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
488 CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),
491 CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
492 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
493 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
494 CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
495 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
496 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
497 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),
500 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
501 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
502 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
503 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
504 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
505 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
506 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
507 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
508 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
509 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
510 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
511 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
512 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
513 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
514 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
515 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
519 static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
520 struct device_attribute *attr, char *buf)
522 struct dev_ext_attribute *eattr = container_of(attr,
523 struct dev_ext_attribute, attr);
524 /* Global events have a single fixed source code */
525 return sysfs_emit(buf, "event=0x%lx,source=0x%x\n",
526 (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
530 * CCI500 provides 8 independent event counters that can count
531 * any of the events available.
532 * CCI500 PMU event source ids
533 * 0x0-0x6 - Slave interfaces
534 * 0x8-0xD - Master interfaces
535 * 0xf - Global Events
538 static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
539 unsigned long hw_event)
541 u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
542 u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
545 if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
556 if_type = CCI_IF_SLAVE;
564 if_type = CCI_IF_MASTER;
566 case CCI5xx_PORT_GLOBAL:
567 if_type = CCI_IF_GLOBAL;
573 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
574 ev_code <= cci_pmu->model->event_ranges[if_type].max)
581 * CCI550 provides 8 independent event counters that can count
582 * any of the events available.
583 * CCI550 PMU event source ids
584 * 0x0-0x6 - Slave interfaces
585 * 0x8-0xe - Master interfaces
586 * 0xf - Global Events
589 static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
590 unsigned long hw_event)
592 u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
593 u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
596 if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
607 if_type = CCI_IF_SLAVE;
616 if_type = CCI_IF_MASTER;
618 case CCI5xx_PORT_GLOBAL:
619 if_type = CCI_IF_GLOBAL;
625 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
626 ev_code <= cci_pmu->model->event_ranges[if_type].max)
632 #endif /* CONFIG_ARM_CCI5xx_PMU */
635 * Program the CCI PMU counters which have PERF_HES_ARCH set
636 * with the event period and mark them ready before we enable the PMU.
639 static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
642 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
643 DECLARE_BITMAP(mask, HW_CNTRS_MAX);
645 bitmap_zero(mask, HW_CNTRS_MAX);
646 for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
647 struct perf_event *event = cci_hw->events[i];
652 /* Leave the events which are not counting */
653 if (event->hw.state & PERF_HES_STOPPED)
655 if (event->hw.state & PERF_HES_ARCH) {
657 event->hw.state &= ~PERF_HES_ARCH;
661 pmu_write_counters(cci_pmu, mask);
664 /* Should be called with cci_pmu->hw_events->pmu_lock held */
665 static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
669 /* Enable all the PMU counters. */
670 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
671 writel(val, cci_pmu->ctrl_base + CCI_PMCR);
674 /* Should be called with cci_pmu->hw_events->pmu_lock held */
675 static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
677 cci_pmu_sync_counters(cci_pmu);
678 __cci_pmu_enable_nosync(cci_pmu);
681 /* Should be called with cci_pmu->hw_events->pmu_lock held */
682 static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
686 /* Disable all the PMU counters. */
687 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
688 writel(val, cci_pmu->ctrl_base + CCI_PMCR);
691 static ssize_t cci_pmu_format_show(struct device *dev,
692 struct device_attribute *attr, char *buf)
694 struct dev_ext_attribute *eattr = container_of(attr,
695 struct dev_ext_attribute, attr);
696 return sysfs_emit(buf, "%s\n", (char *)eattr->var);
699 static ssize_t cci_pmu_event_show(struct device *dev,
700 struct device_attribute *attr, char *buf)
702 struct dev_ext_attribute *eattr = container_of(attr,
703 struct dev_ext_attribute, attr);
704 /* source parameter is mandatory for normal PMU events */
705 return sysfs_emit(buf, "source=?,event=0x%lx\n",
706 (unsigned long)eattr->var);
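/*
 * Illustrative usage (a sketch; the exact PMU name comes from the model,
 * e.g. "CCI_400_r1" below). The sysfs strings above let perf resolve events
 * by name, equivalent to something like:
 *   perf stat -a -e CCI_400_r1/source=0x5,event=0x1/ -- sleep 1
 * i.e. event code 0x1 counted on master interface M0 (source 0x5).
 */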
709 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
711 return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
714 static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
716 return readl_relaxed(cci_pmu->base +
717 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
720 static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
721 int idx, unsigned int offset)
723 writel_relaxed(value, cci_pmu->base +
724 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
727 static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
729 pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
732 static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
734 pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
737 static bool __maybe_unused
738 pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
740 return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
743 static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
745 pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
749 * For all counters on the CCI-PMU, disable any 'enabled' counters,
750 * saving the changed counters in the mask, so that we can restore
751 * it later using pmu_restore_counters. The mask is private to the
752 * caller. We cannot rely on the used_mask maintained by the CCI_PMU
753 * as it only tells us if the counter is assigned to perf_event or not.
754 * The state of the perf_event cannot be locked by the PMU layer, hence
755 * we check the individual counter status (which can be locked by
756 cci_pmu->hw_events->pmu_lock).
758 * @mask should be initialised to empty by the caller.
760 static void __maybe_unused
761 pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
765 for (i = 0; i < cci_pmu->num_cntrs; i++) {
766 if (pmu_counter_is_enabled(cci_pmu, i)) {
768 pmu_disable_counter(cci_pmu, i);
774 * Restore the status of the counters. Reversal of the pmu_save_counters().
775 * For each counter set in the mask, enable the counter back.
777 static void __maybe_unused
778 pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
782 for_each_set_bit(i, mask, cci_pmu->num_cntrs)
783 pmu_enable_counter(cci_pmu, i);
787 * Returns the number of programmable counters actually implemented
790 static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
792 return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
793 CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
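/*
 * Example (illustrative): a PMCR value of 0x2001 has CEN set and
 * NCNT = (0x2001 & 0xf800) >> 11 = 4 implemented programmable counters.
 */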
796 static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
798 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
799 unsigned long cci_event = event->hw.config_base;
802 if (cci_pmu->model->get_event_idx)
803 return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
805 /* Generic code to find an unused idx from the mask */
806 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
807 if (!test_and_set_bit(idx, hw->used_mask))
810 /* No counters available */
814 static int pmu_map_event(struct perf_event *event)
816 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
818 if (event->attr.type < PERF_TYPE_MAX ||
819 !cci_pmu->model->validate_hw_event)
822 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
825 static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
828 struct platform_device *pmu_device = cci_pmu->plat_device;
830 if (unlikely(!pmu_device))
833 if (cci_pmu->nr_irqs < 1) {
834 dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
839 * Register all available CCI PMU interrupts. In the interrupt handler
840 * we iterate over the counters, check for the interrupt source (the
841 * overflowing counter) and clear it.
843 * This should allow handling of counters that share a non-unique interrupt line.
845 for (i = 0; i < cci_pmu->nr_irqs; i++) {
846 int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
847 "arm-cci-pmu", cci_pmu);
849 dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
854 set_bit(i, &cci_pmu->active_irqs);
860 static void pmu_free_irq(struct cci_pmu *cci_pmu)
864 for (i = 0; i < cci_pmu->nr_irqs; i++) {
865 if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
868 free_irq(cci_pmu->irqs[i], cci_pmu);
872 static u32 pmu_read_counter(struct perf_event *event)
874 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
875 struct hw_perf_event *hw_counter = &event->hw;
876 int idx = hw_counter->idx;
879 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
880 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
883 value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
888 static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
890 pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
893 static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
896 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
898 for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
899 struct perf_event *event = cci_hw->events[i];
903 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
907 static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
909 if (cci_pmu->model->write_counters)
910 cci_pmu->model->write_counters(cci_pmu, mask);
912 __pmu_write_counters(cci_pmu, mask);
915 #ifdef CONFIG_ARM_CCI5xx_PMU
918 * CCI-500/CCI-550 has advanced power saving policies, which could gate the
919 * clocks to the PMU counters, which makes the writes to them ineffective.
920 * The only way to write to those counters is when the global counters
921 * are enabled and the particular counter is enabled.
923 * So we do the following :
925 * 1) Disable all the PMU counters, saving their current state
926 * 2) Enable the global PMU profiling, now that all counters are
929 * For each counter to be programmed, repeat steps 3-7:
931 * 3) Write an invalid event code to the event control register for the
932 * counter, so that the counters are not modified.
933 * 4) Enable the counter control for the counter.
934 * 5) Set the counter value
935 * 6) Disable the counter
936 * 7) Restore the event in the target counter
938 * 8) Disable the global PMU.
939 * 9) Restore the status of the rest of the counters.
941 * We choose an event which for CCI-5xx is guaranteed not to count.
942 * We use the highest possible event code (0x1f) for the master interface 0.
944 #define CCI5xx_INVALID_EVENT ((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
945 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
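/*
 * i.e. source = CCI5xx_PORT_M0 (0x8), code = 0x1f, giving 0x11f. Master
 * interface event codes only go up to 0x06, so this encoding never counts.
 */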
946 static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
949 DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
951 bitmap_zero(saved_mask, cci_pmu->num_cntrs);
952 pmu_save_counters(cci_pmu, saved_mask);
955 * Now that all the counters are disabled, we can safely turn the PMU on,
956 * without syncing the status of the counters
958 __cci_pmu_enable_nosync(cci_pmu);
960 for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
961 struct perf_event *event = cci_pmu->hw_events.events[i];
966 pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
967 pmu_enable_counter(cci_pmu, i);
968 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
969 pmu_disable_counter(cci_pmu, i);
970 pmu_set_event(cci_pmu, i, event->hw.config_base);
973 __cci_pmu_disable(cci_pmu);
975 pmu_restore_counters(cci_pmu, saved_mask);
978 #endif /* CONFIG_ARM_CCI5xx_PMU */
980 static u64 pmu_event_update(struct perf_event *event)
982 struct hw_perf_event *hwc = &event->hw;
983 u64 delta, prev_raw_count, new_raw_count;
986 prev_raw_count = local64_read(&hwc->prev_count);
987 new_raw_count = pmu_read_counter(event);
988 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
989 new_raw_count) != prev_raw_count);
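/*
 * The counters are 32 bits wide, so mask the difference: a wrap from
 * 0xffffffff back to a small value still yields the correct delta,
 * e.g. prev = 0xfffffff0, new = 0x10 gives delta = 0x20.
 */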
991 delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
993 local64_add(delta, &event->count);
995 return new_raw_count;
998 static void pmu_read(struct perf_event *event)
1000 pmu_event_update(event);
1003 static void pmu_event_set_period(struct perf_event *event)
1005 struct hw_perf_event *hwc = &event->hw;
1007 * The CCI PMU counters have a period of 2^32. To account for the
1008 * possibility of extreme interrupt latency, we program for a period of
1009 * half that. Hopefully we can handle the interrupt before another 2^31
1010 * events occur and the counter overtakes its previous value.
1012 u64 val = 1ULL << 31;
1013 local64_set(&hwc->prev_count, val);
1016 * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose
1017 * values need to be synced with the s/w state before the PMU is enabled.
1019 * Mark this counter for sync.
1021 hwc->state |= PERF_HES_ARCH;
1024 static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
1026 struct cci_pmu *cci_pmu = dev;
1027 struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
1028 int idx, handled = IRQ_NONE;
1030 raw_spin_lock(&events->pmu_lock);
1032 /* Disable the PMU while we walk through the counters */
1033 __cci_pmu_disable(cci_pmu);
1035 * Iterate over counters and update the corresponding perf events.
1036 * This should work regardless of whether we have per-counter overflow
1037 * interrupt or a combined overflow interrupt.
1039 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
1040 struct perf_event *event = events->events[idx];
1045 /* Did this counter overflow? */
1046 if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
1047 CCI_PMU_OVRFLW_FLAG))
1050 pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
1053 pmu_event_update(event);
1054 pmu_event_set_period(event);
1055 handled = IRQ_HANDLED;
1058 /* Enable the PMU and sync possibly overflowed counters */
1059 __cci_pmu_enable_sync(cci_pmu);
1060 raw_spin_unlock(&events->pmu_lock);
1062 return IRQ_RETVAL(handled);
1065 static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
1067 int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
1069 pmu_free_irq(cci_pmu);
1075 static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
1077 pmu_free_irq(cci_pmu);
1080 static void hw_perf_event_destroy(struct perf_event *event)
1082 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1083 atomic_t *active_events = &cci_pmu->active_events;
1084 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
1086 if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
1087 cci_pmu_put_hw(cci_pmu);
1088 mutex_unlock(reserve_mutex);
1092 static void cci_pmu_enable(struct pmu *pmu)
1094 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1095 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1096 bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs);
1097 unsigned long flags;
1102 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1103 __cci_pmu_enable_sync(cci_pmu);
1104 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1108 static void cci_pmu_disable(struct pmu *pmu)
1110 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1111 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1112 unsigned long flags;
1114 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1115 __cci_pmu_disable(cci_pmu);
1116 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1120 * Check if the idx represents a non-programmable counter.
1121 * All the fixed event counters are mapped before the programmable counters.
1124 static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
1126 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
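/*
 * For the CCI-400 models below the single fixed counter is the cycle counter
 * at idx 0 (FIXED_HW_CNTRS_CII_4XX = 1); the CCI-5xx models have none.
 */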
1129 static void cci_pmu_start(struct perf_event *event, int pmu_flags)
1131 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1132 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1133 struct hw_perf_event *hwc = &event->hw;
1135 unsigned long flags;
1138 * To handle interrupt latency, we always reprogram the period
1139 * regardless of PERF_EF_RELOAD.
1141 if (pmu_flags & PERF_EF_RELOAD)
1142 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
1146 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1147 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1151 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1153 /* Configure the counter unless you are counting a fixed event */
1154 if (!pmu_fixed_hw_idx(cci_pmu, idx))
1155 pmu_set_event(cci_pmu, idx, hwc->config_base);
1157 pmu_event_set_period(event);
1158 pmu_enable_counter(cci_pmu, idx);
1160 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1163 static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
1165 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1166 struct hw_perf_event *hwc = &event->hw;
1169 if (hwc->state & PERF_HES_STOPPED)
1172 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1173 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1178 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
1181 pmu_disable_counter(cci_pmu, idx);
1182 pmu_event_update(event);
1183 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
1186 static int cci_pmu_add(struct perf_event *event, int flags)
1188 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1189 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1190 struct hw_perf_event *hwc = &event->hw;
1193 /* If we don't have space for the counter then finish early. */
1194 idx = pmu_get_event_idx(hw_events, event);
1198 event->hw.idx = idx;
1199 hw_events->events[idx] = event;
1201 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
1202 if (flags & PERF_EF_START)
1203 cci_pmu_start(event, PERF_EF_RELOAD);
1205 /* Propagate our changes to the userspace mapping. */
1206 perf_event_update_userpage(event);
1211 static void cci_pmu_del(struct perf_event *event, int flags)
1213 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1214 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1215 struct hw_perf_event *hwc = &event->hw;
1218 cci_pmu_stop(event, PERF_EF_UPDATE);
1219 hw_events->events[idx] = NULL;
1220 clear_bit(idx, hw_events->used_mask);
1222 perf_event_update_userpage(event);
1225 static int validate_event(struct pmu *cci_pmu,
1226 struct cci_pmu_hw_events *hw_events,
1227 struct perf_event *event)
1229 if (is_software_event(event))
1233 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
1234 * core perf code won't check that the pmu->ctx == leader->ctx
1235 * until after pmu->event_init(event).
1237 if (event->pmu != cci_pmu)
1240 if (event->state < PERF_EVENT_STATE_OFF)
1243 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
1246 return pmu_get_event_idx(hw_events, event) >= 0;
1249 static int validate_group(struct perf_event *event)
1251 struct perf_event *sibling, *leader = event->group_leader;
1252 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1253 unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
1254 struct cci_pmu_hw_events fake_pmu = {
1256 * Initialise the fake PMU. We only need to populate the
1257 * used_mask for the purposes of validation.
1261 bitmap_zero(mask, cci_pmu->num_cntrs);
1263 if (!validate_event(event->pmu, &fake_pmu, leader))
1266 for_each_sibling_event(sibling, leader) {
1267 if (!validate_event(event->pmu, &fake_pmu, sibling))
1271 if (!validate_event(event->pmu, &fake_pmu, event))
1277 static int __hw_perf_event_init(struct perf_event *event)
1279 struct hw_perf_event *hwc = &event->hw;
1282 mapping = pmu_map_event(event);
1285 pr_debug("event %x:%llx not supported\n", event->attr.type,
1286 event->attr.config);
1291 * We don't assign an index until we actually place the event onto
1292 * hardware. Use -1 to signify that we haven't decided where to put it
1296 hwc->config_base = 0;
1298 hwc->event_base = 0;
1301 * Store the event encoding into the config_base field.
1303 hwc->config_base |= (unsigned long)mapping;
1305 if (event->group_leader != event) {
1306 if (validate_group(event) != 0)
1313 static int cci_pmu_event_init(struct perf_event *event)
1315 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1316 atomic_t *active_events = &cci_pmu->active_events;
1319 if (event->attr.type != event->pmu->type)
1322 /* Shared by all CPUs, no meaningful state to sample */
1323 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
1327 * Following the example set by other "uncore" PMUs, we accept any CPU
1328 * and rewrite its affinity dynamically rather than having perf core
1329 * handle cpu == -1 and pid == -1 for this case.
1331 * The perf core will pin online CPUs for the duration of this call and
1332 * the event being installed into its context, so the PMU's CPU can't
1333 * change under our feet.
1337 event->cpu = cci_pmu->cpu;
1339 event->destroy = hw_perf_event_destroy;
1340 if (!atomic_inc_not_zero(active_events)) {
1341 mutex_lock(&cci_pmu->reserve_mutex);
1342 if (atomic_read(active_events) == 0)
1343 err = cci_pmu_get_hw(cci_pmu);
1345 atomic_inc(active_events);
1346 mutex_unlock(&cci_pmu->reserve_mutex);
1351 err = __hw_perf_event_init(event);
1353 hw_perf_event_destroy(event);
1358 static ssize_t pmu_cpumask_attr_show(struct device *dev,
1359 struct device_attribute *attr, char *buf)
1361 struct pmu *pmu = dev_get_drvdata(dev);
1362 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1364 return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
1367 static struct device_attribute pmu_cpumask_attr =
1368 __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);
1370 static struct attribute *pmu_attrs[] = {
1371 &pmu_cpumask_attr.attr,
1375 static const struct attribute_group pmu_attr_group = {
1379 static struct attribute_group pmu_format_attr_group = {
1381 .attrs = NULL, /* Filled in cci_pmu_init_attrs */
1384 static struct attribute_group pmu_event_attr_group = {
1386 .attrs = NULL, /* Filled in cci_pmu_init_attrs */
1389 static const struct attribute_group *pmu_attr_groups[] = {
1391 &pmu_format_attr_group,
1392 &pmu_event_attr_group,
1396 static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
1398 const struct cci_pmu_model *model = cci_pmu->model;
1399 char *name = model->name;
1402 if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
1404 if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
1407 pmu_event_attr_group.attrs = model->event_attrs;
1408 pmu_format_attr_group.attrs = model->format_attrs;
1410 cci_pmu->pmu = (struct pmu) {
1411 .module = THIS_MODULE,
1412 .name = cci_pmu->model->name,
1413 .task_ctx_nr = perf_invalid_context,
1414 .pmu_enable = cci_pmu_enable,
1415 .pmu_disable = cci_pmu_disable,
1416 .event_init = cci_pmu_event_init,
1419 .start = cci_pmu_start,
1420 .stop = cci_pmu_stop,
1422 .attr_groups = pmu_attr_groups,
1423 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
1426 cci_pmu->plat_device = pdev;
1427 num_cntrs = pmu_get_max_counters(cci_pmu);
1428 if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
1429 dev_warn(&pdev->dev,
1430 "PMU implements more counters(%d) than supported by"
1431 " the model(%d), truncated.",
1432 num_cntrs, cci_pmu->model->num_hw_cntrs);
1433 num_cntrs = cci_pmu->model->num_hw_cntrs;
1435 cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
1437 return perf_pmu_register(&cci_pmu->pmu, name, -1);
1440 static int cci_pmu_offline_cpu(unsigned int cpu)
1444 if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
1447 target = cpumask_any_but(cpu_online_mask, cpu);
1448 if (target >= nr_cpu_ids)
1451 perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
1452 g_cci_pmu->cpu = target;
1456 static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
1457 #ifdef CONFIG_ARM_CCI400_PMU
1460 .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
1461 .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
1463 .format_attrs = cci400_pmu_format_attrs,
1464 .event_attrs = cci400_r0_pmu_event_attrs,
1467 CCI400_R0_SLAVE_PORT_MIN_EV,
1468 CCI400_R0_SLAVE_PORT_MAX_EV,
1471 CCI400_R0_MASTER_PORT_MIN_EV,
1472 CCI400_R0_MASTER_PORT_MAX_EV,
1475 .validate_hw_event = cci400_validate_hw_event,
1476 .get_event_idx = cci400_get_event_idx,
1479 .name = "CCI_400_r1",
1480 .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
1481 .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
1483 .format_attrs = cci400_pmu_format_attrs,
1484 .event_attrs = cci400_r1_pmu_event_attrs,
1487 CCI400_R1_SLAVE_PORT_MIN_EV,
1488 CCI400_R1_SLAVE_PORT_MAX_EV,
1491 CCI400_R1_MASTER_PORT_MIN_EV,
1492 CCI400_R1_MASTER_PORT_MAX_EV,
1495 .validate_hw_event = cci400_validate_hw_event,
1496 .get_event_idx = cci400_get_event_idx,
1499 #ifdef CONFIG_ARM_CCI5xx_PMU
1502 .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
1503 .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
1504 .cntr_size = SZ_64K,
1505 .format_attrs = cci5xx_pmu_format_attrs,
1506 .event_attrs = cci5xx_pmu_event_attrs,
1509 CCI5xx_SLAVE_PORT_MIN_EV,
1510 CCI5xx_SLAVE_PORT_MAX_EV,
1513 CCI5xx_MASTER_PORT_MIN_EV,
1514 CCI5xx_MASTER_PORT_MAX_EV,
1517 CCI5xx_GLOBAL_PORT_MIN_EV,
1518 CCI5xx_GLOBAL_PORT_MAX_EV,
1521 .validate_hw_event = cci500_validate_hw_event,
1522 .write_counters = cci5xx_pmu_write_counters,
1526 .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
1527 .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
1528 .cntr_size = SZ_64K,
1529 .format_attrs = cci5xx_pmu_format_attrs,
1530 .event_attrs = cci5xx_pmu_event_attrs,
1533 CCI5xx_SLAVE_PORT_MIN_EV,
1534 CCI5xx_SLAVE_PORT_MAX_EV,
1537 CCI5xx_MASTER_PORT_MIN_EV,
1538 CCI5xx_MASTER_PORT_MAX_EV,
1541 CCI5xx_GLOBAL_PORT_MIN_EV,
1542 CCI5xx_GLOBAL_PORT_MAX_EV,
1545 .validate_hw_event = cci550_validate_hw_event,
1546 .write_counters = cci5xx_pmu_write_counters,
1551 static const struct of_device_id arm_cci_pmu_matches[] = {
1552 #ifdef CONFIG_ARM_CCI400_PMU
1554 .compatible = "arm,cci-400-pmu",
1558 .compatible = "arm,cci-400-pmu,r0",
1559 .data = &cci_pmu_models[CCI400_R0],
1562 .compatible = "arm,cci-400-pmu,r1",
1563 .data = &cci_pmu_models[CCI400_R1],
1566 #ifdef CONFIG_ARM_CCI5xx_PMU
1568 .compatible = "arm,cci-500-pmu,r0",
1569 .data = &cci_pmu_models[CCI500_R0],
1572 .compatible = "arm,cci-550-pmu,r0",
1573 .data = &cci_pmu_models[CCI550_R0],
1578 MODULE_DEVICE_TABLE(of, arm_cci_pmu_matches);
1580 static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
1584 for (i = 0; i < nr_irqs; i++)
1591 static struct cci_pmu *cci_pmu_alloc(struct device *dev)
1593 struct cci_pmu *cci_pmu;
1594 const struct cci_pmu_model *model;
1597 * All allocations are devm_* hence we don't have to free
1598 * them explicitly on an error, as it would end up in driver detach.
1601 cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
1603 return ERR_PTR(-ENOMEM);
1605 cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;
1607 model = of_device_get_match_data(dev);
1610 "DEPRECATED compatible property, requires secure access to CCI registers");
1611 model = probe_cci_model(cci_pmu);
1614 dev_warn(dev, "CCI PMU version not supported\n");
1615 return ERR_PTR(-ENODEV);
1618 cci_pmu->model = model;
1619 cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model),
1620 sizeof(*cci_pmu->irqs), GFP_KERNEL);
1622 return ERR_PTR(-ENOMEM);
1623 cci_pmu->hw_events.events = devm_kcalloc(dev,
1624 CCI_PMU_MAX_HW_CNTRS(model),
1625 sizeof(*cci_pmu->hw_events.events),
1627 if (!cci_pmu->hw_events.events)
1628 return ERR_PTR(-ENOMEM);
1629 cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev,
1630 CCI_PMU_MAX_HW_CNTRS(model),
1632 if (!cci_pmu->hw_events.used_mask)
1633 return ERR_PTR(-ENOMEM);
1638 static int cci_pmu_probe(struct platform_device *pdev)
1640 struct cci_pmu *cci_pmu;
1643 cci_pmu = cci_pmu_alloc(&pdev->dev);
1644 if (IS_ERR(cci_pmu))
1645 return PTR_ERR(cci_pmu);
1647 cci_pmu->base = devm_platform_ioremap_resource(pdev, 0);
1648 if (IS_ERR(cci_pmu->base))
1652 * CCI PMU has one overflow interrupt per counter; but some may be tied
1653 * together to a common interrupt.
1655 cci_pmu->nr_irqs = 0;
1656 for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
1657 irq = platform_get_irq(pdev, i);
1661 if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
1664 cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
1668 * Ensure that the device tree has as many interrupts as the number of counters.
1671 if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
1672 dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
1673 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
1677 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
1678 mutex_init(&cci_pmu->reserve_mutex);
1679 atomic_set(&cci_pmu->active_events, 0);
1681 cci_pmu->cpu = raw_smp_processor_id();
1682 g_cci_pmu = cci_pmu;
1683 cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
1684 "perf/arm/cci:online", NULL,
1685 cci_pmu_offline_cpu);
1687 ret = cci_pmu_init(cci_pmu, pdev);
1689 goto error_pmu_init;
1691 pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name);
1695 cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
1700 static int cci_pmu_remove(struct platform_device *pdev)
1705 cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
1706 perf_pmu_unregister(&g_cci_pmu->pmu);
1712 static struct platform_driver cci_pmu_driver = {
1714 .name = DRIVER_NAME,
1715 .of_match_table = arm_cci_pmu_matches,
1716 .suppress_bind_attrs = true,
1718 .probe = cci_pmu_probe,
1719 .remove = cci_pmu_remove,
1722 module_platform_driver(cci_pmu_driver);
1723 MODULE_LICENSE("GPL v2");
1724 MODULE_DESCRIPTION("ARM CCI PMU support");