// SPDX-License-Identifier: GPL-2.0-only OR Linux-OpenIB

/*
 * Mellanox BlueField Performance Monitoring Counters driver
 *
 * This driver provides a sysfs interface for monitoring
 * performance statistics in the BlueField SoC.
 *
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
 */
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <uapi/linux/psci.h>
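
/*
 * SMC function IDs used to ask the Arm SiP service to read or write
 * performance registers on the driver's behalf, plus the UID/version
 * calls used to probe for that service at probe time.
 */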
#define MLXBF_PMC_WRITE_REG_32 0x82000009
#define MLXBF_PMC_READ_REG_32 0x8200000A
#define MLXBF_PMC_WRITE_REG_64 0x8200000B
#define MLXBF_PMC_READ_REG_64 0x8200000C
#define MLXBF_PMC_SIP_SVC_UID 0x8200ff01
#define MLXBF_PMC_SIP_SVC_VERSION 0x8200ff03
#define MLXBF_PMC_SVC_REQ_MAJOR 0
#define MLXBF_PMC_SVC_MIN_MINOR 3

#define MLXBF_PMC_SMCCC_ACCESS_VIOLATION -4
#define MLXBF_PMC_EVENT_SET_BF1 0
#define MLXBF_PMC_EVENT_SET_BF2 1
#define MLXBF_PMC_EVENT_INFO_LEN 100

#define MLXBF_PMC_MAX_BLOCKS 30
#define MLXBF_PMC_MAX_ATTRS 30
#define MLXBF_PMC_INFO_SZ 4
#define MLXBF_PMC_REG_SIZE 8
#define MLXBF_PMC_L3C_REG_SIZE 4
#define MLXBF_PMC_TYPE_COUNTER 1
#define MLXBF_PMC_TYPE_REGISTER 0
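
/*
 * Internal perfmon register numbers: these are not MMIO offsets but the
 * values packed into the ADDR field of the PERFMON_CONFIG word below
 * (counter control, event select and accumulator 0).
 */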
#define MLXBF_PMC_PERFCTL 0
#define MLXBF_PMC_PERFEVT 1
#define MLXBF_PMC_PERFACC0 4
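
/*
 * Layout of the 64-bit PERFMON_CONFIG word written at a counter's base
 * address: WR_R_B selects write (1) or read (0) access, STROBE triggers
 * the access, ADDR picks the internal register and WDATA carries the
 * value to be written.
 */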
#define MLXBF_PMC_PERFMON_CONFIG_WR_R_B BIT(0)
#define MLXBF_PMC_PERFMON_CONFIG_STROBE BIT(1)
#define MLXBF_PMC_PERFMON_CONFIG_ADDR GENMASK_ULL(4, 2)
#define MLXBF_PMC_PERFMON_CONFIG_WDATA GENMASK_ULL(60, 5)
#define MLXBF_PMC_PERFCTL_FM0 GENMASK_ULL(18, 16)
#define MLXBF_PMC_PERFCTL_MS0 GENMASK_ULL(21, 20)
#define MLXBF_PMC_PERFCTL_ACCM0 GENMASK_ULL(26, 24)
#define MLXBF_PMC_PERFCTL_AD0 BIT(27)
#define MLXBF_PMC_PERFCTL_ETRIG0 GENMASK_ULL(29, 28)
#define MLXBF_PMC_PERFCTL_EB0 BIT(30)
#define MLXBF_PMC_PERFCTL_EN0 BIT(31)

#define MLXBF_PMC_PERFEVT_EVTSEL GENMASK_ULL(31, 24)
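
/*
 * The L3 cache blocks use dedicated hardware counters instead of the
 * generic perfmon scheme: events are selected through the CNT_SEL and
 * CNT_SEL_1 registers and results are read back from CNT_LOW/CNT_HIGH
 * register pairs.
 */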
#define MLXBF_PMC_L3C_PERF_CNT_CFG 0x0
#define MLXBF_PMC_L3C_PERF_CNT_SEL 0x10
#define MLXBF_PMC_L3C_PERF_CNT_SEL_1 0x14
#define MLXBF_PMC_L3C_PERF_CNT_LOW 0x40
#define MLXBF_PMC_L3C_PERF_CNT_HIGH 0x60

#define MLXBF_PMC_L3C_PERF_CNT_CFG_EN BIT(0)
#define MLXBF_PMC_L3C_PERF_CNT_CFG_RST BIT(1)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0 GENMASK(5, 0)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1 GENMASK(13, 8)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2 GENMASK(21, 16)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3 GENMASK(29, 24)

#define MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4 GENMASK(5, 0)

#define MLXBF_PMC_L3C_PERF_CNT_LOW_VAL GENMASK(31, 0)
#define MLXBF_PMC_L3C_PERF_CNT_HIGH_VAL GENMASK(24, 0)
/**
 * struct mlxbf_pmc_attribute - Structure to hold attribute and block info
 * for each sysfs entry
 * @dev_attr: Device attribute struct
 * @index: index to identify counter number within a block
 * @nr: block number to which the sysfs belongs
 */
struct mlxbf_pmc_attribute {
        struct device_attribute dev_attr;
        int index;
        int nr;
};
/**
 * struct mlxbf_pmc_block_info - Structure to hold info for each HW block
 *
 * @mmio_base: The VA at which the PMC block is mapped
 * @blk_size: Size of each mapped region
 * @counters: Number of counters in the block
 * @type: Type of counters in the block
 * @attr_counter: Attributes for "counter" sysfs files
 * @attr_event: Attributes for "event" sysfs files
 * @attr_event_list: Attributes for "event_list" sysfs files
 * @attr_enable: Attributes for "enable" sysfs files
 * @block_attr: All attributes needed for the block
 * @block_attr_grp: Attribute group for the block
 */
struct mlxbf_pmc_block_info {
        void __iomem *mmio_base;
        uint64_t blk_size;
        uint8_t counters;
        int type;
        struct mlxbf_pmc_attribute *attr_counter;
        struct mlxbf_pmc_attribute *attr_event;
        struct mlxbf_pmc_attribute attr_event_list;
        struct mlxbf_pmc_attribute attr_enable;
        struct attribute *block_attr[MLXBF_PMC_MAX_ATTRS];
        struct attribute_group block_attr_grp;
};
/**
 * struct mlxbf_pmc_context - Structure to hold PMC context info
 *
 * @pdev: The kernel structure representing the device
 * @total_blocks: Total number of blocks
 * @tile_count: Number of tiles in the system
 * @hwmon_dev: Hwmon device for bfperf
 * @block_name: Block name
 * @block: Block info for each block
 * @groups: Attribute groups from each block
 * @svc_sreg_support: Whether SMCs are used to access performance registers
 * @sreg_tbl_perf: Secure register access table number
 * @event_set: Event set to use
 */
struct mlxbf_pmc_context {
        struct platform_device *pdev;
        uint32_t total_blocks;
        uint32_t tile_count;
        struct device *hwmon_dev;
        const char *block_name[MLXBF_PMC_MAX_BLOCKS];
        struct mlxbf_pmc_block_info block[MLXBF_PMC_MAX_BLOCKS];
        const struct attribute_group *groups[MLXBF_PMC_MAX_BLOCKS];
        bool svc_sreg_support;
        uint32_t sreg_tbl_perf;
        unsigned int event_set;
};
/**
 * struct mlxbf_pmc_events - Structure to hold supported events for each block
 * @evt_num: Event number used to program counters
 * @evt_name: Name of the event
 */
struct mlxbf_pmc_events {
        int evt_num;
        char *evt_name;
};
static const struct mlxbf_pmc_events mlxbf_pmc_pcie_events[] = {
        { 0x0, "IN_P_PKT_CNT" },
        { 0x10, "IN_NP_PKT_CNT" },
        { 0x18, "IN_C_PKT_CNT" },
        { 0x20, "OUT_P_PKT_CNT" },
        { 0x28, "OUT_NP_PKT_CNT" },
        { 0x30, "OUT_C_PKT_CNT" },
        { 0x38, "IN_P_BYTE_CNT" },
        { 0x40, "IN_NP_BYTE_CNT" },
        { 0x48, "IN_C_BYTE_CNT" },
        { 0x50, "OUT_P_BYTE_CNT" },
        { 0x58, "OUT_NP_BYTE_CNT" },
        { 0x60, "OUT_C_BYTE_CNT" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
        { 0x6, "AW_REQ_TBU" },
        { 0xd, "AR_REQ_TBU" },
        { 0xf, "TX_DAT_AF" },
        { 0x10, "RX_DAT_AF" },
        { 0x11, "RETRYQ_CRED" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
        { 0xa0, "TPIO_DATA_BEAT" },
        { 0xa1, "TDMA_DATA_BEAT" },
        { 0xa2, "MAP_DATA_BEAT" },
        { 0xa3, "TXMSG_DATA_BEAT" },
        { 0xa4, "TPIO_DATA_PACKET" },
        { 0xa5, "TDMA_DATA_PACKET" },
        { 0xa6, "MAP_DATA_PACKET" },
        { 0xa7, "TXMSG_DATA_PACKET" },
        { 0xa8, "TDMA_RT_AF" },
        { 0xa9, "TDMA_PBUF_MAC_AF" },
        { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
        { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
        { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
        { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
        { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
        { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
        { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
        { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
        { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
        { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
        { 0xa0, "TPIO_DATA_BEAT" },
        { 0xa1, "TDMA_DATA_BEAT" },
        { 0xa2, "MAP_DATA_BEAT" },
        { 0xa3, "TXMSG_DATA_BEAT" },
        { 0xa4, "TPIO_DATA_PACKET" },
        { 0xa5, "TDMA_DATA_PACKET" },
        { 0xa6, "MAP_DATA_PACKET" },
        { 0xa7, "TXMSG_DATA_PACKET" },
        { 0xa8, "TDMA_RT_AF" },
        { 0xa9, "TDMA_PBUF_MAC_AF" },
        { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
        { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
        { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
        { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
        { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
        { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
        { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
        { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
        { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
        { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
        { 0xb4, "TRIO_RING_TX_FLIT_CH0" },
        { 0xb5, "TRIO_RING_TX_FLIT_CH1" },
        { 0xb6, "TRIO_RING_TX_FLIT_CH2" },
        { 0xb7, "TRIO_RING_TX_FLIT_CH3" },
        { 0xb8, "TRIO_RING_TX_FLIT_CH4" },
        { 0xb9, "TRIO_RING_RX_FLIT_CH0" },
        { 0xba, "TRIO_RING_RX_FLIT_CH1" },
        { 0xbb, "TRIO_RING_RX_FLIT_CH2" },
        { 0xbc, "TRIO_RING_RX_FLIT_CH3" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
        { 0x100, "ECC_SINGLE_ERROR_CNT" },
        { 0x104, "ECC_DOUBLE_ERROR_CNT" },
        { 0x114, "SERR_INJ" },
        { 0x118, "DERR_INJ" },
        { 0x124, "ECC_SINGLE_ERROR_0" },
        { 0x164, "ECC_DOUBLE_ERROR_0" },
        { 0x340, "DRAM_ECC_COUNT" },
        { 0x344, "DRAM_ECC_INJECT" },
        { 0x348, "DRAM_ECC_ERROR" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
        { 0xc0, "RXREQ_MSS" },
        { 0xc1, "RXDAT_MSS" },
        { 0xc2, "TXRSP_MSS" },
        { 0xc3, "TXDAT_MSS" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
        { 0x45, "HNF_REQUESTS" },
        { 0x46, "HNF_REJECTS" },
        { 0x47, "ALL_BUSY" },
        { 0x48, "MAF_BUSY" },
        { 0x49, "MAF_REQUESTS" },
        { 0x4a, "RNF_REQUESTS" },
        { 0x4b, "REQUEST_TYPE" },
        { 0x4c, "MEMORY_READS" },
        { 0x4d, "MEMORY_WRITES" },
        { 0x4e, "VICTIM_WRITE" },
        { 0x4f, "POC_FULL" },
        { 0x50, "POC_FAIL" },
        { 0x51, "POC_SUCCESS" },
        { 0x52, "POC_WRITES" },
        { 0x53, "POC_READS" },
        { 0x55, "RXREQ_HNF" },
        { 0x56, "RXRSP_HNF" },
        { 0x57, "RXDAT_HNF" },
        { 0x58, "TXREQ_HNF" },
        { 0x59, "TXRSP_HNF" },
        { 0x5a, "TXDAT_HNF" },
        { 0x5b, "TXSNP_HNF" },
        { 0x5c, "INDEX_MATCH" },
        { 0x5d, "A72_ACCESS" },
        { 0x5e, "IO_ACCESS" },
        { 0x5f, "TSO_WRITE" },
        { 0x60, "TSO_CONFLICT" },
        { 0x62, "HNF_ACCEPTS" },
        { 0x63, "REQ_BUF_EMPTY" },
        { 0x64, "REQ_BUF_IDLE_MAF" },
        { 0x65, "TSO_NOARB" },
        { 0x66, "TSO_NOARB_CYCLES" },
        { 0x67, "MSS_NO_CREDIT" },
        { 0x68, "TXDAT_NO_LCRD" },
        { 0x69, "TXSNP_NO_LCRD" },
        { 0x6a, "TXRSP_NO_LCRD" },
        { 0x6b, "TXREQ_NO_LCRD" },
        { 0x6c, "TSO_CL_MATCH" },
        { 0x6d, "MEMORY_READS_BYPASS" },
        { 0x6e, "TSO_NOARB_TIMEOUT" },
        { 0x6f, "ALLOCATE" },
        { 0x71, "A72_WRITE" },
        { 0x72, "A72_READ" },
        { 0x73, "IO_WRITE" },
        { 0x75, "TSO_REJECT" },
        { 0x80, "TXREQ_RN" },
        { 0x81, "TXRSP_RN" },
        { 0x82, "TXDAT_RN" },
        { 0x83, "RXSNP_RN" },
        { 0x84, "RXRSP_RN" },
        { 0x85, "RXDAT_RN" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
        { 0x15, "CDN_DIAG_N_OUT_OF_CRED" },
        { 0x16, "CDN_DIAG_S_OUT_OF_CRED" },
        { 0x17, "CDN_DIAG_E_OUT_OF_CRED" },
        { 0x18, "CDN_DIAG_W_OUT_OF_CRED" },
        { 0x19, "CDN_DIAG_C_OUT_OF_CRED" },
        { 0x1a, "CDN_DIAG_N_EGRESS" },
        { 0x1b, "CDN_DIAG_S_EGRESS" },
        { 0x1c, "CDN_DIAG_E_EGRESS" },
        { 0x1d, "CDN_DIAG_W_EGRESS" },
        { 0x1e, "CDN_DIAG_C_EGRESS" },
        { 0x1f, "CDN_DIAG_N_INGRESS" },
        { 0x20, "CDN_DIAG_S_INGRESS" },
        { 0x21, "CDN_DIAG_E_INGRESS" },
        { 0x22, "CDN_DIAG_W_INGRESS" },
        { 0x23, "CDN_DIAG_C_INGRESS" },
        { 0x24, "CDN_DIAG_CORE_SENT" },
        { 0x25, "DDN_DIAG_N_OUT_OF_CRED" },
        { 0x26, "DDN_DIAG_S_OUT_OF_CRED" },
        { 0x27, "DDN_DIAG_E_OUT_OF_CRED" },
        { 0x28, "DDN_DIAG_W_OUT_OF_CRED" },
        { 0x29, "DDN_DIAG_C_OUT_OF_CRED" },
        { 0x2a, "DDN_DIAG_N_EGRESS" },
        { 0x2b, "DDN_DIAG_S_EGRESS" },
        { 0x2c, "DDN_DIAG_E_EGRESS" },
        { 0x2d, "DDN_DIAG_W_EGRESS" },
        { 0x2e, "DDN_DIAG_C_EGRESS" },
        { 0x2f, "DDN_DIAG_N_INGRESS" },
        { 0x30, "DDN_DIAG_S_INGRESS" },
        { 0x31, "DDN_DIAG_E_INGRESS" },
        { 0x32, "DDN_DIAG_W_INGRESS" },
        { 0x33, "DDN_DIAG_C_INGRESS" },
        { 0x34, "DDN_DIAG_CORE_SENT" },
        { 0x35, "NDN_DIAG_N_OUT_OF_CRED" },
        { 0x36, "NDN_DIAG_S_OUT_OF_CRED" },
        { 0x37, "NDN_DIAG_E_OUT_OF_CRED" },
        { 0x38, "NDN_DIAG_W_OUT_OF_CRED" },
        { 0x39, "NDN_DIAG_C_OUT_OF_CRED" },
        { 0x3a, "NDN_DIAG_N_EGRESS" },
        { 0x3b, "NDN_DIAG_S_EGRESS" },
        { 0x3c, "NDN_DIAG_E_EGRESS" },
        { 0x3d, "NDN_DIAG_W_EGRESS" },
        { 0x3e, "NDN_DIAG_C_EGRESS" },
        { 0x3f, "NDN_DIAG_N_INGRESS" },
        { 0x40, "NDN_DIAG_S_INGRESS" },
        { 0x41, "NDN_DIAG_E_INGRESS" },
        { 0x42, "NDN_DIAG_W_INGRESS" },
        { 0x43, "NDN_DIAG_C_INGRESS" },
        { 0x44, "NDN_DIAG_CORE_SENT" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_l3c_events[] = {
        { 0x02, "TOTAL_RD_REQ_IN" },
        { 0x03, "TOTAL_WR_REQ_IN" },
        { 0x04, "TOTAL_WR_DBID_ACK" },
        { 0x05, "TOTAL_WR_DATA_IN" },
        { 0x06, "TOTAL_WR_COMP" },
        { 0x07, "TOTAL_RD_DATA_OUT" },
        { 0x08, "TOTAL_CDN_REQ_IN_BANK0" },
        { 0x09, "TOTAL_CDN_REQ_IN_BANK1" },
        { 0x0a, "TOTAL_DDN_REQ_IN_BANK0" },
        { 0x0b, "TOTAL_DDN_REQ_IN_BANK1" },
        { 0x0c, "TOTAL_EMEM_RD_RES_IN_BANK0" },
        { 0x0d, "TOTAL_EMEM_RD_RES_IN_BANK1" },
        { 0x0e, "TOTAL_CACHE_RD_RES_IN_BANK0" },
        { 0x0f, "TOTAL_CACHE_RD_RES_IN_BANK1" },
        { 0x10, "TOTAL_EMEM_RD_REQ_BANK0" },
        { 0x11, "TOTAL_EMEM_RD_REQ_BANK1" },
        { 0x12, "TOTAL_EMEM_WR_REQ_BANK0" },
        { 0x13, "TOTAL_EMEM_WR_REQ_BANK1" },
        { 0x14, "TOTAL_RD_REQ_OUT" },
        { 0x15, "TOTAL_WR_REQ_OUT" },
        { 0x16, "TOTAL_RD_RES_IN" },
        { 0x17, "HITS_BANK0" },
        { 0x18, "HITS_BANK1" },
        { 0x19, "MISSES_BANK0" },
        { 0x1a, "MISSES_BANK1" },
        { 0x1b, "ALLOCATIONS_BANK0" },
        { 0x1c, "ALLOCATIONS_BANK1" },
        { 0x1d, "EVICTIONS_BANK0" },
        { 0x1e, "EVICTIONS_BANK1" },
        { 0x1f, "DBID_REJECT" },
        { 0x20, "WRDB_REJECT_BANK0" },
        { 0x21, "WRDB_REJECT_BANK1" },
        { 0x22, "CMDQ_REJECT_BANK0" },
        { 0x23, "CMDQ_REJECT_BANK1" },
        { 0x24, "COB_REJECT_BANK0" },
        { 0x25, "COB_REJECT_BANK1" },
        { 0x26, "TRB_REJECT_BANK0" },
        { 0x27, "TRB_REJECT_BANK1" },
        { 0x28, "TAG_REJECT_BANK0" },
        { 0x29, "TAG_REJECT_BANK1" },
        { 0x2a, "ANY_REJECT_BANK0" },
        { 0x2b, "ANY_REJECT_BANK1" },
};

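/*
 * Each block is exposed as a sysfs attribute group under the "bfperf"
 * hwmon device. Illustrative usage (the hwmon index and block name below
 * are examples and depend on the ACPI description of the running system):
 *
 *   cat /sys/class/hwmon/hwmon0/l3cache0/event_list
 *   echo HITS_BANK0 > /sys/class/hwmon/hwmon0/l3cache0/event0
 *   echo 1 > /sys/class/hwmon/hwmon0/l3cache0/enable
 *   cat /sys/class/hwmon/hwmon0/l3cache0/counter0
 */
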
static struct mlxbf_pmc_context *pmc;

/* UUID used to probe ATF service. */
static const char *mlxbf_pmc_svc_uuid_str = "89c036b4-e7d7-11e6-8797-001aca00bfc4";

/* Calls an SMC to access a performance register */
static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command,
        struct arm_smccc_res res;

        arm_smccc_smc(command, pmc->sreg_tbl_perf, (uintptr_t)addr, 0, 0, 0, 0,

        case PSCI_RET_NOT_SUPPORTED:
        case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:

/* Read from a performance counter */
static int mlxbf_pmc_read(void __iomem *addr, uint32_t command,
                          uint64_t *result)
{
        if (pmc->svc_sreg_support)
                return mlxbf_pmc_secure_read(addr, command, result);

        if (command == MLXBF_PMC_READ_REG_32)
                *result = readl(addr);
        else
                *result = readq(addr);

        return 0;
}

/* Convenience function for 32-bit reads */
static int mlxbf_pmc_readl(void __iomem *addr, uint32_t *result)
{
        uint64_t read_out;
        int status;

        status = mlxbf_pmc_read(addr, MLXBF_PMC_READ_REG_32, &read_out);
        if (status)
                return status;

        *result = (uint32_t)read_out;

        return 0;
}

/* Calls an SMC to access a performance register */
static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command,
        struct arm_smccc_res res;

        arm_smccc_smc(command, pmc->sreg_tbl_perf, value, (uintptr_t)addr, 0, 0,

        case PSCI_RET_NOT_SUPPORTED:
        case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:

/* Write to a performance counter */
static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value)
{
        if (pmc->svc_sreg_support)
                return mlxbf_pmc_secure_write(addr, command, value);

        if (command == MLXBF_PMC_WRITE_REG_32)
                writel(value, addr);
        else
                writeq(value, addr);

        return 0;
}

/* Check if the register offset is within the mapped region for the block */
static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset)
{
        if ((offset >= 0) && !(offset % MLXBF_PMC_REG_SIZE) &&
            (offset + MLXBF_PMC_REG_SIZE <= pmc->block[blk_num].blk_size))
                return true; /* inside the mapped PMC space */

        return false;
}

/* Get the event list corresponding to a certain block */
static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk,
                                                           int *size)
{
        const struct mlxbf_pmc_events *events;

        if (strstr(blk, "tilenet")) {
                events = mlxbf_pmc_hnfnet_events;
                *size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events);
        } else if (strstr(blk, "tile")) {
                events = mlxbf_pmc_hnf_events;
                *size = ARRAY_SIZE(mlxbf_pmc_hnf_events);
        } else if (strstr(blk, "triogen")) {
                events = mlxbf_pmc_smgen_events;
                *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
        } else if (strstr(blk, "trio")) {
                switch (pmc->event_set) {
                case MLXBF_PMC_EVENT_SET_BF1:
                        events = mlxbf_pmc_trio_events_1;
                        *size = ARRAY_SIZE(mlxbf_pmc_trio_events_1);
                        break;
                case MLXBF_PMC_EVENT_SET_BF2:
                        events = mlxbf_pmc_trio_events_2;
                        *size = ARRAY_SIZE(mlxbf_pmc_trio_events_2);
                        break;
                default:
                        events = NULL;
                        *size = 0;
                        break;
                }
        } else if (strstr(blk, "mss")) {
                events = mlxbf_pmc_mss_events;
                *size = ARRAY_SIZE(mlxbf_pmc_mss_events);
        } else if (strstr(blk, "ecc")) {
                events = mlxbf_pmc_ecc_events;
                *size = ARRAY_SIZE(mlxbf_pmc_ecc_events);
        } else if (strstr(blk, "pcie")) {
                events = mlxbf_pmc_pcie_events;
                *size = ARRAY_SIZE(mlxbf_pmc_pcie_events);
        } else if (strstr(blk, "l3cache")) {
                events = mlxbf_pmc_l3c_events;
                *size = ARRAY_SIZE(mlxbf_pmc_l3c_events);
        } else if (strstr(blk, "gic")) {
                events = mlxbf_pmc_smgen_events;
                *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
        } else if (strstr(blk, "smmu")) {
                events = mlxbf_pmc_smgen_events;
                *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
        } else {
                events = NULL;
                *size = 0;
        }

        return events;
}

/* Get the event number given the name */
static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
{
        const struct mlxbf_pmc_events *events;
        int i, size;

        events = mlxbf_pmc_event_list(blk, &size);
        if (!events)
                return -EINVAL;

        for (i = 0; i < size; ++i) {
                if (!strcmp(evt, events[i].evt_name))
                        return events[i].evt_num;
        }

        return -ENODEV;
}

/* Get the event name given the number */
static char *mlxbf_pmc_get_event_name(const char *blk, int evt)
{
        const struct mlxbf_pmc_events *events;
        int i, size;

        events = mlxbf_pmc_event_list(blk, &size);
        if (!events)
                return NULL;

        for (i = 0; i < size; ++i) {
                if (evt == events[i].evt_num)
                        return events[i].evt_name;
        }

        return NULL;
}

/* Method to enable/disable/reset l3cache counters */
static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset)
{
        uint32_t perfcnt_cfg = 0;

        if (enable)
                perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_EN;
        if (reset)
                perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_RST;

        return mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
                                       MLXBF_PMC_L3C_PERF_CNT_CFG,
                               MLXBF_PMC_WRITE_REG_32, perfcnt_cfg);
}

/* Method to handle l3cache counter programming */
static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num,
        uint32_t perfcnt_sel_1 = 0;
        uint32_t perfcnt_sel = 0;
        void __iomem *pmcaddr;

        /* Disable all counters before programming them */
        if (mlxbf_pmc_config_l3_counters(blk_num, false, false))

        /* Select appropriate register information */
                pmcaddr = pmc->block[blk_num].mmio_base +
                          MLXBF_PMC_L3C_PERF_CNT_SEL;
                wordaddr = &perfcnt_sel;
                pmcaddr = pmc->block[blk_num].mmio_base +
                          MLXBF_PMC_L3C_PERF_CNT_SEL_1;
                wordaddr = &perfcnt_sel_1;

        ret = mlxbf_pmc_readl(pmcaddr, wordaddr);

                perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0;
                perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0,
                perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1;
                perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1,
                perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2;
                perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2,
                perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3;
                perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3,
                perfcnt_sel_1 &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4;
                perfcnt_sel_1 |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,

        return mlxbf_pmc_write(pmcaddr, MLXBF_PMC_WRITE_REG_32, *wordaddr);

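/*
 * Programming a generic (non-L3) counter is a three step sequence, each
 * step issued through a PERFMON_CONFIG write: configure and enable the
 * counter via PERFCTL, select the event via PERFEVT, then clear the
 * PERFACC0 accumulator so counting starts from zero.
 */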
/* Method to program a counter to monitor an event */
static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num,
                                     uint32_t evt, bool is_l3)
        uint64_t perfctl, perfevt, perfmon_cfg;

        if (cnt_num >= pmc->block[blk_num].counters)

                return mlxbf_pmc_program_l3_counter(blk_num, cnt_num, evt);

        /* Configure the counter */
        perfctl = FIELD_PREP(MLXBF_PMC_PERFCTL_EN0, 1);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_EB0, 0);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ETRIG0, 1);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_AD0, 0);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ACCM0, 0);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_MS0, 0);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_FM0, 0);

        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfctl);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
                                    cnt_num * MLXBF_PMC_REG_SIZE,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))

        /* Select the event */
        perfevt = FIELD_PREP(MLXBF_PMC_PERFEVT_EVTSEL, evt);

        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfevt);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
                                    cnt_num * MLXBF_PMC_REG_SIZE,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))

        /* Clear the accumulator */
        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
                                    cnt_num * MLXBF_PMC_REG_SIZE,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))

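/*
 * L3 counter values are wider than 32 bits, so a single logical counter
 * is read as a CNT_LOW/CNT_HIGH register pair and combined into one
 * 64-bit value below.
 */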
/* Method to handle l3 counter reads */
static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num,
        uint32_t perfcnt_low = 0, perfcnt_high = 0;

        status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
                                         MLXBF_PMC_L3C_PERF_CNT_LOW +
                                         cnt_num * MLXBF_PMC_L3C_REG_SIZE,

        status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
                                         MLXBF_PMC_L3C_PERF_CNT_HIGH +
                                         cnt_num * MLXBF_PMC_L3C_REG_SIZE,

        value = perfcnt_high;
        value = value << 32;
        value |= perfcnt_low;

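/*
 * Generic counters are read in two steps: a PERFMON_CONFIG write with
 * WR_R_B cleared puts the accumulator into read mode, after which the
 * value can be fetched from the block's value register.
 */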
/* Method to read the counter value */
static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3,
        uint32_t perfcfg_offset, perfval_offset;
        uint64_t perfmon_cfg;

        if (cnt_num >= pmc->block[blk_num].counters)

                return mlxbf_pmc_read_l3_counter(blk_num, cnt_num, result);

        perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
        perfval_offset = perfcfg_offset +
                         pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;

        /* Set counter in "read" mode */
        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);

        status = mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
                                 MLXBF_PMC_WRITE_REG_64, perfmon_cfg);

        /* Get the counter value */
        return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
                              MLXBF_PMC_READ_REG_64, result);

/* Method to read L3 block event */
static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num,
        uint32_t perfcnt_sel = 0, perfcnt_sel_1 = 0;
        void __iomem *pmcaddr;

        /* Select appropriate register information */
                pmcaddr = pmc->block[blk_num].mmio_base +
                          MLXBF_PMC_L3C_PERF_CNT_SEL;
                wordaddr = &perfcnt_sel;
                pmcaddr = pmc->block[blk_num].mmio_base +
                          MLXBF_PMC_L3C_PERF_CNT_SEL_1;
                wordaddr = &perfcnt_sel_1;

        if (mlxbf_pmc_readl(pmcaddr, wordaddr))

        /* Read from appropriate register field for the counter */
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0, perfcnt_sel);
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1, perfcnt_sel);
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2, perfcnt_sel);
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3, perfcnt_sel);
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,

/* Method to find the event currently being monitored by a counter */
static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
        uint32_t perfcfg_offset, perfval_offset;
        uint64_t perfmon_cfg, perfevt, perfctl;

        if (cnt_num >= pmc->block[blk_num].counters)

                return mlxbf_pmc_read_l3_event(blk_num, cnt_num, result);

        perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
        perfval_offset = perfcfg_offset +
                         pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;

        /* Set counter in "read" mode */
        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))

        /* Check if the counter is enabled */

        if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
                           MLXBF_PMC_READ_REG_64, &perfctl))

        if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))

        /* Set counter in "read" mode */
        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))

        /* Get the event number */
        if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
                           MLXBF_PMC_READ_REG_64, &perfevt))

        *result = FIELD_GET(MLXBF_PMC_PERFEVT_EVTSEL, perfevt);

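/*
 * The ECC block exposes 32-bit registers, so it is accessed through the
 * 32-bit helpers; all other register-type blocks are accessed as 64-bit
 * registers after validating the offset against the mapped region.
 */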
/* Method to read a register */
static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result)
{
        uint32_t ecc_out;

        if (strstr(pmc->block_name[blk_num], "ecc")) {
                if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
                                    &ecc_out))
                        return -EFAULT;

                *result = ecc_out;
                return 0;
        }

        if (mlxbf_pmc_valid_range(blk_num, offset))
                return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + offset,
                                      MLXBF_PMC_READ_REG_64, result);

        return -EINVAL;
}

/* Method to write to a register */
static int mlxbf_pmc_write_reg(int blk_num, uint32_t offset, uint64_t data)
{
        if (strstr(pmc->block_name[blk_num], "ecc")) {
                return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
                                       MLXBF_PMC_WRITE_REG_32, data);
        }

        if (mlxbf_pmc_valid_range(blk_num, offset))
                return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
                                       MLXBF_PMC_WRITE_REG_64, data);

        return -EINVAL;
}

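/*
 * sysfs interface: every block directory carries an "event_list" file,
 * one "counterX"/"eventX" pair per hardware counter and, for the L3
 * cache blocks only, an "enable" file to start, stop or reset counting.
 */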
982 /* Show function for "counter" sysfs files */
983 static ssize_t mlxbf_pmc_counter_show(struct device *dev,
984 struct device_attribute *attr, char *buf)
986 struct mlxbf_pmc_attribute *attr_counter = container_of(
987 attr, struct mlxbf_pmc_attribute, dev_attr);
988 int blk_num, cnt_num, offset;
992 blk_num = attr_counter->nr;
993 cnt_num = attr_counter->index;
995 if (strstr(pmc->block_name[blk_num], "l3cache"))
998 if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
999 if (mlxbf_pmc_read_counter(blk_num, cnt_num, is_l3, &value))
1001 } else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
1002 offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
1006 if (mlxbf_pmc_read_reg(blk_num, offset, &value))
1011 return sprintf(buf, "0x%llx\n", value);
1014 /* Store function for "counter" sysfs files */
1015 static ssize_t mlxbf_pmc_counter_store(struct device *dev,
1016 struct device_attribute *attr,
1017 const char *buf, size_t count)
1019 struct mlxbf_pmc_attribute *attr_counter = container_of(
1020 attr, struct mlxbf_pmc_attribute, dev_attr);
1021 int blk_num, cnt_num, offset, err, data;
1025 blk_num = attr_counter->nr;
1026 cnt_num = attr_counter->index;
1028 err = kstrtoint(buf, 0, &data);
1032 /* Allow non-zero writes only to the ecc regs */
1033 if (!(strstr(pmc->block_name[blk_num], "ecc")) && data)
1036 /* Do not allow writes to the L3C regs */
1037 if (strstr(pmc->block_name[blk_num], "l3cache"))
1040 if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
1041 err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
1044 err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num,
1048 } else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
1049 offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
1053 err = mlxbf_pmc_write_reg(blk_num, offset, data);
1062 /* Show function for "event" sysfs files */
1063 static ssize_t mlxbf_pmc_event_show(struct device *dev,
1064 struct device_attribute *attr, char *buf)
1066 struct mlxbf_pmc_attribute *attr_event = container_of(
1067 attr, struct mlxbf_pmc_attribute, dev_attr);
1068 int blk_num, cnt_num, err;
1073 blk_num = attr_event->nr;
1074 cnt_num = attr_event->index;
1076 if (strstr(pmc->block_name[blk_num], "l3cache"))
1079 err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
1081 return sprintf(buf, "No event being monitored\n");
1083 evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
1087 return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
1090 /* Store function for "event" sysfs files */
1091 static ssize_t mlxbf_pmc_event_store(struct device *dev,
1092 struct device_attribute *attr,
1093 const char *buf, size_t count)
1095 struct mlxbf_pmc_attribute *attr_event = container_of(
1096 attr, struct mlxbf_pmc_attribute, dev_attr);
1097 int blk_num, cnt_num, evt_num, err;
1100 blk_num = attr_event->nr;
1101 cnt_num = attr_event->index;
1103 if (isalpha(buf[0])) {
1104 evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
1109 err = kstrtoint(buf, 0, &evt_num);
1114 if (strstr(pmc->block_name[blk_num], "l3cache"))
1117 err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num, is_l3);
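/*
 * An event may be selected either by name or by raw event number, e.g.
 * (illustrative paths and block name):
 *
 *   echo HNF_REQUESTS > .../tile0/event0
 *   echo 0x45 > .../tile0/event0
 */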
1124 /* Show function for "event_list" sysfs files */
1125 static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
1126 struct device_attribute *attr,
1129 struct mlxbf_pmc_attribute *attr_event_list = container_of(
1130 attr, struct mlxbf_pmc_attribute, dev_attr);
1131 int blk_num, i, size, len = 0, ret = 0;
1132 const struct mlxbf_pmc_events *events;
1133 char e_info[MLXBF_PMC_EVENT_INFO_LEN];
1135 blk_num = attr_event_list->nr;
1137 events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &size);
1141 for (i = 0, buf[0] = '\0'; i < size; ++i) {
1142 len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
1143 events[i].evt_name);
1144 if (len > PAGE_SIZE)
1146 strcat(buf, e_info);
1153 /* Show function for "enable" sysfs files - only for l3cache */
1154 static ssize_t mlxbf_pmc_enable_show(struct device *dev,
1155 struct device_attribute *attr, char *buf)
1157 struct mlxbf_pmc_attribute *attr_enable = container_of(
1158 attr, struct mlxbf_pmc_attribute, dev_attr);
1159 uint32_t perfcnt_cfg;
1162 blk_num = attr_enable->nr;
1164 if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
1165 MLXBF_PMC_L3C_PERF_CNT_CFG,
1169 value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
1171 return sprintf(buf, "%d\n", value);
1174 /* Store function for "enable" sysfs files - only for l3cache */
1175 static ssize_t mlxbf_pmc_enable_store(struct device *dev,
1176 struct device_attribute *attr,
1177 const char *buf, size_t count)
1179 struct mlxbf_pmc_attribute *attr_enable = container_of(
1180 attr, struct mlxbf_pmc_attribute, dev_attr);
1181 int err, en, blk_num;
1183 blk_num = attr_enable->nr;
1185 err = kstrtoint(buf, 0, &en);
1190 err = mlxbf_pmc_config_l3_counters(blk_num, false, false);
1193 } else if (en == 1) {
1194 err = mlxbf_pmc_config_l3_counters(blk_num, false, true);
1197 err = mlxbf_pmc_config_l3_counters(blk_num, true, false);
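/*
 * Writing to an L3C "enable" file: 0 stops the counters, while 1 first
 * resets them and then starts counting.
 */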
/* Populate attributes for blocks with counters to monitor performance */
static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
        struct mlxbf_pmc_attribute *attr;

        /* "event_list" sysfs to list events supported by the block */
        attr = &pmc->block[blk_num].attr_event_list;
        attr->dev_attr.attr.mode = 0444;
        attr->dev_attr.show = mlxbf_pmc_event_list_show;
        attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
        pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;

        /* "enable" sysfs to start/stop the counters. Only in L3C blocks */
        if (strstr(pmc->block_name[blk_num], "l3cache")) {
                attr = &pmc->block[blk_num].attr_enable;
                attr->dev_attr.attr.mode = 0644;
                attr->dev_attr.show = mlxbf_pmc_enable_show;
                attr->dev_attr.store = mlxbf_pmc_enable_store;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;

        pmc->block[blk_num].attr_counter = devm_kcalloc(
                dev, pmc->block[blk_num].counters,
                sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
        if (!pmc->block[blk_num].attr_counter)

        pmc->block[blk_num].attr_event = devm_kcalloc(
                dev, pmc->block[blk_num].counters,
                sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
        if (!pmc->block[blk_num].attr_event)

        /* "eventX" and "counterX" sysfs to program and read counter values */
        for (j = 0; j < pmc->block[blk_num].counters; ++j) {
                attr = &pmc->block[blk_num].attr_counter[j];
                attr->dev_attr.attr.mode = 0644;
                attr->dev_attr.show = mlxbf_pmc_counter_show;
                attr->dev_attr.store = mlxbf_pmc_counter_store;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;

                attr = &pmc->block[blk_num].attr_event[j];
                attr->dev_attr.attr.mode = 0644;
                attr->dev_attr.show = mlxbf_pmc_event_show;
                attr->dev_attr.store = mlxbf_pmc_event_store;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;

/* Populate attributes for blocks with registers to monitor performance */
static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
        struct mlxbf_pmc_attribute *attr;
        const struct mlxbf_pmc_events *events;

        events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &j);

        pmc->block[blk_num].attr_event = devm_kcalloc(
                dev, j, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
        if (!pmc->block[blk_num].attr_event)

                attr = &pmc->block[blk_num].attr_event[j];
                attr->dev_attr.attr.mode = 0644;
                attr->dev_attr.show = mlxbf_pmc_counter_show;
                attr->dev_attr.store = mlxbf_pmc_counter_store;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                                                          events[j].evt_name);
                pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;

/* Helper to create the bfperf sysfs sub-directories and files */
static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)

        /* Populate attributes based on counter type */
        if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER)
                err = mlxbf_pmc_init_perftype_counter(dev, blk_num);
        else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER)
                err = mlxbf_pmc_init_perftype_reg(dev, blk_num);

        /* Add a new attribute_group for the block */
        pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
        pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
                dev, GFP_KERNEL, pmc->block_name[blk_num]);
        pmc->groups[blk_num] = &pmc->block[blk_num].block_attr_grp;

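/*
 * The SiP service UID call returns the service UUID packed into the four
 * SMCCC result registers; reassemble it here and compare it against the
 * expected UUID parsed at probe time.
 */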
static bool mlxbf_pmc_guid_match(const guid_t *guid,
                                 const struct arm_smccc_res *res)
{
        guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16, res->a2,
                              res->a2 >> 8, res->a2 >> 16, res->a2 >> 24,
                              res->a3, res->a3 >> 8, res->a3 >> 16,
                              res->a3 >> 24);

        return guid_equal(guid, &id);
}

/* Helper to map the Performance Counters from the various blocks */
static int mlxbf_pmc_map_counters(struct device *dev)
        uint64_t info[MLXBF_PMC_INFO_SZ];
        int i, tile_num, ret;

        for (i = 0; i < pmc->total_blocks; ++i) {
                if (strstr(pmc->block_name[i], "tile")) {
                        ret = sscanf(pmc->block_name[i], "tile%d", &tile_num);

                        if (tile_num >= pmc->tile_count)

                ret = device_property_read_u64_array(dev, pmc->block_name[i],
                                                     info, MLXBF_PMC_INFO_SZ);

                /*
                 * Do not remap if the proper SMC calls are supported,
                 * since the SMC calls expect physical addresses.
                 */
                if (pmc->svc_sreg_support)
                        pmc->block[i].mmio_base = (void __iomem *)info[0];
                else
                        pmc->block[i].mmio_base =
                                devm_ioremap(dev, info[0], info[1]);

                pmc->block[i].blk_size = info[1];
                pmc->block[i].counters = info[2];
                pmc->block[i].type = info[3];

                if (IS_ERR(pmc->block[i].mmio_base))
                        return PTR_ERR(pmc->block[i].mmio_base);

                ret = mlxbf_pmc_create_groups(dev, i);

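/*
 * Probe flow: verify the SiP service UUID, determine SMC support and the
 * event set from the ACPI description (sec_reg_block property and device
 * HID), read the block count, block names and tile count, map the counter
 * blocks, then register a "bfperf" hwmon device exposing the attribute
 * groups built above.
 */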
static int mlxbf_pmc_probe(struct platform_device *pdev)
        struct acpi_device *acpi_dev = ACPI_COMPANION(&pdev->dev);
        const char *hid = acpi_device_hid(acpi_dev);
        struct device *dev = &pdev->dev;
        struct arm_smccc_res res;
        guid_t guid;
        int ret;

        /* Ensure we have the UUID we expect for this service. */
        arm_smccc_smc(MLXBF_PMC_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
        guid_parse(mlxbf_pmc_svc_uuid_str, &guid);
        if (!mlxbf_pmc_guid_match(&guid, &res))

        pmc = devm_kzalloc(dev, sizeof(struct mlxbf_pmc_context), GFP_KERNEL);

        /*
         * ACPI indicates whether we use SMCs to access registers or not.
         * If sreg_tbl_perf is not present, just assume we're not using SMCs.
         */
        ret = device_property_read_u32(dev, "sec_reg_block",
                                       &pmc->sreg_tbl_perf);
        if (ret) {
                pmc->svc_sreg_support = false;
        } else {
                /*
                 * Check service version to see if we actually do support the
                 * needed SMCs. If we have the calls we need, mark support for
                 * them in the pmc struct.
                 */
                arm_smccc_smc(MLXBF_PMC_SIP_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0,
                              &res);
                if (res.a0 == MLXBF_PMC_SVC_REQ_MAJOR &&
                    res.a1 >= MLXBF_PMC_SVC_MIN_MINOR)
                        pmc->svc_sreg_support = true;
        }

        if (!strcmp(hid, "MLNXBFD0"))
                pmc->event_set = MLXBF_PMC_EVENT_SET_BF1;
        else if (!strcmp(hid, "MLNXBFD1"))
                pmc->event_set = MLXBF_PMC_EVENT_SET_BF2;

        ret = device_property_read_u32(dev, "block_num", &pmc->total_blocks);

        ret = device_property_read_string_array(dev, "block_name",
        if (ret != pmc->total_blocks)

        ret = device_property_read_u32(dev, "tile_num", &pmc->tile_count);

        ret = mlxbf_pmc_map_counters(dev);

        pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
                dev, "bfperf", pmc, pmc->groups);
        platform_set_drvdata(pdev, pmc);

        return 0;
}

static const struct acpi_device_id mlxbf_pmc_acpi_ids[] = { { "MLNXBFD0", 0 },
                                                            { "MLNXBFD1", 0 },
                                                            {}, };

MODULE_DEVICE_TABLE(acpi, mlxbf_pmc_acpi_ids);

static struct platform_driver pmc_driver = {
        .driver = { .name = "mlxbf-pmc",
                    .acpi_match_table = ACPI_PTR(mlxbf_pmc_acpi_ids), },
        .probe = mlxbf_pmc_probe,
};

module_platform_driver(pmc_driver);

MODULE_AUTHOR("Shravan Kumar Ramani <sramani@mellanox.com>");
MODULE_DESCRIPTION("Mellanox PMC driver");
MODULE_LICENSE("Dual BSD/GPL");