/*
 * Performance events support for SH-4A performance counters
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
10 #include <linux/kernel.h>
11 #include <linux/init.h>
13 #include <linux/irq.h>
14 #include <linux/perf_event.h>
15 #include <asm/processor.h>
17 #define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx))
18 #define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx))
20 #define CCBR_CIT_MASK (0x7ff << 6)
21 #define CCBR_DUC (1 << 3)
22 #define CCBR_CMDS (1 << 1)
23 #define CCBR_PPCE (1 << 0)
25 #define PPC_PMCAT 0xfc100080
27 #define PMCAT_OVF3 (1 << 27)
28 #define PMCAT_CNN3 (1 << 26)
29 #define PMCAT_CLR3 (1 << 25)
30 #define PMCAT_OVF2 (1 << 19)
31 #define PMCAT_CLR2 (1 << 17)
32 #define PMCAT_OVF1 (1 << 11)
33 #define PMCAT_CNN1 (1 << 10)
34 #define PMCAT_CLR1 (1 << 9)
35 #define PMCAT_OVF0 (1 << 3)
36 #define PMCAT_CLR0 (1 << 1)
38 static struct sh_pmu sh4a_pmu;
41 * Special reserved bits used by hardware emulators, read values will
42 * vary, but writes must always be 0.
44 #define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
46 static const int sh4a_general_events[] = {
47 [PERF_COUNT_HW_CPU_CYCLES] = 0x0000,
48 [PERF_COUNT_HW_INSTRUCTIONS] = 0x0202,
49 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */
50 [PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */
51 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204,
52 [PERF_COUNT_HW_BRANCH_MISSES] = -1,
53 [PERF_COUNT_HW_BUS_CYCLES] = -1,
56 #define C(x) PERF_COUNT_HW_CACHE_##x
58 static const int sh4a_cache_events
59 [PERF_COUNT_HW_CACHE_MAX]
60 [PERF_COUNT_HW_CACHE_OP_MAX]
61 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
65 [ C(RESULT_ACCESS) ] = 0x0031,
66 [ C(RESULT_MISS) ] = 0x0032,
69 [ C(RESULT_ACCESS) ] = 0x0039,
70 [ C(RESULT_MISS) ] = 0x003a,
72 [ C(OP_PREFETCH) ] = {
73 [ C(RESULT_ACCESS) ] = 0,
74 [ C(RESULT_MISS) ] = 0,
80 [ C(RESULT_ACCESS) ] = 0x0029,
81 [ C(RESULT_MISS) ] = 0x002a,
84 [ C(RESULT_ACCESS) ] = -1,
85 [ C(RESULT_MISS) ] = -1,
87 [ C(OP_PREFETCH) ] = {
88 [ C(RESULT_ACCESS) ] = 0,
89 [ C(RESULT_MISS) ] = 0,
95 [ C(RESULT_ACCESS) ] = 0x0030,
96 [ C(RESULT_MISS) ] = 0,
99 [ C(RESULT_ACCESS) ] = 0x0038,
100 [ C(RESULT_MISS) ] = 0,
102 [ C(OP_PREFETCH) ] = {
103 [ C(RESULT_ACCESS) ] = 0,
104 [ C(RESULT_MISS) ] = 0,
110 [ C(RESULT_ACCESS) ] = 0x0222,
111 [ C(RESULT_MISS) ] = 0x0220,
114 [ C(RESULT_ACCESS) ] = 0,
115 [ C(RESULT_MISS) ] = 0,
117 [ C(OP_PREFETCH) ] = {
118 [ C(RESULT_ACCESS) ] = 0,
119 [ C(RESULT_MISS) ] = 0,
125 [ C(RESULT_ACCESS) ] = 0,
126 [ C(RESULT_MISS) ] = 0x02a0,
129 [ C(RESULT_ACCESS) ] = -1,
130 [ C(RESULT_MISS) ] = -1,
132 [ C(OP_PREFETCH) ] = {
133 [ C(RESULT_ACCESS) ] = -1,
134 [ C(RESULT_MISS) ] = -1,
140 [ C(RESULT_ACCESS) ] = -1,
141 [ C(RESULT_MISS) ] = -1,
144 [ C(RESULT_ACCESS) ] = -1,
145 [ C(RESULT_MISS) ] = -1,
147 [ C(OP_PREFETCH) ] = {
148 [ C(RESULT_ACCESS) ] = -1,
149 [ C(RESULT_MISS) ] = -1,
154 static int sh4a_event_map(int event)
156 return sh4a_general_events[event];
159 static u64 sh4a_pmu_read(int idx)
161 return __raw_readl(PPC_PMCTR(idx));
164 static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
168 tmp = __raw_readl(PPC_CCBR(idx));
169 tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
170 __raw_writel(tmp, PPC_CCBR(idx));
173 static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
177 tmp = __raw_readl(PPC_PMCAT);
178 tmp &= ~PMCAT_EMU_CLR_MASK;
179 tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
180 __raw_writel(tmp, PPC_PMCAT);
182 tmp = __raw_readl(PPC_CCBR(idx));
183 tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
184 __raw_writel(tmp, PPC_CCBR(idx));
186 __raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
189 static void sh4a_pmu_disable_all(void)
193 for (i = 0; i < sh4a_pmu.num_events; i++)
194 __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
197 static void sh4a_pmu_enable_all(void)
201 for (i = 0; i < sh4a_pmu.num_events; i++)
202 __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
205 static struct sh_pmu sh4a_pmu = {
208 .event_map = sh4a_event_map,
209 .max_events = ARRAY_SIZE(sh4a_general_events),
210 .raw_event_mask = 0x3ff,
211 .cache_events = &sh4a_cache_events,
212 .read = sh4a_pmu_read,
213 .disable = sh4a_pmu_disable,
214 .enable = sh4a_pmu_enable,
215 .disable_all = sh4a_pmu_disable_all,
216 .enable_all = sh4a_pmu_enable_all,
219 static int __init sh4a_pmu_init(void)
222 * Make sure this CPU actually has perf counters.
224 if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
225 pr_notice("HW perf events unsupported, software events only.\n");
229 return register_sh_pmu(&sh4a_pmu);
231 arch_initcall(sh4a_pmu_init);