1 // SPDX-License-Identifier: GPL-2.0-only
5 * Used to coordinate shared registers between HT threads or
6 * among events on a single PMU.
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
17 #include <linux/kvm_host.h>
19 #include <asm/cpufeature.h>
20 #include <asm/hardirq.h>
21 #include <asm/intel-family.h>
22 #include <asm/intel_pt.h>
24 #include <asm/cpu_device_id.h>
26 #include "../perf_event.h"
29 * Intel PerfMon, used on Core and later.
31 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
33 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
34 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
35 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
36 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
37 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
38 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
39 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
40 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
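/*
 * For reference: each entry above uses the architectural PERFEVTSEL layout,
 * with the event select in bits 7:0 and the unit mask in bits 15:8. For
 * example, 0x412e is event 0x2e with umask 0x41 (LONGEST_LAT_CACHE.MISS),
 * and 0x4f2e is the same event with umask 0x4f (LONGEST_LAT_CACHE.REFERENCE).
 */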
43 static struct event_constraint intel_core_event_constraints[] __read_mostly =
45 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
46 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
47 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
48 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
49 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
50 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
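/*
 * Reading these constraint tables: the first argument is the event code
 * (or event+umask for the UEVENT variants) and the second is a bitmask of
 * the general-purpose counters the event may be scheduled on, e.g. 0x1 is
 * counter 0 only, 0x2 is counter 1 only, 0xf is counters 0-3.
 * FIXED_EVENT_CONSTRAINT() instead takes the index of the fixed counter
 * that the pseudo-encoding maps to.
 */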
54 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
56 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
57 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
58 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
59 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
60 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
61 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
62 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
63 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
64 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
65 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
66 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
67 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
68 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
72 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
74 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
75 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
76 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
77 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
78 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
79 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
80 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
81 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
82 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
83 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
84 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
88 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
90 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
91 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
92 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
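/*
 * 0x01b7 is OFFCORE_RESPONSE (event 0xb7, umask 0x01): events with this
 * encoding take their response-type selection from MSR_OFFCORE_RSP_0, and
 * the third argument (0xffff here) is the mask of bits that may be set in
 * that extra register.
 */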
96 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
98 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
99 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
100 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
101 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
102 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
103 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
104 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
108 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
110 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
111 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
112 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
113 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
114 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
115 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
116 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
117 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
118 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
119 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
120 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
121 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
124 * When HT is off these events can only run on the bottom 4 counters
125 * When HT is on, they are impacted by the HT bug and require EXCL access
127 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
128 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
129 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
130 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
135 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
137 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
138 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
139 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
140 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
141 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
142 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
143 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
144 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
145 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
146 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
147 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
148 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
149 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
152 * When HT is off these events can only run on the bottom 4 counters
153 * When HT is on, they are impacted by the HT bug and require EXCL access
155 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
156 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
157 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
158 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
163 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
165 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
166 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
167 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
168 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
172 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
177 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
179 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
180 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
181 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
185 static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
187 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
188 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
189 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
190 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
191 FIXED_EVENT_CONSTRAINT(0x0500, 4),
192 FIXED_EVENT_CONSTRAINT(0x0600, 5),
193 FIXED_EVENT_CONSTRAINT(0x0700, 6),
194 FIXED_EVENT_CONSTRAINT(0x0800, 7),
195 FIXED_EVENT_CONSTRAINT(0x0900, 8),
196 FIXED_EVENT_CONSTRAINT(0x0a00, 9),
197 FIXED_EVENT_CONSTRAINT(0x0b00, 10),
198 FIXED_EVENT_CONSTRAINT(0x0c00, 11),
199 FIXED_EVENT_CONSTRAINT(0x0d00, 12),
200 FIXED_EVENT_CONSTRAINT(0x0e00, 13),
201 FIXED_EVENT_CONSTRAINT(0x0f00, 14),
202 FIXED_EVENT_CONSTRAINT(0x1000, 15),
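/*
 * The unnamed 0x0500-0x1000 entries above are pseudo-encodings for fixed
 * counters 4-15; they carry no architectural event name but allow any
 * fixed counter the CPU enumerates to be constrained to its own slot.
 */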
206 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
208 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
209 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
210 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
214 static struct event_constraint intel_skl_event_constraints[] = {
215 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
216 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
217 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
218 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
221 * when HT is off, these can only run on the bottom 4 counters
223 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
224 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
225 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
226 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
227 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
232 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
233 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
234 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
238 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
239 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
240 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
241 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
242 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
246 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
247 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
248 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
249 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
250 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
254 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
255 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
256 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
257 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
259 * Note: the low 8 bits of the eventsel code are not a contiguous field;
260 * they contain some bits that #GP when set. These are masked out.
262 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
266 static struct event_constraint intel_icl_event_constraints[] = {
267 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
268 FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */
269 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
270 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
271 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
272 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
273 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
274 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
275 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
276 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
277 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
278 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
279 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
280 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
281 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
282 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
283 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
284 INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
285 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
286 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
287 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
288 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
289 INTEL_EVENT_CONSTRAINT(0xef, 0xf),
290 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
294 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
295 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
296 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
297 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
298 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
302 static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
303 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
304 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
305 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
306 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
307 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
308 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
312 static struct event_constraint intel_spr_event_constraints[] = {
313 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
314 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
315 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
316 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
317 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
318 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
319 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
320 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
321 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
322 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
323 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
324 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
325 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
327 INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
328 INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
330 * Generally, event codes < 0x90 are restricted to counters 0-3.
331 * The 0x2E and 0x3C events are exceptions and have no such restriction.
333 INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
335 INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
336 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
337 INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
338 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
339 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
340 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
341 INTEL_EVENT_CONSTRAINT(0xce, 0x1),
342 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
344 * Generally, event codes >= 0x90 are likely to have no restrictions.
345 * The exceptions are defined above.
347 INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
352 static struct extra_reg intel_gnr_extra_regs[] __read_mostly = {
353 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
354 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
355 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
356 INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
357 INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
358 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
359 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
363 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
364 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
365 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
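/*
 * These strings become sysfs event aliases, so userspace can reference
 * them directly, e.g. (illustrative command line):
 *
 *	perf record -e cpu/mem-loads,ldlat=30/P -- <workload>
 */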
367 static struct attribute *nhm_mem_events_attrs[] = {
368 EVENT_PTR(mem_ld_nhm),
373 * topdown events for Intel Core CPUs.
375 * The events are all measured in slots; a slot is a free issue opportunity
376 * in the 4-wide pipeline. Some events already report slots directly; for
377 * cycle events we multiply by the pipeline width (4).
379 * With Hyper Threading on, topdown metrics are either summed or averaged
380 * between the threads of a core: (count_t0 + count_t1).
382 * For the average case the metric is scaled back up to the pipeline width,
383 * so we use a factor of 2: ((count_t0 + count_t1) / 2) * 4.
386 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
387 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
388 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
389 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
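/*
 * Worked example of the scale above: with HT off, slots are
 * cpu_clk_unhalted.thread * 4. With HT on, the any=1 variant is summed
 * across both siblings, so the scale drops to 2, since
 * (count_t0 + count_t1) * 2 == ((count_t0 + count_t1) / 2) * 4.
 */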
390 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
391 "event=0xe,umask=0x1"); /* uops_issued.any */
392 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
393 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
394 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
395 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
396 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
397 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
398 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
399 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
402 EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
403 EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
404 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
405 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
406 EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
407 EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
408 EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
409 EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
410 EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");
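/*
 * The event=0x00 encodings above are pseudo events: they do not program a
 * general-purpose counter but select the PERF_METRICS components, grouped
 * with the fixed SLOTS counter (pseudo-encoding 0x0400) via the
 * METRIC_EVENT_CONSTRAINT() entries in the constraint tables above.
 */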
412 static struct attribute *snb_events_attrs[] = {
413 EVENT_PTR(td_slots_issued),
414 EVENT_PTR(td_slots_retired),
415 EVENT_PTR(td_fetch_bubbles),
416 EVENT_PTR(td_total_slots),
417 EVENT_PTR(td_total_slots_scale),
418 EVENT_PTR(td_recovery_bubbles),
419 EVENT_PTR(td_recovery_bubbles_scale),
423 static struct attribute *snb_mem_events_attrs[] = {
424 EVENT_PTR(mem_ld_snb),
425 EVENT_PTR(mem_st_snb),
429 static struct event_constraint intel_hsw_event_constraints[] = {
430 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
431 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
432 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
433 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
434 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
435 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
436 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
437 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
438 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
439 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
440 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
441 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
444 * When HT is off these events can only run on the bottom 4 counters
445 * When HT is on, they are impacted by the HT bug and require EXCL access
447 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
448 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
449 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
450 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
455 static struct event_constraint intel_bdw_event_constraints[] = {
456 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
457 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
458 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
459 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
460 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
462 * when HT is off, these can only run on the bottom 4 counters
464 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
465 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
466 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
467 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
471 static u64 intel_pmu_event_map(int hw_event)
473 return intel_perfmon_event_map[hw_event];
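/*
 * For example, intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES) returns
 * 0x412e, which is then programmed as event 0x2e, umask 0x41 in a generic
 * counter's PERFEVTSEL.
 */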
476 static __initconst const u64 spr_hw_cache_event_ids
477 [PERF_COUNT_HW_CACHE_MAX]
478 [PERF_COUNT_HW_CACHE_OP_MAX]
479 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
483 [ C(RESULT_ACCESS) ] = 0x81d0,
484 [ C(RESULT_MISS) ] = 0xe124,
487 [ C(RESULT_ACCESS) ] = 0x82d0,
492 [ C(RESULT_MISS) ] = 0xe424,
495 [ C(RESULT_ACCESS) ] = -1,
496 [ C(RESULT_MISS) ] = -1,
501 [ C(RESULT_ACCESS) ] = 0x12a,
502 [ C(RESULT_MISS) ] = 0x12a,
505 [ C(RESULT_ACCESS) ] = 0x12a,
506 [ C(RESULT_MISS) ] = 0x12a,
511 [ C(RESULT_ACCESS) ] = 0x81d0,
512 [ C(RESULT_MISS) ] = 0xe12,
515 [ C(RESULT_ACCESS) ] = 0x82d0,
516 [ C(RESULT_MISS) ] = 0xe13,
521 [ C(RESULT_ACCESS) ] = -1,
522 [ C(RESULT_MISS) ] = 0xe11,
525 [ C(RESULT_ACCESS) ] = -1,
526 [ C(RESULT_MISS) ] = -1,
528 [ C(OP_PREFETCH) ] = {
529 [ C(RESULT_ACCESS) ] = -1,
530 [ C(RESULT_MISS) ] = -1,
535 [ C(RESULT_ACCESS) ] = 0x4c4,
536 [ C(RESULT_MISS) ] = 0x4c5,
539 [ C(RESULT_ACCESS) ] = -1,
540 [ C(RESULT_MISS) ] = -1,
542 [ C(OP_PREFETCH) ] = {
543 [ C(RESULT_ACCESS) ] = -1,
544 [ C(RESULT_MISS) ] = -1,
549 [ C(RESULT_ACCESS) ] = 0x12a,
550 [ C(RESULT_MISS) ] = 0x12a,
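/*
 * As with the other *_hw_cache_event_ids tables, entries are indexed by
 * [cache][operation][result]; a value of 0 means the combination is not
 * supported (-ENOENT) and -1 means it is not a valid combination (-EINVAL).
 */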
555 static __initconst const u64 spr_hw_cache_extra_regs
556 [PERF_COUNT_HW_CACHE_MAX]
557 [PERF_COUNT_HW_CACHE_OP_MAX]
558 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
562 [ C(RESULT_ACCESS) ] = 0x10001,
563 [ C(RESULT_MISS) ] = 0x3fbfc00001,
566 [ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
567 [ C(RESULT_MISS) ] = 0x3f3fc00002,
572 [ C(RESULT_ACCESS) ] = 0x10c000001,
573 [ C(RESULT_MISS) ] = 0x3fb3000001,
579 * Notes on the events:
580 * - data reads do not include code reads (comparable to earlier tables)
581 * - data counts include speculative execution (except L1 write, dtlb, bpu)
582 * - remote node access includes remote memory, remote cache, remote mmio.
583 * - prefetches are not included in the counts.
584 * - icache miss does not include decoded icache
587 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
588 #define SKL_DEMAND_RFO BIT_ULL(1)
589 #define SKL_ANY_RESPONSE BIT_ULL(16)
590 #define SKL_SUPPLIER_NONE BIT_ULL(17)
591 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
592 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
593 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
594 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
595 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
596 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
597 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
598 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
599 #define SKL_SPL_HIT BIT_ULL(30)
600 #define SKL_SNOOP_NONE BIT_ULL(31)
601 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
602 #define SKL_SNOOP_MISS BIT_ULL(33)
603 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
604 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
605 #define SKL_SNOOP_HITM BIT_ULL(36)
606 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
607 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
608 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
609 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
610 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
611 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
612 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
613 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
614 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
615 SKL_SNOOP_HITM|SKL_SPL_HIT)
616 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
617 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
618 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
619 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
620 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
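/*
 * These composite masks are OR-ed together to form the MSR_OFFCORE_RSP
 * values in skl_hw_cache_extra_regs below; e.g. an LLC read miss combines
 * SKL_DEMAND_READ with the SKL_L3_MISS and SKL_ANY_SNOOP groups (any DRAM
 * hop, any snoop response).
 */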
622 static __initconst const u64 skl_hw_cache_event_ids
623 [PERF_COUNT_HW_CACHE_MAX]
624 [PERF_COUNT_HW_CACHE_OP_MAX]
625 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
629 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
630 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
633 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
634 [ C(RESULT_MISS) ] = 0x0,
636 [ C(OP_PREFETCH) ] = {
637 [ C(RESULT_ACCESS) ] = 0x0,
638 [ C(RESULT_MISS) ] = 0x0,
643 [ C(RESULT_ACCESS) ] = 0x0,
644 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
647 [ C(RESULT_ACCESS) ] = -1,
648 [ C(RESULT_MISS) ] = -1,
650 [ C(OP_PREFETCH) ] = {
651 [ C(RESULT_ACCESS) ] = 0x0,
652 [ C(RESULT_MISS) ] = 0x0,
657 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
658 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
661 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
662 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
664 [ C(OP_PREFETCH) ] = {
665 [ C(RESULT_ACCESS) ] = 0x0,
666 [ C(RESULT_MISS) ] = 0x0,
671 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
672 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
675 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
676 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
678 [ C(OP_PREFETCH) ] = {
679 [ C(RESULT_ACCESS) ] = 0x0,
680 [ C(RESULT_MISS) ] = 0x0,
685 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
686 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
689 [ C(RESULT_ACCESS) ] = -1,
690 [ C(RESULT_MISS) ] = -1,
692 [ C(OP_PREFETCH) ] = {
693 [ C(RESULT_ACCESS) ] = -1,
694 [ C(RESULT_MISS) ] = -1,
699 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
700 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
703 [ C(RESULT_ACCESS) ] = -1,
704 [ C(RESULT_MISS) ] = -1,
706 [ C(OP_PREFETCH) ] = {
707 [ C(RESULT_ACCESS) ] = -1,
708 [ C(RESULT_MISS) ] = -1,
713 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
714 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
717 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
718 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
720 [ C(OP_PREFETCH) ] = {
721 [ C(RESULT_ACCESS) ] = 0x0,
722 [ C(RESULT_MISS) ] = 0x0,
727 static __initconst const u64 skl_hw_cache_extra_regs
728 [PERF_COUNT_HW_CACHE_MAX]
729 [PERF_COUNT_HW_CACHE_OP_MAX]
730 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
734 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
735 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
736 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
737 SKL_L3_MISS|SKL_ANY_SNOOP|
741 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
742 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
743 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
744 SKL_L3_MISS|SKL_ANY_SNOOP|
747 [ C(OP_PREFETCH) ] = {
748 [ C(RESULT_ACCESS) ] = 0x0,
749 [ C(RESULT_MISS) ] = 0x0,
754 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
755 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
756 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
757 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
760 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
761 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
762 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
763 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
765 [ C(OP_PREFETCH) ] = {
766 [ C(RESULT_ACCESS) ] = 0x0,
767 [ C(RESULT_MISS) ] = 0x0,
772 #define SNB_DMND_DATA_RD (1ULL << 0)
773 #define SNB_DMND_RFO (1ULL << 1)
774 #define SNB_DMND_IFETCH (1ULL << 2)
775 #define SNB_DMND_WB (1ULL << 3)
776 #define SNB_PF_DATA_RD (1ULL << 4)
777 #define SNB_PF_RFO (1ULL << 5)
778 #define SNB_PF_IFETCH (1ULL << 6)
779 #define SNB_LLC_DATA_RD (1ULL << 7)
780 #define SNB_LLC_RFO (1ULL << 8)
781 #define SNB_LLC_IFETCH (1ULL << 9)
782 #define SNB_BUS_LOCKS (1ULL << 10)
783 #define SNB_STRM_ST (1ULL << 11)
784 #define SNB_OTHER (1ULL << 15)
785 #define SNB_RESP_ANY (1ULL << 16)
786 #define SNB_NO_SUPP (1ULL << 17)
787 #define SNB_LLC_HITM (1ULL << 18)
788 #define SNB_LLC_HITE (1ULL << 19)
789 #define SNB_LLC_HITS (1ULL << 20)
790 #define SNB_LLC_HITF (1ULL << 21)
791 #define SNB_LOCAL (1ULL << 22)
792 #define SNB_REMOTE (0xffULL << 23)
793 #define SNB_SNP_NONE (1ULL << 31)
794 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
795 #define SNB_SNP_MISS (1ULL << 33)
796 #define SNB_NO_FWD (1ULL << 34)
797 #define SNB_SNP_FWD (1ULL << 35)
798 #define SNB_HITM (1ULL << 36)
799 #define SNB_NON_DRAM (1ULL << 37)
801 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
802 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
803 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
805 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
806 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
809 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
810 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
812 #define SNB_L3_ACCESS SNB_RESP_ANY
813 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
815 static __initconst const u64 snb_hw_cache_extra_regs
816 [PERF_COUNT_HW_CACHE_MAX]
817 [PERF_COUNT_HW_CACHE_OP_MAX]
818 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
822 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
823 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
826 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
827 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
829 [ C(OP_PREFETCH) ] = {
830 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
831 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
836 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
837 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
840 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
841 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
843 [ C(OP_PREFETCH) ] = {
844 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
845 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
850 static __initconst const u64 snb_hw_cache_event_ids
851 [PERF_COUNT_HW_CACHE_MAX]
852 [PERF_COUNT_HW_CACHE_OP_MAX]
853 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
857 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
858 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
861 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
862 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
864 [ C(OP_PREFETCH) ] = {
865 [ C(RESULT_ACCESS) ] = 0x0,
866 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
871 [ C(RESULT_ACCESS) ] = 0x0,
872 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
875 [ C(RESULT_ACCESS) ] = -1,
876 [ C(RESULT_MISS) ] = -1,
878 [ C(OP_PREFETCH) ] = {
879 [ C(RESULT_ACCESS) ] = 0x0,
880 [ C(RESULT_MISS) ] = 0x0,
885 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
886 [ C(RESULT_ACCESS) ] = 0x01b7,
887 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
888 [ C(RESULT_MISS) ] = 0x01b7,
891 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
892 [ C(RESULT_ACCESS) ] = 0x01b7,
893 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
894 [ C(RESULT_MISS) ] = 0x01b7,
896 [ C(OP_PREFETCH) ] = {
897 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
898 [ C(RESULT_ACCESS) ] = 0x01b7,
899 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
900 [ C(RESULT_MISS) ] = 0x01b7,
905 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
906 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
909 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
910 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
912 [ C(OP_PREFETCH) ] = {
913 [ C(RESULT_ACCESS) ] = 0x0,
914 [ C(RESULT_MISS) ] = 0x0,
919 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
920 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
923 [ C(RESULT_ACCESS) ] = -1,
924 [ C(RESULT_MISS) ] = -1,
926 [ C(OP_PREFETCH) ] = {
927 [ C(RESULT_ACCESS) ] = -1,
928 [ C(RESULT_MISS) ] = -1,
933 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
934 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
937 [ C(RESULT_ACCESS) ] = -1,
938 [ C(RESULT_MISS) ] = -1,
940 [ C(OP_PREFETCH) ] = {
941 [ C(RESULT_ACCESS) ] = -1,
942 [ C(RESULT_MISS) ] = -1,
947 [ C(RESULT_ACCESS) ] = 0x01b7,
948 [ C(RESULT_MISS) ] = 0x01b7,
951 [ C(RESULT_ACCESS) ] = 0x01b7,
952 [ C(RESULT_MISS) ] = 0x01b7,
954 [ C(OP_PREFETCH) ] = {
955 [ C(RESULT_ACCESS) ] = 0x01b7,
956 [ C(RESULT_MISS) ] = 0x01b7,
963 * Notes on the events:
964 * - data reads do not include code reads (comparable to earlier tables)
965 * - data counts include speculative execution (except L1 write, dtlb, bpu)
966 * - remote node access includes remote memory, remote cache, remote mmio.
967 * - prefetches are not included in the counts because they are not reliably counted.
971 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
972 #define HSW_DEMAND_RFO BIT_ULL(1)
973 #define HSW_ANY_RESPONSE BIT_ULL(16)
974 #define HSW_SUPPLIER_NONE BIT_ULL(17)
975 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
976 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
977 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
978 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
979 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
980 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
981 HSW_L3_MISS_REMOTE_HOP2P)
982 #define HSW_SNOOP_NONE BIT_ULL(31)
983 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
984 #define HSW_SNOOP_MISS BIT_ULL(33)
985 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
986 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
987 #define HSW_SNOOP_HITM BIT_ULL(36)
988 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
989 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
990 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
991 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
992 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
993 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
994 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
995 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
996 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
997 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
998 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
1000 #define BDW_L3_MISS_LOCAL BIT(26)
1001 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
1002 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
1003 HSW_L3_MISS_REMOTE_HOP2P)
1006 static __initconst const u64 hsw_hw_cache_event_ids
1007 [PERF_COUNT_HW_CACHE_MAX]
1008 [PERF_COUNT_HW_CACHE_OP_MAX]
1009 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1013 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1014 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
1017 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1018 [ C(RESULT_MISS) ] = 0x0,
1020 [ C(OP_PREFETCH) ] = {
1021 [ C(RESULT_ACCESS) ] = 0x0,
1022 [ C(RESULT_MISS) ] = 0x0,
1027 [ C(RESULT_ACCESS) ] = 0x0,
1028 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
1031 [ C(RESULT_ACCESS) ] = -1,
1032 [ C(RESULT_MISS) ] = -1,
1034 [ C(OP_PREFETCH) ] = {
1035 [ C(RESULT_ACCESS) ] = 0x0,
1036 [ C(RESULT_MISS) ] = 0x0,
1041 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1042 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1045 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1046 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1048 [ C(OP_PREFETCH) ] = {
1049 [ C(RESULT_ACCESS) ] = 0x0,
1050 [ C(RESULT_MISS) ] = 0x0,
1055 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1056 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
1059 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1060 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1062 [ C(OP_PREFETCH) ] = {
1063 [ C(RESULT_ACCESS) ] = 0x0,
1064 [ C(RESULT_MISS) ] = 0x0,
1069 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
1070 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
1073 [ C(RESULT_ACCESS) ] = -1,
1074 [ C(RESULT_MISS) ] = -1,
1076 [ C(OP_PREFETCH) ] = {
1077 [ C(RESULT_ACCESS) ] = -1,
1078 [ C(RESULT_MISS) ] = -1,
1083 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
1084 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1087 [ C(RESULT_ACCESS) ] = -1,
1088 [ C(RESULT_MISS) ] = -1,
1090 [ C(OP_PREFETCH) ] = {
1091 [ C(RESULT_ACCESS) ] = -1,
1092 [ C(RESULT_MISS) ] = -1,
1097 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1098 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1101 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1102 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1104 [ C(OP_PREFETCH) ] = {
1105 [ C(RESULT_ACCESS) ] = 0x0,
1106 [ C(RESULT_MISS) ] = 0x0,
1111 static __initconst const u64 hsw_hw_cache_extra_regs
1112 [PERF_COUNT_HW_CACHE_MAX]
1113 [PERF_COUNT_HW_CACHE_OP_MAX]
1114 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1118 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1120 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1121 HSW_L3_MISS|HSW_ANY_SNOOP,
1124 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1126 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1127 HSW_L3_MISS|HSW_ANY_SNOOP,
1129 [ C(OP_PREFETCH) ] = {
1130 [ C(RESULT_ACCESS) ] = 0x0,
1131 [ C(RESULT_MISS) ] = 0x0,
1136 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1137 HSW_L3_MISS_LOCAL_DRAM|
1139 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1144 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1145 HSW_L3_MISS_LOCAL_DRAM|
1147 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1151 [ C(OP_PREFETCH) ] = {
1152 [ C(RESULT_ACCESS) ] = 0x0,
1153 [ C(RESULT_MISS) ] = 0x0,
1158 static __initconst const u64 westmere_hw_cache_event_ids
1159 [PERF_COUNT_HW_CACHE_MAX]
1160 [PERF_COUNT_HW_CACHE_OP_MAX]
1161 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1165 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1166 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1169 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1170 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1172 [ C(OP_PREFETCH) ] = {
1173 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1174 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1179 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1180 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1183 [ C(RESULT_ACCESS) ] = -1,
1184 [ C(RESULT_MISS) ] = -1,
1186 [ C(OP_PREFETCH) ] = {
1187 [ C(RESULT_ACCESS) ] = 0x0,
1188 [ C(RESULT_MISS) ] = 0x0,
1193 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1194 [ C(RESULT_ACCESS) ] = 0x01b7,
1195 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1196 [ C(RESULT_MISS) ] = 0x01b7,
1199 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
1203 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1204 [ C(RESULT_ACCESS) ] = 0x01b7,
1205 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1206 [ C(RESULT_MISS) ] = 0x01b7,
1208 [ C(OP_PREFETCH) ] = {
1209 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1210 [ C(RESULT_ACCESS) ] = 0x01b7,
1211 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1212 [ C(RESULT_MISS) ] = 0x01b7,
1217 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1218 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1221 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1222 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1224 [ C(OP_PREFETCH) ] = {
1225 [ C(RESULT_ACCESS) ] = 0x0,
1226 [ C(RESULT_MISS) ] = 0x0,
1231 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1232 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1235 [ C(RESULT_ACCESS) ] = -1,
1236 [ C(RESULT_MISS) ] = -1,
1238 [ C(OP_PREFETCH) ] = {
1239 [ C(RESULT_ACCESS) ] = -1,
1240 [ C(RESULT_MISS) ] = -1,
1245 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1246 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1249 [ C(RESULT_ACCESS) ] = -1,
1250 [ C(RESULT_MISS) ] = -1,
1252 [ C(OP_PREFETCH) ] = {
1253 [ C(RESULT_ACCESS) ] = -1,
1254 [ C(RESULT_MISS) ] = -1,
1259 [ C(RESULT_ACCESS) ] = 0x01b7,
1260 [ C(RESULT_MISS) ] = 0x01b7,
1263 [ C(RESULT_ACCESS) ] = 0x01b7,
1264 [ C(RESULT_MISS) ] = 0x01b7,
1266 [ C(OP_PREFETCH) ] = {
1267 [ C(RESULT_ACCESS) ] = 0x01b7,
1268 [ C(RESULT_MISS) ] = 0x01b7,
1274 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1275 * See IA32 SDM Vol 3B 30.6.1.3
1278 #define NHM_DMND_DATA_RD (1 << 0)
1279 #define NHM_DMND_RFO (1 << 1)
1280 #define NHM_DMND_IFETCH (1 << 2)
1281 #define NHM_DMND_WB (1 << 3)
1282 #define NHM_PF_DATA_RD (1 << 4)
1283 #define NHM_PF_DATA_RFO (1 << 5)
1284 #define NHM_PF_IFETCH (1 << 6)
1285 #define NHM_OFFCORE_OTHER (1 << 7)
1286 #define NHM_UNCORE_HIT (1 << 8)
1287 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1288 #define NHM_OTHER_CORE_HITM (1 << 10)
1290 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1291 #define NHM_REMOTE_DRAM (1 << 13)
1292 #define NHM_LOCAL_DRAM (1 << 14)
1293 #define NHM_NON_DRAM (1 << 15)
1295 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1296 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1298 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1299 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1300 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1302 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1303 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1304 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
1306 static __initconst const u64 nehalem_hw_cache_extra_regs
1307 [PERF_COUNT_HW_CACHE_MAX]
1308 [PERF_COUNT_HW_CACHE_OP_MAX]
1309 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1313 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1314 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1317 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1318 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1320 [ C(OP_PREFETCH) ] = {
1321 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1322 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1327 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1328 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1331 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1332 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1334 [ C(OP_PREFETCH) ] = {
1335 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1336 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1341 static __initconst const u64 nehalem_hw_cache_event_ids
1342 [PERF_COUNT_HW_CACHE_MAX]
1343 [PERF_COUNT_HW_CACHE_OP_MAX]
1344 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1348 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1349 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1352 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1353 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1355 [ C(OP_PREFETCH) ] = {
1356 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1357 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1362 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1363 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1366 [ C(RESULT_ACCESS) ] = -1,
1367 [ C(RESULT_MISS) ] = -1,
1369 [ C(OP_PREFETCH) ] = {
1370 [ C(RESULT_ACCESS) ] = 0x0,
1371 [ C(RESULT_MISS) ] = 0x0,
1376 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1377 [ C(RESULT_ACCESS) ] = 0x01b7,
1378 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1379 [ C(RESULT_MISS) ] = 0x01b7,
1382 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
1386 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1387 [ C(RESULT_ACCESS) ] = 0x01b7,
1388 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1389 [ C(RESULT_MISS) ] = 0x01b7,
1391 [ C(OP_PREFETCH) ] = {
1392 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1393 [ C(RESULT_ACCESS) ] = 0x01b7,
1394 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1395 [ C(RESULT_MISS) ] = 0x01b7,
1400 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1401 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1404 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1405 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1407 [ C(OP_PREFETCH) ] = {
1408 [ C(RESULT_ACCESS) ] = 0x0,
1409 [ C(RESULT_MISS) ] = 0x0,
1414 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1415 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1418 [ C(RESULT_ACCESS) ] = -1,
1419 [ C(RESULT_MISS) ] = -1,
1421 [ C(OP_PREFETCH) ] = {
1422 [ C(RESULT_ACCESS) ] = -1,
1423 [ C(RESULT_MISS) ] = -1,
1428 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1429 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1432 [ C(RESULT_ACCESS) ] = -1,
1433 [ C(RESULT_MISS) ] = -1,
1435 [ C(OP_PREFETCH) ] = {
1436 [ C(RESULT_ACCESS) ] = -1,
1437 [ C(RESULT_MISS) ] = -1,
1442 [ C(RESULT_ACCESS) ] = 0x01b7,
1443 [ C(RESULT_MISS) ] = 0x01b7,
1446 [ C(RESULT_ACCESS) ] = 0x01b7,
1447 [ C(RESULT_MISS) ] = 0x01b7,
1449 [ C(OP_PREFETCH) ] = {
1450 [ C(RESULT_ACCESS) ] = 0x01b7,
1451 [ C(RESULT_MISS) ] = 0x01b7,
1456 static __initconst const u64 core2_hw_cache_event_ids
1457 [PERF_COUNT_HW_CACHE_MAX]
1458 [PERF_COUNT_HW_CACHE_OP_MAX]
1459 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1463 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1464 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1467 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1468 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1470 [ C(OP_PREFETCH) ] = {
1471 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1472 [ C(RESULT_MISS) ] = 0,
1477 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1478 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1481 [ C(RESULT_ACCESS) ] = -1,
1482 [ C(RESULT_MISS) ] = -1,
1484 [ C(OP_PREFETCH) ] = {
1485 [ C(RESULT_ACCESS) ] = 0,
1486 [ C(RESULT_MISS) ] = 0,
1491 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1492 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1495 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1496 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1498 [ C(OP_PREFETCH) ] = {
1499 [ C(RESULT_ACCESS) ] = 0,
1500 [ C(RESULT_MISS) ] = 0,
1505 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1506 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1509 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1510 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1512 [ C(OP_PREFETCH) ] = {
1513 [ C(RESULT_ACCESS) ] = 0,
1514 [ C(RESULT_MISS) ] = 0,
1519 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1520 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1523 [ C(RESULT_ACCESS) ] = -1,
1524 [ C(RESULT_MISS) ] = -1,
1526 [ C(OP_PREFETCH) ] = {
1527 [ C(RESULT_ACCESS) ] = -1,
1528 [ C(RESULT_MISS) ] = -1,
1533 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1534 [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1537 [ C(RESULT_ACCESS) ] = -1,
1538 [ C(RESULT_MISS) ] = -1,
1540 [ C(OP_PREFETCH) ] = {
1541 [ C(RESULT_ACCESS) ] = -1,
1542 [ C(RESULT_MISS) ] = -1,
1547 static __initconst const u64 atom_hw_cache_event_ids
1548 [PERF_COUNT_HW_CACHE_MAX]
1549 [PERF_COUNT_HW_CACHE_OP_MAX]
1550 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1554 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1555 [ C(RESULT_MISS) ] = 0,
1558 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1559 [ C(RESULT_MISS) ] = 0,
1561 [ C(OP_PREFETCH) ] = {
1562 [ C(RESULT_ACCESS) ] = 0x0,
1563 [ C(RESULT_MISS) ] = 0,
1568 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1569 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1572 [ C(RESULT_ACCESS) ] = -1,
1573 [ C(RESULT_MISS) ] = -1,
1575 [ C(OP_PREFETCH) ] = {
1576 [ C(RESULT_ACCESS) ] = 0,
1577 [ C(RESULT_MISS) ] = 0,
1582 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1583 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1586 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1587 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1589 [ C(OP_PREFETCH) ] = {
1590 [ C(RESULT_ACCESS) ] = 0,
1591 [ C(RESULT_MISS) ] = 0,
1596 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1597 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1600 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1601 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1603 [ C(OP_PREFETCH) ] = {
1604 [ C(RESULT_ACCESS) ] = 0,
1605 [ C(RESULT_MISS) ] = 0,
1610 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1611 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1614 [ C(RESULT_ACCESS) ] = -1,
1615 [ C(RESULT_MISS) ] = -1,
1617 [ C(OP_PREFETCH) ] = {
1618 [ C(RESULT_ACCESS) ] = -1,
1619 [ C(RESULT_MISS) ] = -1,
1624 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1625 [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1628 [ C(RESULT_ACCESS) ] = -1,
1629 [ C(RESULT_MISS) ] = -1,
1631 [ C(OP_PREFETCH) ] = {
1632 [ C(RESULT_ACCESS) ] = -1,
1633 [ C(RESULT_MISS) ] = -1,
1638 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1639 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1640 /* no_alloc_cycles.not_delivered */
1641 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1642 "event=0xca,umask=0x50");
1643 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1644 /* uops_retired.all */
1645 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1646 "event=0xc2,umask=0x10");
1647 /* uops_retired.all */
1648 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1649 "event=0xc2,umask=0x10");
1651 static struct attribute *slm_events_attrs[] = {
1652 EVENT_PTR(td_total_slots_slm),
1653 EVENT_PTR(td_total_slots_scale_slm),
1654 EVENT_PTR(td_fetch_bubbles_slm),
1655 EVENT_PTR(td_fetch_bubbles_scale_slm),
1656 EVENT_PTR(td_slots_issued_slm),
1657 EVENT_PTR(td_slots_retired_slm),
1661 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1663 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1664 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1665 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1669 #define SLM_DMND_READ SNB_DMND_DATA_RD
1670 #define SLM_DMND_WRITE SNB_DMND_RFO
1671 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1673 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1674 #define SLM_LLC_ACCESS SNB_RESP_ANY
1675 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1677 static __initconst const u64 slm_hw_cache_extra_regs
1678 [PERF_COUNT_HW_CACHE_MAX]
1679 [PERF_COUNT_HW_CACHE_OP_MAX]
1680 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1684 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1685 [ C(RESULT_MISS) ] = 0,
1688 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1689 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1691 [ C(OP_PREFETCH) ] = {
1692 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1693 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1698 static __initconst const u64 slm_hw_cache_event_ids
1699 [PERF_COUNT_HW_CACHE_MAX]
1700 [PERF_COUNT_HW_CACHE_OP_MAX]
1701 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1705 [ C(RESULT_ACCESS) ] = 0,
1706 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1709 [ C(RESULT_ACCESS) ] = 0,
1710 [ C(RESULT_MISS) ] = 0,
1712 [ C(OP_PREFETCH) ] = {
1713 [ C(RESULT_ACCESS) ] = 0,
1714 [ C(RESULT_MISS) ] = 0,
1719 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1720 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1723 [ C(RESULT_ACCESS) ] = -1,
1724 [ C(RESULT_MISS) ] = -1,
1726 [ C(OP_PREFETCH) ] = {
1727 [ C(RESULT_ACCESS) ] = 0,
1728 [ C(RESULT_MISS) ] = 0,
1733 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1734 [ C(RESULT_ACCESS) ] = 0x01b7,
1735 [ C(RESULT_MISS) ] = 0,
1738 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1739 [ C(RESULT_ACCESS) ] = 0x01b7,
1740 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1741 [ C(RESULT_MISS) ] = 0x01b7,
1743 [ C(OP_PREFETCH) ] = {
1744 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1745 [ C(RESULT_ACCESS) ] = 0x01b7,
1746 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1747 [ C(RESULT_MISS) ] = 0x01b7,
1752 [ C(RESULT_ACCESS) ] = 0,
1753 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1756 [ C(RESULT_ACCESS) ] = 0,
1757 [ C(RESULT_MISS) ] = 0,
1759 [ C(OP_PREFETCH) ] = {
1760 [ C(RESULT_ACCESS) ] = 0,
1761 [ C(RESULT_MISS) ] = 0,
1766 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1767 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1770 [ C(RESULT_ACCESS) ] = -1,
1771 [ C(RESULT_MISS) ] = -1,
1773 [ C(OP_PREFETCH) ] = {
1774 [ C(RESULT_ACCESS) ] = -1,
1775 [ C(RESULT_MISS) ] = -1,
1780 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1781 [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1784 [ C(RESULT_ACCESS) ] = -1,
1785 [ C(RESULT_MISS) ] = -1,
1787 [ C(OP_PREFETCH) ] = {
1788 [ C(RESULT_ACCESS) ] = -1,
1789 [ C(RESULT_MISS) ] = -1,
1794 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1795 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1796 /* UOPS_NOT_DELIVERED.ANY */
1797 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1798 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1799 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1800 /* UOPS_RETIRED.ANY */
1801 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1802 /* UOPS_ISSUED.ANY */
1803 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1805 static struct attribute *glm_events_attrs[] = {
1806 EVENT_PTR(td_total_slots_glm),
1807 EVENT_PTR(td_total_slots_scale_glm),
1808 EVENT_PTR(td_fetch_bubbles_glm),
1809 EVENT_PTR(td_recovery_bubbles_glm),
1810 EVENT_PTR(td_slots_issued_glm),
1811 EVENT_PTR(td_slots_retired_glm),
1815 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1816 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1817 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1818 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1822 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1823 #define GLM_DEMAND_RFO BIT_ULL(1)
1824 #define GLM_ANY_RESPONSE BIT_ULL(16)
1825 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1826 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1827 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1828 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1829 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1830 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1831 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1833 static __initconst const u64 glm_hw_cache_event_ids
1834 [PERF_COUNT_HW_CACHE_MAX]
1835 [PERF_COUNT_HW_CACHE_OP_MAX]
1836 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1839 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1840 [C(RESULT_MISS)] = 0x0,
1843 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1844 [C(RESULT_MISS)] = 0x0,
1846 [C(OP_PREFETCH)] = {
1847 [C(RESULT_ACCESS)] = 0x0,
1848 [C(RESULT_MISS)] = 0x0,
1853 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1854 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1857 [C(RESULT_ACCESS)] = -1,
1858 [C(RESULT_MISS)] = -1,
1860 [C(OP_PREFETCH)] = {
1861 [C(RESULT_ACCESS)] = 0x0,
1862 [C(RESULT_MISS)] = 0x0,
1867 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1868 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1871 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1872 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1874 [C(OP_PREFETCH)] = {
1875 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1876 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1881 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1882 [C(RESULT_MISS)] = 0x0,
1885 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1886 [C(RESULT_MISS)] = 0x0,
1888 [C(OP_PREFETCH)] = {
1889 [C(RESULT_ACCESS)] = 0x0,
1890 [C(RESULT_MISS)] = 0x0,
1895 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1896 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1899 [C(RESULT_ACCESS)] = -1,
1900 [C(RESULT_MISS)] = -1,
1902 [C(OP_PREFETCH)] = {
1903 [C(RESULT_ACCESS)] = -1,
1904 [C(RESULT_MISS)] = -1,
1909 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1910 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1913 [C(RESULT_ACCESS)] = -1,
1914 [C(RESULT_MISS)] = -1,
1916 [C(OP_PREFETCH)] = {
1917 [C(RESULT_ACCESS)] = -1,
1918 [C(RESULT_MISS)] = -1,
1923 static __initconst const u64 glm_hw_cache_extra_regs
1924 [PERF_COUNT_HW_CACHE_MAX]
1925 [PERF_COUNT_HW_CACHE_OP_MAX]
1926 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1929 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1931 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1935 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1937 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1940 [C(OP_PREFETCH)] = {
1941 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1943 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1949 static __initconst const u64 glp_hw_cache_event_ids
1950 [PERF_COUNT_HW_CACHE_MAX]
1951 [PERF_COUNT_HW_CACHE_OP_MAX]
1952 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1955 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1956 [C(RESULT_MISS)] = 0x0,
1959 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1960 [C(RESULT_MISS)] = 0x0,
1962 [C(OP_PREFETCH)] = {
1963 [C(RESULT_ACCESS)] = 0x0,
1964 [C(RESULT_MISS)] = 0x0,
1969 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1970 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1973 [C(RESULT_ACCESS)] = -1,
1974 [C(RESULT_MISS)] = -1,
1976 [C(OP_PREFETCH)] = {
1977 [C(RESULT_ACCESS)] = 0x0,
1978 [C(RESULT_MISS)] = 0x0,
1983 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1984 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1987 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1988 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1990 [C(OP_PREFETCH)] = {
1991 [C(RESULT_ACCESS)] = 0x0,
1992 [C(RESULT_MISS)] = 0x0,
1997 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1998 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
2001 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2002 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
2004 [C(OP_PREFETCH)] = {
2005 [C(RESULT_ACCESS)] = 0x0,
2006 [C(RESULT_MISS)] = 0x0,
2011 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
2012 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
2015 [C(RESULT_ACCESS)] = -1,
2016 [C(RESULT_MISS)] = -1,
2018 [C(OP_PREFETCH)] = {
2019 [C(RESULT_ACCESS)] = -1,
2020 [C(RESULT_MISS)] = -1,
2025 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
2026 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
2029 [C(RESULT_ACCESS)] = -1,
2030 [C(RESULT_MISS)] = -1,
2032 [C(OP_PREFETCH)] = {
2033 [C(RESULT_ACCESS)] = -1,
2034 [C(RESULT_MISS)] = -1,
2039 static __initconst const u64 glp_hw_cache_extra_regs
2040 [PERF_COUNT_HW_CACHE_MAX]
2041 [PERF_COUNT_HW_CACHE_OP_MAX]
2042 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2045 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2047 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2051 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2053 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2056 [C(OP_PREFETCH)] = {
2057 [C(RESULT_ACCESS)] = 0x0,
2058 [C(RESULT_MISS)] = 0x0,
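/*
 * Illustrative note (not part of the original tables): a generic cache
 * event such as "LLC-loads" is encoded by the perf core as
 * PERF_COUNT_HW_CACHE_LL | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16). The common x86 event code
 * then consults both tables, roughly:
 *
 *	config  = hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_ACCESS)];
 *		  // 0x1b7, i.e. OFFCORE_RESPONSE
 *	config1 = hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_ACCESS)];
 *		  // e.g. GLM_DEMAND_READ|GLM_LLC_ACCESS, later programmed
 *		  // into an MSR_OFFCORE_RSP_x extra register
 *
 * This is a sketch of the mapping for orientation, not its implementation.
 */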
2063 #define TNT_LOCAL_DRAM BIT_ULL(26)
2064 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
2065 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO
2066 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE
2067 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2068 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2069 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2071 static __initconst const u64 tnt_hw_cache_extra_regs
2072 [PERF_COUNT_HW_CACHE_MAX]
2073 [PERF_COUNT_HW_CACHE_OP_MAX]
2074 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2077 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
2079 [C(RESULT_MISS)] = TNT_DEMAND_READ|
2083 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
2085 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
2088 [C(OP_PREFETCH)] = {
2089 [C(RESULT_ACCESS)] = 0x0,
2090 [C(RESULT_MISS)] = 0x0,
2095 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
2096 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
2097 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
2098 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
2100 static struct attribute *tnt_events_attrs[] = {
2101 EVENT_PTR(td_fe_bound_tnt),
2102 EVENT_PTR(td_retiring_tnt),
2103 EVENT_PTR(td_bad_spec_tnt),
2104 EVENT_PTR(td_be_bound_tnt),
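/*
 * Usage sketch (an assumption about the tooling side, not taken from this
 * file): the EVENT_ATTR_STR() entries above are exported as named sysfs
 * events, typically under /sys/bus/event_source/devices/cpu*/events/, so
 * the Tremont topdown events can be requested by name, e.g.:
 *
 *	perf stat -e topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound -a sleep 1
 *
 * On these Atom cores the four events are ordinary GP-counter events (no
 * PERF_METRICS MSR is involved), so no slots group leader is required.
 */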
2108 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2109 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2110 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2111 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2115 EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3");
2116 EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6");
2118 static struct attribute *grt_mem_attrs[] = {
2119 EVENT_PTR(mem_ld_grt),
2120 EVENT_PTR(mem_st_grt),
2124 static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2125 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2126 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2127 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2128 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2132 EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0");
2133 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0");
2135 static struct attribute *cmt_events_attrs[] = {
2136 EVENT_PTR(td_fe_bound_tnt),
2137 EVENT_PTR(td_retiring_cmt),
2138 EVENT_PTR(td_bad_spec_cmt),
2139 EVENT_PTR(td_be_bound_tnt),
2143 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
2144 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2145 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
2146 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1),
2147 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2148 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
2149 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
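/*
 * Illustrative note: each extra_reg entry above ties an event encoding
 * (e.g. 0x01b7/0x02b7 for OFFCORE_RESPONSE) to the auxiliary MSR holding
 * its response-type selection, plus the mask of selection bits valid on
 * this core. A raw request along the lines of
 *
 *	perf stat -e cpu/event=0xb7,umask=0x01,config1=0x10001/
 *
 * has its config1 validated against valid_mask and, once the event is
 * scheduled, written to MSR_OFFCORE_RSP_0. The config1 value here is a
 * made-up example, not a recommended setting.
 */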
2153 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
2154 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
2155 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
2156 #define KNL_MCDRAM_FAR BIT_ULL(22)
2157 #define KNL_DDR_LOCAL BIT_ULL(23)
2158 #define KNL_DDR_FAR BIT_ULL(24)
2159 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2160 KNL_DDR_LOCAL | KNL_DDR_FAR)
2161 #define KNL_L2_READ SLM_DMND_READ
2162 #define KNL_L2_WRITE SLM_DMND_WRITE
2163 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
2164 #define KNL_L2_ACCESS SLM_LLC_ACCESS
2165 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2166 KNL_DRAM_ANY | SNB_SNP_ANY | \
2169 static __initconst const u64 knl_hw_cache_extra_regs
2170 [PERF_COUNT_HW_CACHE_MAX]
2171 [PERF_COUNT_HW_CACHE_OP_MAX]
2172 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2175 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2176 [C(RESULT_MISS)] = 0,
2179 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2180 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
2182 [C(OP_PREFETCH)] = {
2183 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2184 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
2190 * Used from PMIs where the LBRs are already disabled.
2192 * This function may be called consecutively. The PMU must remain in the
2193 * disabled state across such consecutive calls.
2195 * During consecutive calls, the same disable value will be written to the
2196 * related registers, so the PMU state remains unchanged.
2198 * intel_bts events don't coexist with intel PMU's BTS events because of
2199 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2200 * disabled around intel PMU's event batching etc., only inside the PMI handler.
2202 * Avoid PEBS_ENABLE MSR access in PMIs.
2203 * GLOBAL_CTRL has already been cleared, so none of the counters count anymore;
2204 * it doesn't matter whether PEBS is enabled or not.
2205 * Usually, the PEBS status is not changed in PMIs, so it's unnecessary to
2206 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2207 * However, there are some cases which may change the PEBS status, e.g. PMI
2208 * throttle. PEBS_ENABLE should be updated where the status changes.
2210 static __always_inline void __intel_pmu_disable_all(bool bts)
2212 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2214 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2216 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2217 intel_pmu_disable_bts();
2220 static __always_inline void intel_pmu_disable_all(void)
2222 __intel_pmu_disable_all(true);
2223 intel_pmu_pebs_disable_all();
2224 intel_pmu_lbr_disable_all();
2227 static void __intel_pmu_enable_all(int added, bool pmi)
2229 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2230 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2232 intel_pmu_lbr_enable_all(pmi);
2234 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
2235 wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
2236 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
2239 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
2240 intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2242 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2243 struct perf_event *event =
2244 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2246 if (WARN_ON_ONCE(!event))
2249 intel_pmu_enable_bts(event->hw.config);
2253 static void intel_pmu_enable_all(int added)
2255 intel_pmu_pebs_enable_all();
2256 __intel_pmu_enable_all(added, false);
2260 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2261 unsigned int cnt, unsigned long flags)
2263 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2265 intel_pmu_lbr_read();
2266 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2268 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2269 intel_pmu_enable_all(0);
2270 local_irq_restore(flags);
2275 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2277 unsigned long flags;
2279 /* must not have branches... */
2280 local_irq_save(flags);
2281 __intel_pmu_disable_all(false); /* we don't care about BTS */
2282 __intel_pmu_lbr_disable();
2283 /* ... until here */
2284 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2288 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2290 unsigned long flags;
2292 /* must not have branches... */
2293 local_irq_save(flags);
2294 __intel_pmu_disable_all(false); /* we don't care about BTS */
2295 __intel_pmu_arch_lbr_disable();
2296 /* ... until here */
2297 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
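/*
 * Usage note (based on how the wider kernel consumes these functions, not
 * on code in this file): the two snapshot routines above are installed as
 * the perf_snapshot_branch_stack static call during PMU init, so a caller
 * such as the BPF helper bpf_get_branch_snapshot() effectively does:
 *
 *	struct perf_branch_entry entries[16];	// 16 is just an example size
 *	int copied;
 *
 *	copied = static_call(perf_snapshot_branch_stack)(entries, 16);
 *	// 'copied' entries now hold the most recent LBR records, captured
 *	// with interrupts disabled and further branches suppressed.
 */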
2302 * Intel Errata AAK100 (model 26)
2303 * Intel Errata AAP53 (model 30)
2304 * Intel Errata BD53 (model 44)
2306 * The official story:
2307 * These chips need to be 'reset' when adding counters by programming the
2308 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2309 * in sequence on the same PMC or on different PMCs.
2311 * In practice it appears some of these events do in fact count, and
2312 * we need to program all 4 events.
2314 static void intel_pmu_nhm_workaround(void)
2316 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2317 static const unsigned long nhm_magic[4] = {
2323 struct perf_event *event;
2327 * The Errata requires below steps:
2328 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2329 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2330 * the corresponding PMCx;
2331 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2332 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2333 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2337 * The real steps we choose are a little different from above.
2338 * A) To reduce MSR operations, we don't run step 1) as they
2339 * are already cleared before this function is called;
2340 * B) Call x86_perf_event_update to save PMCx before configuring
2341 * PERFEVTSELx with magic number;
2342 * C) For step 5), we only clear a PERFEVTSELx when it is
2343 * not currently in use.
2344 * D) Call x86_perf_event_set_period to restore PMCx;
2347 /* We always operate 4 pairs of PERF Counters */
2348 for (i = 0; i < 4; i++) {
2349 event = cpuc->events[i];
2351 static_call(x86_pmu_update)(event);
2354 for (i = 0; i < 4; i++) {
2355 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2356 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2359 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2360 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2362 for (i = 0; i < 4; i++) {
2363 event = cpuc->events[i];
2366 static_call(x86_pmu_set_period)(event);
2367 __x86_pmu_enable_event(&event->hw,
2368 ARCH_PERFMON_EVENTSEL_ENABLE);
2370 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2374 static void intel_pmu_nhm_enable_all(int added)
2377 intel_pmu_nhm_workaround();
2378 intel_pmu_enable_all(added);
2381 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2383 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2385 if (cpuc->tfa_shadow != val) {
2386 cpuc->tfa_shadow = val;
2387 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2391 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2394 * We're going to use PMC3, make sure TFA is set before we touch it.
2397 intel_set_tfa(cpuc, true);
2400 static void intel_tfa_pmu_enable_all(int added)
2402 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2405 * If we find PMC3 is no longer used when we enable the PMU, we can
2408 if (!test_bit(3, cpuc->active_mask))
2409 intel_set_tfa(cpuc, false);
2411 intel_pmu_enable_all(added);
2414 static inline u64 intel_pmu_get_status(void)
2418 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2423 static inline void intel_pmu_ack_status(u64 ack)
2425 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2428 static inline bool event_is_checkpointed(struct perf_event *event)
2430 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2433 static inline void intel_set_masks(struct perf_event *event, int idx)
2435 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2437 if (event->attr.exclude_host)
2438 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2439 if (event->attr.exclude_guest)
2440 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2441 if (event_is_checkpointed(event))
2442 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2445 static inline void intel_clear_masks(struct perf_event *event, int idx)
2447 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2449 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2450 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2451 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2454 static void intel_pmu_disable_fixed(struct perf_event *event)
2456 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2457 struct hw_perf_event *hwc = &event->hw;
2461 if (is_topdown_idx(idx)) {
2462 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2465 * When there are other active TopDown events,
2466 * don't disable the fixed counter 3.
2468 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2470 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2473 intel_clear_masks(event, idx);
2475 mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
2476 cpuc->fixed_ctrl_val &= ~mask;
2479 static void intel_pmu_disable_event(struct perf_event *event)
2481 struct hw_perf_event *hwc = &event->hw;
2485 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2486 intel_clear_masks(event, idx);
2487 x86_pmu_disable_event(event);
2489 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2490 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2491 intel_pmu_disable_fixed(event);
2493 case INTEL_PMC_IDX_FIXED_BTS:
2494 intel_pmu_disable_bts();
2495 intel_pmu_drain_bts_buffer();
2497 case INTEL_PMC_IDX_FIXED_VLBR:
2498 intel_clear_masks(event, idx);
2501 intel_clear_masks(event, idx);
2502 pr_warn("Failed to disable the event with invalid index %d\n",
2508 * Needs to be called after x86_pmu_disable_event,
2509 * so we don't trigger the event without PEBS bit set.
2511 if (unlikely(event->attr.precise_ip))
2512 intel_pmu_pebs_disable(event);
2515 static void intel_pmu_assign_event(struct perf_event *event, int idx)
2517 if (is_pebs_pt(event))
2518 perf_report_aux_output_id(event, idx);
2521 static void intel_pmu_del_event(struct perf_event *event)
2523 if (needs_branch_stack(event))
2524 intel_pmu_lbr_del(event);
2525 if (event->attr.precise_ip)
2526 intel_pmu_pebs_del(event);
2529 static int icl_set_topdown_event_period(struct perf_event *event)
2531 struct hw_perf_event *hwc = &event->hw;
2532 s64 left = local64_read(&hwc->period_left);
2535 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2536 * Software should start both registers, PERF_METRICS and fixed
2537 * counter 3, from zero.
2538 * Clear PERF_METRICS and fixed counter 3 at initialization.
2539 * After that, both MSRs are cleared on each read, so there is
2540 * no need to clear them again.
2542 if (left == x86_pmu.max_period) {
2543 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2544 wrmsrl(MSR_PERF_METRICS, 0);
2545 hwc->saved_slots = 0;
2546 hwc->saved_metric = 0;
2549 if ((hwc->saved_slots) && is_slots_event(event)) {
2550 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2551 wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
2554 perf_event_update_userpage(event);
2559 static int adl_set_topdown_event_period(struct perf_event *event)
2561 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2563 if (pmu->cpu_type != hybrid_big)
2566 return icl_set_topdown_event_period(event);
2569 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
2571 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2576 * The metric is reported as an 8bit integer fraction
2577 * summing up to 0xff.
2578 * slots-in-metric = (Metric / 0xff) * slots
2580 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2581 return mul_u64_u32_div(slots, val, 0xff);
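/*
 * Worked example (illustrative numbers only): if the PERF_METRICS byte for
 * this metric index reads 0x80 and the SLOTS count is 1000, the event is
 * credited with roughly half of the slots:
 *
 *	val   = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;	// 0x80
 *	delta = mul_u64_u32_div(1000, 0x80, 0xff);				// ~= 501
 */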
2584 static u64 icl_get_topdown_value(struct perf_event *event,
2585 u64 slots, u64 metrics)
2587 int idx = event->hw.idx;
2590 if (is_metric_idx(idx))
2591 delta = icl_get_metrics_event_value(metrics, slots, idx);
2598 static void __icl_update_topdown_event(struct perf_event *event,
2599 u64 slots, u64 metrics,
2600 u64 last_slots, u64 last_metrics)
2602 u64 delta, last = 0;
2604 delta = icl_get_topdown_value(event, slots, metrics);
2606 last = icl_get_topdown_value(event, last_slots, last_metrics);
2609 * The 8-bit integer fraction of the metric may not be accurate,
2610 * especially when the change is very small.
2611 * For example, if only a few bad_spec events happen, the fraction
2612 * may be reduced from 1 to 0. If so, the bad_spec event value
2613 * will be 0, which is definitely less than the last value.
2614 * Avoid updating event->count in this case.
2618 local64_add(delta, &event->count);
2622 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2623 u64 metrics, int metric_end)
2625 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2626 struct perf_event *other;
2629 event->hw.saved_slots = slots;
2630 event->hw.saved_metric = metrics;
2632 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2633 if (!is_topdown_idx(idx))
2635 other = cpuc->events[idx];
2636 other->hw.saved_slots = slots;
2637 other->hw.saved_metric = metrics;
2642 * Update all active Topdown events.
2644 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
2645 * modified by an NMI. The PMU has to be disabled before calling this function.
2648 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
2650 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2651 struct perf_event *other;
2656 /* read Fixed counter 3 */
2657 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2661 /* read PERF_METRICS */
2662 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
2664 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2665 if (!is_topdown_idx(idx))
2667 other = cpuc->events[idx];
2668 __icl_update_topdown_event(other, slots, metrics,
2669 event ? event->hw.saved_slots : 0,
2670 event ? event->hw.saved_metric : 0);
2674 * Check and update this event, which may have been cleared
2675 * in active_mask, e.g. by x86_pmu_stop().
2677 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2678 __icl_update_topdown_event(event, slots, metrics,
2679 event->hw.saved_slots,
2680 event->hw.saved_metric);
2683 * In x86_pmu_stop(), the event is cleared in active_mask first and
2684 * the delta is drained afterwards, which indicates a context switch.
2686 * Save the metric and slots values for the context switch.
2687 * There is no need to reset PERF_METRICS and fixed counter 3,
2688 * because the values will be restored on the next schedule-in.
2690 update_saved_topdown_regs(event, slots, metrics, metric_end);
2695 /* The fixed counter 3 has to be written before the PERF_METRICS. */
2696 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2697 wrmsrl(MSR_PERF_METRICS, 0);
2699 update_saved_topdown_regs(event, 0, 0, metric_end);
2705 static u64 icl_update_topdown_event(struct perf_event *event)
2707 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2708 x86_pmu.num_topdown_events - 1);
2711 static u64 adl_update_topdown_event(struct perf_event *event)
2713 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2715 if (pmu->cpu_type != hybrid_big)
2718 return icl_update_topdown_event(event);
2721 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
2723 static void intel_pmu_read_topdown_event(struct perf_event *event)
2725 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2727 /* Only need to call update_topdown_event() once for group read. */
2728 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
2729 !is_slots_event(event))
2732 perf_pmu_disable(event->pmu);
2733 static_call(intel_pmu_update_topdown_event)(event);
2734 perf_pmu_enable(event->pmu);
2737 static void intel_pmu_read_event(struct perf_event *event)
2739 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2740 intel_pmu_auto_reload_read(event);
2741 else if (is_topdown_count(event))
2742 intel_pmu_read_topdown_event(event);
2744 x86_perf_event_update(event);
2747 static void intel_pmu_enable_fixed(struct perf_event *event)
2749 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2750 struct hw_perf_event *hwc = &event->hw;
2754 if (is_topdown_idx(idx)) {
2755 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2757 * When there are other active TopDown events,
2758 * don't enable the fixed counter 3 again.
2760 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2763 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2766 intel_set_masks(event, idx);
2769 * Enable IRQ generation (0x8), if not PEBS,
2770 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2773 if (!event->attr.precise_ip)
2774 bits |= INTEL_FIXED_0_ENABLE_PMI;
2775 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2776 bits |= INTEL_FIXED_0_USER;
2777 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2778 bits |= INTEL_FIXED_0_KERNEL;
2781 * ANY bit is supported in v3 and up
2783 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2784 bits |= INTEL_FIXED_0_ANYTHREAD;
2786 idx -= INTEL_PMC_IDX_FIXED;
2787 bits = intel_fixed_bits_by_idx(idx, bits);
2788 mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
2790 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2791 bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2792 mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2795 cpuc->fixed_ctrl_val &= ~mask;
2796 cpuc->fixed_ctrl_val |= bits;
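/*
 * Worked example (illustrative only): for fixed counter 1
 * (CPU_CLK_UNHALTED.CORE) counting both user and kernel with the PMI
 * enabled, the 4-bit control field is
 *
 *	INTEL_FIXED_0_ENABLE_PMI | INTEL_FIXED_0_USER | INTEL_FIXED_0_KERNEL = 0xb
 *
 * placed at bits 4-7 of MSR_ARCH_PERFMON_FIXED_CTR_CTRL, i.e. 0xb0 is
 * merged into fixed_ctrl_val. The deferred MSR write then happens in
 * __intel_pmu_enable_all() when fixed_ctrl_val != active_fixed_ctrl_val.
 */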
2799 static void intel_pmu_enable_event(struct perf_event *event)
2801 struct hw_perf_event *hwc = &event->hw;
2804 if (unlikely(event->attr.precise_ip))
2805 intel_pmu_pebs_enable(event);
2808 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2809 intel_set_masks(event, idx);
2810 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2812 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2813 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2814 intel_pmu_enable_fixed(event);
2816 case INTEL_PMC_IDX_FIXED_BTS:
2817 if (!__this_cpu_read(cpu_hw_events.enabled))
2819 intel_pmu_enable_bts(hwc->config);
2821 case INTEL_PMC_IDX_FIXED_VLBR:
2822 intel_set_masks(event, idx);
2825 pr_warn("Failed to enable the event with invalid index %d\n",
2830 static void intel_pmu_add_event(struct perf_event *event)
2832 if (event->attr.precise_ip)
2833 intel_pmu_pebs_add(event);
2834 if (needs_branch_stack(event))
2835 intel_pmu_lbr_add(event);
2839 * Save and restart an expired event. Called by NMI contexts,
2840 * so it has to be careful about preempting normal event ops:
2842 int intel_pmu_save_and_restart(struct perf_event *event)
2844 static_call(x86_pmu_update)(event);
2846 * For a checkpointed counter always reset back to 0. This
2847 * avoids a situation where the counter overflows, aborts the
2848 * transaction and is then set back to shortly before the
2849 * overflow, and overflows and aborts again.
2851 if (unlikely(event_is_checkpointed(event))) {
2852 /* No race with NMIs because the counter should not be armed */
2853 wrmsrl(event->hw.event_base, 0);
2854 local64_set(&event->hw.prev_count, 0);
2856 return static_call(x86_pmu_set_period)(event);
2859 static int intel_pmu_set_period(struct perf_event *event)
2861 if (unlikely(is_topdown_count(event)))
2862 return static_call(intel_pmu_set_topdown_event_period)(event);
2864 return x86_perf_event_set_period(event);
2867 static u64 intel_pmu_update(struct perf_event *event)
2869 if (unlikely(is_topdown_count(event)))
2870 return static_call(intel_pmu_update_topdown_event)(event);
2872 return x86_perf_event_update(event);
2875 static void intel_pmu_reset(void)
2877 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2878 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2879 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2880 int num_counters = hybrid(cpuc->pmu, num_counters);
2881 unsigned long flags;
2887 local_irq_save(flags);
2889 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2891 for (idx = 0; idx < num_counters; idx++) {
2892 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2893 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2895 for (idx = 0; idx < num_counters_fixed; idx++) {
2896 if (fixed_counter_disabled(idx, cpuc->pmu))
2898 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2902 ds->bts_index = ds->bts_buffer_base;
2904 /* Ack all overflows and disable fixed counters */
2905 if (x86_pmu.version >= 2) {
2906 intel_pmu_ack_status(intel_pmu_get_status());
2907 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2910 /* Reset LBRs and LBR freezing */
2911 if (x86_pmu.lbr_nr) {
2912 update_debugctlmsr(get_debugctlmsr() &
2913 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2916 local_irq_restore(flags);
2920 * We may be running with guest PEBS events created by KVM, and the
2921 * PEBS records are logged into the guest's DS and are invisible to the host.
2923 * In the case of a guest PEBS overflow, we only trigger a fake event
2924 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
2925 * The guest will then vm-enter and check the guest DS area to read
2926 * the guest PEBS records.
2928 * The contents and other behavior of the guest event do not matter.
2930 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
2931 struct perf_sample_data *data)
2933 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2934 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
2935 struct perf_event *event = NULL;
2938 if (!unlikely(perf_guest_state()))
2941 if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
2945 for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs,
2946 INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) {
2947 event = cpuc->events[bit];
2948 if (!event->attr.precise_ip)
2951 perf_sample_data_init(data, 0, event->hw.last_period);
2952 if (perf_event_overflow(event, data, regs))
2953 x86_pmu_stop(event, 0);
2955 /* Injecting one fake event is enough. */
2960 static int handle_pmi_common(struct pt_regs *regs, u64 status)
2962 struct perf_sample_data data;
2963 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2966 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2968 inc_irq_stat(apic_perf_irqs);
2971 * Ignore a range of extra bits in status that do not indicate
2972 * overflow by themselves.
2974 status &= ~(GLOBAL_STATUS_COND_CHG |
2975 GLOBAL_STATUS_ASIF |
2976 GLOBAL_STATUS_LBRS_FROZEN);
2980 * In case multiple PEBS events are sampled at the same time,
2981 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2982 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2983 * having their bits set in the status register. This is a sign
2984 * that there was at least one PEBS record pending at the time
2985 * of the PMU interrupt. PEBS counters must only be processed
2986 * via the drain_pebs() calls and not via the regular sample
2987 * processing loop that comes later in this function, otherwise
2988 * phony regular samples may be generated in the sampling buffer
2989 * not marked with the EXACT tag. Another possibility is to have
2990 * one PEBS event and at least one non-PEBS event which overflows
2991 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2992 * not be set, yet the overflow status bit for the PEBS counter will
2995 * To avoid this problem, we systematically ignore the PEBS-enabled
2996 * counters from the GLOBAL_STATUS mask and we always process PEBS
2997 * events via drain_pebs().
2999 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
3002 * PEBS overflow sets bit 62 in the global status register
3004 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
3005 u64 pebs_enabled = cpuc->pebs_enabled;
3008 x86_pmu_handle_guest_pebs(regs, &data);
3009 x86_pmu.drain_pebs(regs, &data);
3010 status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
3013 * PMI throttle may be triggered, which stops the PEBS event.
3014 * Although cpuc->pebs_enabled is updated accordingly, the
3015 * MSR_IA32_PEBS_ENABLE is not updated, because
3016 * cpuc->enabled has been forced to 0 in the PMI.
3017 * Update the MSR if pebs_enabled is changed.
3019 if (pebs_enabled != cpuc->pebs_enabled)
3020 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
3026 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
3028 if (!perf_guest_handle_intel_pt_intr())
3029 intel_pt_interrupt();
3033 * Intel Perf metrics
3035 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
3037 static_call(intel_pmu_update_topdown_event)(NULL);
3041 * Checkpointed counters can lead to 'spurious' PMIs because the
3042 * rollback caused by the PMI will have cleared the overflow status
3043 * bit. Therefore always force probe these counters.
3045 status |= cpuc->intel_cp_status;
3047 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
3048 struct perf_event *event = cpuc->events[bit];
3052 if (!test_bit(bit, cpuc->active_mask))
3055 if (!intel_pmu_save_and_restart(event))
3058 perf_sample_data_init(&data, 0, event->hw.last_period);
3060 if (has_branch_stack(event))
3061 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
3063 if (perf_event_overflow(event, &data, regs))
3064 x86_pmu_stop(event, 0);
3071 * This handler is triggered by the local APIC, so the APIC IRQ handling
3074 static int intel_pmu_handle_irq(struct pt_regs *regs)
3076 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3077 bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
3078 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
3085 * Save the PMU state.
3086 * It needs to be restored when leaving the handler.
3088 pmu_enabled = cpuc->enabled;
3090 * In general, the early ACK is only applied for old platforms.
3091 * For big cores starting from Haswell, the late ACK should be
3093 * For small cores after Tremont, we have to do the ACK right
3094 * before re-enabling counters, which is in the middle of the
3097 if (!late_ack && !mid_ack)
3098 apic_write(APIC_LVTPC, APIC_DM_NMI);
3099 intel_bts_disable_local();
3101 __intel_pmu_disable_all(true);
3102 handled = intel_pmu_drain_bts_buffer();
3103 handled += intel_bts_interrupt();
3104 status = intel_pmu_get_status();
3110 intel_pmu_lbr_read();
3111 intel_pmu_ack_status(status);
3112 if (++loops > 100) {
3116 WARN(1, "perfevents: irq loop stuck!\n");
3117 perf_event_print_debug();
3124 handled += handle_pmi_common(regs, status);
3127 * Repeat if there is more work to be done:
3129 status = intel_pmu_get_status();
3135 apic_write(APIC_LVTPC, APIC_DM_NMI);
3136 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
3137 cpuc->enabled = pmu_enabled;
3139 __intel_pmu_enable_all(0, true);
3140 intel_bts_enable_local();
3143 * Only unmask the NMI after the overflow counters
3144 * have been reset. This avoids spurious NMIs on
3148 apic_write(APIC_LVTPC, APIC_DM_NMI);
3152 static struct event_constraint *
3153 intel_bts_constraints(struct perf_event *event)
3155 if (unlikely(intel_pmu_has_bts(event)))
3156 return &bts_constraint;
3162 * Note: matches a fake event, like Fixed2.
3164 static struct event_constraint *
3165 intel_vlbr_constraints(struct perf_event *event)
3167 struct event_constraint *c = &vlbr_constraint;
3169 if (unlikely(constraint_match(c, event->hw.config))) {
3170 event->hw.flags |= c->flags;
3177 static int intel_alt_er(struct cpu_hw_events *cpuc,
3178 int idx, u64 config)
3180 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3183 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3186 if (idx == EXTRA_REG_RSP_0)
3187 alt_idx = EXTRA_REG_RSP_1;
3189 if (idx == EXTRA_REG_RSP_1)
3190 alt_idx = EXTRA_REG_RSP_0;
3192 if (config & ~extra_regs[alt_idx].valid_mask)
3198 static void intel_fixup_er(struct perf_event *event, int idx)
3200 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3201 event->hw.extra_reg.idx = idx;
3203 if (idx == EXTRA_REG_RSP_0) {
3204 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3205 event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3206 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3207 } else if (idx == EXTRA_REG_RSP_1) {
3208 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3209 event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3210 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
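/*
 * Illustrative scenario (a sketch of how intel_alt_er() and
 * intel_fixup_er() interact, not additional behaviour): if two
 * OFFCORE_RESPONSE events with different response selections are
 * scheduled together, e.g.
 *
 *	perf stat -e cpu/event=0xb7,umask=0x01,config1=A/ \
 *	          -e cpu/event=0xb7,umask=0x01,config1=B/
 *
 * the first event claims MSR_OFFCORE_RSP_0. The second cannot share it
 * (different config), so __intel_shared_reg_get_constraints() retries with
 * the alternate index from intel_alt_er(); intel_fixup_er() then rewrites
 * the event code to 0x02b7 and points it at MSR_OFFCORE_RSP_1, provided B
 * fits that register's valid_mask. A and B are placeholders, not real
 * selections.
 */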
3215 * manage allocation of shared extra msr for certain events
3218 * per-cpu: to be shared between the various events on a single PMU
3219 * per-core: per-cpu + shared by HT threads
3221 static struct event_constraint *
3222 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3223 struct perf_event *event,
3224 struct hw_perf_event_extra *reg)
3226 struct event_constraint *c = &emptyconstraint;
3227 struct er_account *era;
3228 unsigned long flags;
3232 * reg->alloc can be set due to existing state, so for fake cpuc we
3233 * need to ignore this, otherwise we might fail to allocate proper fake
3234 * state for this extra reg constraint. Also see the comment below.
3236 if (reg->alloc && !cpuc->is_fake)
3237 return NULL; /* call x86_get_event_constraint() */
3240 era = &cpuc->shared_regs->regs[idx];
3242 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
3243 * passing a fake cpuc
3245 raw_spin_lock_irqsave(&era->lock, flags);
3247 if (!atomic_read(&era->ref) || era->config == reg->config) {
3250 * If it's a fake cpuc -- as per validate_{group,event}() -- we
3251 * shouldn't touch event state, and we can avoid doing so
3252 * since both will only call get_event_constraints() once
3253 * on each event; this avoids the need for reg->alloc.
3255 * Not doing the ER fixup will only result in era->reg being
3256 * wrong, but since we won't actually try to program the hardware,
3257 * this isn't a problem either.
3259 if (!cpuc->is_fake) {
3260 if (idx != reg->idx)
3261 intel_fixup_er(event, idx);
3264 * x86_schedule_events() can call get_event_constraints()
3265 * multiple times on events in the case of incremental
3266 * scheduling. reg->alloc ensures we only do the ER
3272 /* lock in msr value */
3273 era->config = reg->config;
3274 era->reg = reg->reg;
3277 atomic_inc(&era->ref);
3280 * need to call x86_get_event_constraint()
3281 * to check if associated event has constraints
3285 idx = intel_alt_er(cpuc, idx, reg->config);
3286 if (idx != reg->idx) {
3287 raw_spin_unlock_irqrestore(&era->lock, flags);
3291 raw_spin_unlock_irqrestore(&era->lock, flags);
3297 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3298 struct hw_perf_event_extra *reg)
3300 struct er_account *era;
3303 * Only put constraint if extra reg was actually allocated. Also takes
3304 * care of events which do not use an extra shared reg.
3306 * Also, if this is a fake cpuc we shouldn't touch any event state
3307 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3308 * either since it'll be thrown out.
3310 if (!reg->alloc || cpuc->is_fake)
3313 era = &cpuc->shared_regs->regs[reg->idx];
3315 /* one fewer user */
3316 atomic_dec(&era->ref);
3318 /* allocate again next time */
3322 static struct event_constraint *
3323 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3324 struct perf_event *event)
3326 struct event_constraint *c = NULL, *d;
3327 struct hw_perf_event_extra *xreg, *breg;
3329 xreg = &event->hw.extra_reg;
3330 if (xreg->idx != EXTRA_REG_NONE) {
3331 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3332 if (c == &emptyconstraint)
3335 breg = &event->hw.branch_reg;
3336 if (breg->idx != EXTRA_REG_NONE) {
3337 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3338 if (d == &emptyconstraint) {
3339 __intel_shared_reg_put_constraints(cpuc, xreg);
3346 struct event_constraint *
3347 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3348 struct perf_event *event)
3350 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3351 struct event_constraint *c;
3353 if (event_constraints) {
3354 for_each_event_constraint(c, event_constraints) {
3355 if (constraint_match(c, event->hw.config)) {
3356 event->hw.flags |= c->flags;
3362 return &hybrid_var(cpuc->pmu, unconstrained);
3365 static struct event_constraint *
3366 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3367 struct perf_event *event)
3369 struct event_constraint *c;
3371 c = intel_vlbr_constraints(event);
3375 c = intel_bts_constraints(event);
3379 c = intel_shared_regs_constraints(cpuc, event);
3383 c = intel_pebs_constraints(event);
3387 return x86_get_event_constraints(cpuc, idx, event);
3391 intel_start_scheduling(struct cpu_hw_events *cpuc)
3393 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3394 struct intel_excl_states *xl;
3395 int tid = cpuc->excl_thread_id;
3398 * nothing needed if in group validation mode
3400 if (cpuc->is_fake || !is_ht_workaround_enabled())
3404 * no exclusion needed
3406 if (WARN_ON_ONCE(!excl_cntrs))
3409 xl = &excl_cntrs->states[tid];
3411 xl->sched_started = true;
3413 * Lock the shared state until we are done scheduling,
3414 * i.e. until intel_stop_scheduling() releases it;
3415 * this makes scheduling appear as a transaction.
3417 raw_spin_lock(&excl_cntrs->lock);
3420 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3422 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3423 struct event_constraint *c = cpuc->event_constraint[idx];
3424 struct intel_excl_states *xl;
3425 int tid = cpuc->excl_thread_id;
3427 if (cpuc->is_fake || !is_ht_workaround_enabled())
3430 if (WARN_ON_ONCE(!excl_cntrs))
3433 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3436 xl = &excl_cntrs->states[tid];
3438 lockdep_assert_held(&excl_cntrs->lock);
3440 if (c->flags & PERF_X86_EVENT_EXCL)
3441 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3443 xl->state[cntr] = INTEL_EXCL_SHARED;
3447 intel_stop_scheduling(struct cpu_hw_events *cpuc)
3449 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3450 struct intel_excl_states *xl;
3451 int tid = cpuc->excl_thread_id;
3454 * nothing needed if in group validation mode
3456 if (cpuc->is_fake || !is_ht_workaround_enabled())
3459 * no exclusion needed
3461 if (WARN_ON_ONCE(!excl_cntrs))
3464 xl = &excl_cntrs->states[tid];
3466 xl->sched_started = false;
3468 * release shared state lock (acquired in intel_start_scheduling())
3470 raw_spin_unlock(&excl_cntrs->lock);
3473 static struct event_constraint *
3474 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3476 WARN_ON_ONCE(!cpuc->constraint_list);
3478 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3479 struct event_constraint *cx;
3482 * grab pre-allocated constraint entry
3484 cx = &cpuc->constraint_list[idx];
3487 * initialize dynamic constraint
3488 * with static constraint
3493 * mark constraint as dynamic
3495 cx->flags |= PERF_X86_EVENT_DYNAMIC;
3502 static struct event_constraint *
3503 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3504 int idx, struct event_constraint *c)
3506 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3507 struct intel_excl_states *xlo;
3508 int tid = cpuc->excl_thread_id;
3512 * validating a group does not require
3513 * enforcing cross-thread exclusion
3515 if (cpuc->is_fake || !is_ht_workaround_enabled())
3519 * no exclusion needed
3521 if (WARN_ON_ONCE(!excl_cntrs))
3525 * because we modify the constraint, we need
3526 * to make a copy. Static constraints come
3527 * from static const tables.
3529 * only needed when constraint has not yet
3530 * been cloned (marked dynamic)
3532 c = dyn_constraint(cpuc, c, idx);
3535 * From here on, the constraint is dynamic.
3536 * Either it was just allocated above, or it
3537 * was allocated during an earlier invocation
3542 * state of sibling HT
3544 xlo = &excl_cntrs->states[tid ^ 1];
3547 * event requires exclusive counter access
3550 is_excl = c->flags & PERF_X86_EVENT_EXCL;
3551 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3552 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3553 if (!cpuc->n_excl++)
3554 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3558 * Modify static constraint with current dynamic
3561 * EXCLUSIVE: sibling counter measuring exclusive event
3562 * SHARED : sibling counter measuring non-exclusive event
3563 * UNUSED : sibling counter unused
3566 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3568 * An exclusive event in the sibling counter means
3569 * our corresponding counter cannot be used,
3570 * regardless of our event.
3572 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3573 __clear_bit(i, c->idxmsk);
3578 * if we are measuring an exclusive event while the sibling
3579 * is measuring a non-exclusive one, then the counter cannot
3582 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3583 __clear_bit(i, c->idxmsk);
3590 * if we return an empty mask, then switch
3591 * back to static empty constraint to avoid
3592 * the cost of freeing later on
3595 c = &emptyconstraint;
3602 static struct event_constraint *
3603 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3604 struct perf_event *event)
3606 struct event_constraint *c1, *c2;
3608 c1 = cpuc->event_constraint[idx];
3612 * - static constraint: no change across incremental scheduling calls
3613 * - dynamic constraint: handled by intel_get_excl_constraints()
3615 c2 = __intel_get_event_constraints(cpuc, idx, event);
3617 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3618 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3619 c1->weight = c2->weight;
3623 if (cpuc->excl_cntrs)
3624 return intel_get_excl_constraints(cpuc, event, idx, c2);
3629 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3630 struct perf_event *event)
3632 struct hw_perf_event *hwc = &event->hw;
3633 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3634 int tid = cpuc->excl_thread_id;
3635 struct intel_excl_states *xl;
3638 * nothing needed if in group validation mode
3643 if (WARN_ON_ONCE(!excl_cntrs))
3646 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3647 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3648 if (!--cpuc->n_excl)
3649 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3653 * If event was actually assigned, then mark the counter state as
3656 if (hwc->idx >= 0) {
3657 xl = &excl_cntrs->states[tid];
3660 * put_constraint may be called from x86_schedule_events()
3661 * which already holds the lock, so here make the locking conditional
3664 if (!xl->sched_started)
3665 raw_spin_lock(&excl_cntrs->lock);
3667 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3669 if (!xl->sched_started)
3670 raw_spin_unlock(&excl_cntrs->lock);
3675 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3676 struct perf_event *event)
3678 struct hw_perf_event_extra *reg;
3680 reg = &event->hw.extra_reg;
3681 if (reg->idx != EXTRA_REG_NONE)
3682 __intel_shared_reg_put_constraints(cpuc, reg);
3684 reg = &event->hw.branch_reg;
3685 if (reg->idx != EXTRA_REG_NONE)
3686 __intel_shared_reg_put_constraints(cpuc, reg);
3689 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3690 struct perf_event *event)
3692 intel_put_shared_regs_event_constraints(cpuc, event);
3695 * If the PMU has exclusive counter restrictions, then
3696 * all events are subject to them and must call the
3697 * put_excl_constraints() routine.
3699 if (cpuc->excl_cntrs)
3700 intel_put_excl_constraints(cpuc, event);
3703 static void intel_pebs_aliases_core2(struct perf_event *event)
3705 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3707 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3708 * (0x003c) so that we can use it with PEBS.
3710 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3711 * PEBS capable. However we can use INST_RETIRED.ANY_P
3712 * (0x00c0), which is a PEBS capable event, to get the same
3715 * INST_RETIRED.ANY_P counts the number of cycles that retire
3716 * CNTMASK instructions. By setting CNTMASK to a value (16)
3717 * larger than the maximum number of instructions that can be
3718 * retired per cycle (4) and then inverting the condition, we
3719 * count all cycles that retire 16 or less instructions, which
3722 * Thereby we gain a PEBS capable cycle counter.
3724 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3726 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3727 event->hw.config = alt_config;
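/*
 * Illustrative note: with the perfevtsel layout (event select in bits 0-7,
 * invert in bit 23, cmask in bits 24-31), the alternative encoding built
 * above corresponds to the raw event
 *
 *	cmask=16, inv=1, event=0xc0  ->  0x108000c0
 *
 * i.e. roughly what "perf stat -e cpu/event=0xc0,inv=1,cmask=16/" would
 * request by hand; the kernel simply builds it via X86_CONFIG().
 */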
3731 static void intel_pebs_aliases_snb(struct perf_event *event)
3733 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3735 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3736 * (0x003c) so that we can use it with PEBS.
3738 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3739 * PEBS capable. However we can use UOPS_RETIRED.ALL
3740 * (0x01c2), which is a PEBS capable event, to get the same
3743 * UOPS_RETIRED.ALL counts the number of cycles that retire
3744 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3745 * larger than the maximum number of micro-ops that can be
3746 * retired per cycle (4) and then inverting the condition, we
3747 * count all cycles that retire 16 or less micro-ops, which
3750 * Thereby we gain a PEBS capable cycle counter.
3752 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3754 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3755 event->hw.config = alt_config;
3759 static void intel_pebs_aliases_precdist(struct perf_event *event)
3761 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3763 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3764 * (0x003c) so that we can use it with PEBS.
3766 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3767 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3768 * (0x01c0), which is a PEBS capable event, to get the same
3771 * The PREC_DIST event has special support to minimize sample
3772 * shadowing effects. One drawback is that it can only be
3773 * programmed on counter 1, but that seems like an
3774 * acceptable trade-off.
3776 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3778 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3779 event->hw.config = alt_config;
3783 static void intel_pebs_aliases_ivb(struct perf_event *event)
3785 if (event->attr.precise_ip < 3)
3786 return intel_pebs_aliases_snb(event);
3787 return intel_pebs_aliases_precdist(event);
3790 static void intel_pebs_aliases_skl(struct perf_event *event)
3792 if (event->attr.precise_ip < 3)
3793 return intel_pebs_aliases_core2(event);
3794 return intel_pebs_aliases_precdist(event);
3797 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3799 unsigned long flags = x86_pmu.large_pebs_flags;
3801 if (event->attr.use_clockid)
3802 flags &= ~PERF_SAMPLE_TIME;
3803 if (!event->attr.exclude_kernel)
3804 flags &= ~PERF_SAMPLE_REGS_USER;
3805 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3806 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3810 static int intel_pmu_bts_config(struct perf_event *event)
3812 struct perf_event_attr *attr = &event->attr;
3814 if (unlikely(intel_pmu_has_bts(event))) {
3815 /* BTS is not supported by this architecture. */
3816 if (!x86_pmu.bts_active)
3819 /* BTS is currently only allowed for user-mode. */
3820 if (!attr->exclude_kernel)
3823 /* BTS is not allowed for precise events. */
3824 if (attr->precise_ip)
3827 /* disallow bts if conflicting events are present */
3828 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3831 event->destroy = hw_perf_lbr_event_destroy;
3837 static int core_pmu_hw_config(struct perf_event *event)
3839 int ret = x86_pmu_hw_config(event);
3844 return intel_pmu_bts_config(event);
3847 #define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \
3848 ((x86_pmu.num_topdown_events - 1) << 8))
3850 static bool is_available_metric_event(struct perf_event *event)
3852 return is_metric_event(event) &&
3853 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
3856 static inline bool is_mem_loads_event(struct perf_event *event)
3858 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
3861 static inline bool is_mem_loads_aux_event(struct perf_event *event)
3863 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
3866 static inline bool require_mem_loads_aux_event(struct perf_event *event)
3868 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
3872 return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;
3877 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
3879 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
3881 return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
3884 static int intel_pmu_hw_config(struct perf_event *event)
3886 int ret = x86_pmu_hw_config(event);
3891 ret = intel_pmu_bts_config(event);
3895 if (event->attr.precise_ip) {
3896 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
3899 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3900 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3901 if (!(event->attr.sample_type &
3902 ~intel_pmu_large_pebs_flags(event))) {
3903 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3904 event->attach_state |= PERF_ATTACH_SCHED_CB;
3907 if (x86_pmu.pebs_aliases)
3908 x86_pmu.pebs_aliases(event);
3911 if (needs_branch_stack(event)) {
3912 ret = intel_pmu_setup_lbr_filter(event);
3915 event->attach_state |= PERF_ATTACH_SCHED_CB;
3918 * BTS is set up earlier in this path, so don't account twice
3920 if (!unlikely(intel_pmu_has_bts(event))) {
3921 /* disallow lbr if conflicting events are present */
3922 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3925 event->destroy = hw_perf_lbr_event_destroy;
3929 if (event->attr.aux_output) {
3930 if (!event->attr.precise_ip)
3933 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
3936 if ((event->attr.type == PERF_TYPE_HARDWARE) ||
3937 (event->attr.type == PERF_TYPE_HW_CACHE))
3941 * Config Topdown slots and metric events
3943 * The slots event on Fixed Counter 3 can support sampling,
3944 * which will be handled normally in x86_perf_event_update().
3946 * Metric events don't support sampling and require being paired
3947 * with a slots event as group leader. When the slots event
3948 * is used in a metrics group, it too cannot support sampling.
3950 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
3951 if (event->attr.config1 || event->attr.config2)
3955 * The TopDown metrics events and slots event don't
3956 * support any filters.
3958 if (event->attr.config & X86_ALL_EVENT_FLAGS)
3961 if (is_available_metric_event(event)) {
3962 struct perf_event *leader = event->group_leader;
3964 /* The metric events don't support sampling. */
3965 if (is_sampling_event(event))
3968 /* The metric events require a slots group leader. */
3969 if (!is_slots_event(leader))
3973 * The leader/SLOTS must not be a sampling event for
3974 * metric use; hardware requires it starts at 0 when used
3975 * in conjunction with MSR_PERF_METRICS.
3977 if (is_sampling_event(leader))
3980 event->event_caps |= PERF_EV_CAP_SIBLING;
3982 * Only once we have a METRICs sibling do we
3983 * need TopDown magic.
3985 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3986 event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
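/*
 * Usage sketch (a tooling-side assumption, only the checks above are
 * enforced here): on PERF_METRICS-capable cores the metric events are
 * expected to be grouped under a slots leader, e.g.
 *
 *	perf stat -e '{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}' ...
 *
 * The slots leader and its metric siblings then carry
 * PERF_X86_EVENT_TOPDOWN, and the siblings additionally get
 * PERF_EV_CAP_SIBLING, as set just above.
 */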
3991 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
3992 * doesn't function quite right. As a work-around it needs to always be
3993 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
3994 * The actual count of this second event is irrelevant; it just needs
3995 * to be active to make the first event function correctly.
3997 * In a group, the auxiliary event must be placed before the load latency
3998 * event. This rule simplifies the implementation of the check,
3999 * because perf may not have a complete group at this point.
4001 if (require_mem_loads_aux_event(event) &&
4002 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
4003 is_mem_loads_event(event)) {
4004 struct perf_event *leader = event->group_leader;
4005 struct perf_event *sibling = NULL;
4008 * When this memload event is also the first event (no group
4009 * exists yet), then there is no aux event before it.
4011 if (leader == event)
4014 if (!is_mem_loads_aux_event(leader)) {
4015 for_each_sibling_event(sibling, leader) {
4016 if (is_mem_loads_aux_event(sibling))
4019 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
4024 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
4027 if (x86_pmu.version < 3)
4030 ret = perf_allow_cpu(&event->attr);
4034 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
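/*
 * Usage note for the SPR work-around above (an assumption about how the
 * perf tool typically satisfies it, not a requirement coded beyond the
 * checks in intel_pmu_hw_config()): the auxiliary event is exposed as
 * "mem-loads-aux", and a sampling group is expected to list it ahead of
 * the load latency event, e.g.
 *
 *	perf record -e '{cpu/mem-loads-aux/,cpu/mem-loads,ldlat=30/}' ...
 *
 * Without a leading auxiliary event, the load latency event is rejected
 * here whenever PERF_SAMPLE_DATA_SRC is requested.
 */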
4040 * Currently, the only caller of this function is the atomic_switch_perf_msrs().
4041 * The host perf context helps to prepare the values of the real hardware for
4042 * a set of msrs that need to be switched atomically in a vmx transaction.
4044 * For example, the pseudocode needed to add a new msr should look like:
4046 * arr[(*nr)++] = (struct perf_guest_switch_msr){
4047 * .msr = the hardware msr address,
4048 * .host = the value the hardware has when it doesn't run a guest,
4049 * .guest = the value the hardware has when it runs a guest,
4052 * These values have nothing to do with the emulated values the guest sees
4053 * when it uses {RD,WR}MSR, which should be handled by the KVM context,
4054 * specifically in the intel_pmu_{get,set}_msr().
4056 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
4058 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4059 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4060 struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
4061 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
4062 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
4063 int global_ctrl, pebs_enable;
4066 global_ctrl = (*nr)++;
4067 arr[global_ctrl] = (struct perf_guest_switch_msr){
4068 .msr = MSR_CORE_PERF_GLOBAL_CTRL,
4069 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
4070 .guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask),
4077 * If a PMU counter has PEBS enabled, it is not enough to
4078 * disable the counter on guest entry, since a PEBS memory
4079 * write can overshoot guest entry and corrupt guest
4080 * memory. Disabling PEBS solves the problem.
4082 * Don't do this if the CPU already enforces it.
4084 if (x86_pmu.pebs_no_isolation) {
4085 arr[(*nr)++] = (struct perf_guest_switch_msr){
4086 .msr = MSR_IA32_PEBS_ENABLE,
4087 .host = cpuc->pebs_enabled,
4093 if (!kvm_pmu || !x86_pmu.pebs_ept)
4096 arr[(*nr)++] = (struct perf_guest_switch_msr){
4097 .msr = MSR_IA32_DS_AREA,
4098 .host = (unsigned long)cpuc->ds,
4099 .guest = kvm_pmu->ds_area,
4102 if (x86_pmu.intel_cap.pebs_baseline) {
4103 arr[(*nr)++] = (struct perf_guest_switch_msr){
4104 .msr = MSR_PEBS_DATA_CFG,
4105 .host = cpuc->active_pebs_data_cfg,
4106 .guest = kvm_pmu->pebs_data_cfg,
4110 pebs_enable = (*nr)++;
4111 arr[pebs_enable] = (struct perf_guest_switch_msr){
4112 .msr = MSR_IA32_PEBS_ENABLE,
4113 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
4114 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
4117 if (arr[pebs_enable].host) {
4118 /* Disable guest PEBS if host PEBS is enabled. */
4119 arr[pebs_enable].guest = 0;
4121 /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
4122 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
4123 arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
4124 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
4125 arr[global_ctrl].guest |= arr[pebs_enable].guest;
4131 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
4133 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4134 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4137 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
4138 struct perf_event *event = cpuc->events[idx];
4140 arr[idx].msr = x86_pmu_config_addr(idx);
4141 arr[idx].host = arr[idx].guest = 0;
4143 if (!test_bit(idx, cpuc->active_mask))
4146 arr[idx].host = arr[idx].guest =
4147 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
4149 if (event->attr.exclude_host)
4150 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4151 else if (event->attr.exclude_guest)
4152 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4155 *nr = x86_pmu.num_counters;
4159 static void core_pmu_enable_event(struct perf_event *event)
4161 if (!event->attr.exclude_host)
4162 x86_pmu_enable_event(event);
4165 static void core_pmu_enable_all(int added)
4167 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4170 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
4171 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
4173 if (!test_bit(idx, cpuc->active_mask) ||
4174 cpuc->events[idx]->attr.exclude_host)
4177 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
4181 static int hsw_hw_config(struct perf_event *event)
4183 int ret = intel_pmu_hw_config(event);
4187 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
4189 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4192 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4193	 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
4196 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4197 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4198 event->attr.precise_ip > 0))
4201 if (event_is_checkpointed(event)) {
4203 * Sampling of checkpointed events can cause situations where
4204	 * the CPU constantly aborts because of an overflow, which is
4205 * then checkpointed back and ignored. Forbid checkpointing
4208 * But still allow a long sampling period, so that perf stat
4211 if (event->attr.sample_period > 0 &&
4212 event->attr.sample_period < 0x7fffffff)
4218 static struct event_constraint counter0_constraint =
4219 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4221 static struct event_constraint counter1_constraint =
4222 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2);
4224 static struct event_constraint counter0_1_constraint =
4225 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3);
4227 static struct event_constraint counter2_constraint =
4228 EVENT_CONSTRAINT(0, 0x4, 0);
4230 static struct event_constraint fixed0_constraint =
4231 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
4233 static struct event_constraint fixed0_counter0_constraint =
4234 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4236 static struct event_constraint fixed0_counter0_1_constraint =
4237 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
4239 static struct event_constraint counters_1_7_constraint =
4240 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
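/*
 * Canned counter-mask constraints returned by the *_get_event_constraints()
 * callbacks below, e.g. to pin :ppp (precise distribution) events onto the
 * counters that actually support them.
 */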
4242 static struct event_constraint *
4243 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4244 struct perf_event *event)
4246 struct event_constraint *c;
4248 c = intel_get_event_constraints(cpuc, idx, event);
4250 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
4251 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4252 if (c->idxmsk64 & (1U << 2))
4253 return &counter2_constraint;
4254 return &emptyconstraint;
4260 static struct event_constraint *
4261 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4262 struct perf_event *event)
4265 * Fixed counter 0 has less skid.
4266 * Force instruction:ppp in Fixed counter 0
4268 if ((event->attr.precise_ip == 3) &&
4269 constraint_match(&fixed0_constraint, event->hw.config))
4270 return &fixed0_constraint;
4272 return hsw_get_event_constraints(cpuc, idx, event);
4275 static struct event_constraint *
4276 spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4277 struct perf_event *event)
4279 struct event_constraint *c;
4281 c = icl_get_event_constraints(cpuc, idx, event);
4284 * The :ppp indicates the Precise Distribution (PDist) facility, which
4285	 * is only supported on GP counter 0. If a :ppp event cannot be
4286	 * scheduled on GP counter 0, error out.
4287 * Exception: Instruction PDIR is only available on the fixed counter 0.
4289 if ((event->attr.precise_ip == 3) &&
4290 !constraint_match(&fixed0_constraint, event->hw.config)) {
4291 if (c->idxmsk64 & BIT_ULL(0))
4292 return &counter0_constraint;
4294 return &emptyconstraint;
4300 static struct event_constraint *
4301 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4302 struct perf_event *event)
4304 struct event_constraint *c;
4306 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
4307 if (event->attr.precise_ip == 3)
4308 return &counter0_constraint;
4310 c = intel_get_event_constraints(cpuc, idx, event);
4315 static struct event_constraint *
4316 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4317 struct perf_event *event)
4319 struct event_constraint *c;
4321 c = intel_get_event_constraints(cpuc, idx, event);
4324 * :ppp means to do reduced skid PEBS,
4325 * which is available on PMC0 and fixed counter 0.
4327 if (event->attr.precise_ip == 3) {
4328 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
4329 if (constraint_match(&fixed0_constraint, event->hw.config))
4330 return &fixed0_counter0_constraint;
4332 return &counter0_constraint;
4338 static bool allow_tsx_force_abort = true;
4340 static struct event_constraint *
4341 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4342 struct perf_event *event)
4344 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4347 * Without TFA we must not use PMC3.
4349 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4350 c = dyn_constraint(cpuc, c, idx);
4351 c->idxmsk64 &= ~(1ULL << 3);
4358 static struct event_constraint *
4359 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4360 struct perf_event *event)
4362 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4364 if (pmu->cpu_type == hybrid_big)
4365 return spr_get_event_constraints(cpuc, idx, event);
4366 else if (pmu->cpu_type == hybrid_small)
4367 return tnt_get_event_constraints(cpuc, idx, event);
4370 return &emptyconstraint;
4373 static struct event_constraint *
4374 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4375 struct perf_event *event)
4377 struct event_constraint *c;
4379 c = intel_get_event_constraints(cpuc, idx, event);
4382 * The :ppp indicates the Precise Distribution (PDist) facility, which
4383 * is only supported on the GP counter 0 & 1 and Fixed counter 0.
4384	 * If a :ppp event cannot be scheduled on the eligible counters above, error out.
4387 if (event->attr.precise_ip == 3) {
4388 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
4389 if (constraint_match(&fixed0_constraint, event->hw.config))
4390 return &fixed0_counter0_1_constraint;
4392 switch (c->idxmsk64 & 0x3ull) {
4394 return &counter0_constraint;
4396 return &counter1_constraint;
4398 return &counter0_1_constraint;
4400 return &emptyconstraint;
4406 static struct event_constraint *
4407 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4408 struct perf_event *event)
4410 struct event_constraint *c;
4412 c = spr_get_event_constraints(cpuc, idx, event);
4414 /* The Retire Latency is not supported by the fixed counter 0. */
4415 if (event->attr.precise_ip &&
4416 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
4417 constraint_match(&fixed0_constraint, event->hw.config)) {
4419 * The Instruction PDIR is only available
4420 * on the fixed counter 0. Error out for this case.
4422 if (event->attr.precise_ip == 3)
4423 return &emptyconstraint;
4424 return &counters_1_7_constraint;
4430 static struct event_constraint *
4431 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4432 struct perf_event *event)
4434 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4436 if (pmu->cpu_type == hybrid_big)
4437 return rwc_get_event_constraints(cpuc, idx, event);
4438 if (pmu->cpu_type == hybrid_small)
4439 return cmt_get_event_constraints(cpuc, idx, event);
4442 return &emptyconstraint;
4445 static int adl_hw_config(struct perf_event *event)
4447 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4449 if (pmu->cpu_type == hybrid_big)
4450 return hsw_hw_config(event);
4451 else if (pmu->cpu_type == hybrid_small)
4452 return intel_pmu_hw_config(event);
4458 static u8 adl_get_hybrid_cpu_type(void)
4466 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
4467 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
4468 * the two to enforce a minimum period of 128 (the smallest value that has bits
4469 * 0-5 cleared and >= 100).
4471 * Because of how the code in x86_perf_event_set_period() works, the truncation
4472 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4473 * to make up for the 'lost' events due to carrying the 'error' in period_left.
4475 * Therefore the effective (average) period matches the requested period,
4476 * despite coarser hardware granularity.
4478 static void bdw_limit_period(struct perf_event *event, s64 *left)
4480 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4481 X86_CONFIG(.event=0xc0, .umask=0x01)) {
4488 static void nhm_limit_period(struct perf_event *event, s64 *left)
4490 *left = max(*left, 32LL);
4493 static void spr_limit_period(struct perf_event *event, s64 *left)
4495 if (event->attr.precise_ip == 3)
4496 *left = max(*left, 128LL);
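/*
 * sysfs "format" attributes: they tell userspace how fields such as
 * event, umask and cmask are packed into perf_event_attr::config.
 */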
4499 PMU_FORMAT_ATTR(event, "config:0-7" );
4500 PMU_FORMAT_ATTR(umask, "config:8-15" );
4501 PMU_FORMAT_ATTR(edge, "config:18" );
4502 PMU_FORMAT_ATTR(pc, "config:19" );
4503 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
4504 PMU_FORMAT_ATTR(inv, "config:23" );
4505 PMU_FORMAT_ATTR(cmask, "config:24-31" );
4506 PMU_FORMAT_ATTR(in_tx, "config:32");
4507 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
4509 static struct attribute *intel_arch_formats_attr[] = {
4510 &format_attr_event.attr,
4511 &format_attr_umask.attr,
4512 &format_attr_edge.attr,
4513 &format_attr_pc.attr,
4514 &format_attr_inv.attr,
4515 &format_attr_cmask.attr,
4519 ssize_t intel_event_sysfs_show(char *page, u64 config)
4521 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4523 return x86_event_sysfs_show(page, config, event);
4526 static struct intel_shared_regs *allocate_shared_regs(int cpu)
4528 struct intel_shared_regs *regs;
4531 regs = kzalloc_node(sizeof(struct intel_shared_regs),
4532 GFP_KERNEL, cpu_to_node(cpu));
4535 * initialize the locks to keep lockdep happy
4537 for (i = 0; i < EXTRA_REG_MAX; i++)
4538	 raw_spin_lock_init(&regs->regs[i].lock);
4545 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4547 struct intel_excl_cntrs *c;
4549 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4550 GFP_KERNEL, cpu_to_node(cpu));
4552 raw_spin_lock_init(&c->lock);
4559 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4561 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4563 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4564 cpuc->shared_regs = allocate_shared_regs(cpu);
4565 if (!cpuc->shared_regs)
4569 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
4570 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4572 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4573 if (!cpuc->constraint_list)
4574 goto err_shared_regs;
4577 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4578 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4579 if (!cpuc->excl_cntrs)
4580 goto err_constraint_list;
4582 cpuc->excl_thread_id = 0;
4587 err_constraint_list:
4588 kfree(cpuc->constraint_list);
4589 cpuc->constraint_list = NULL;
4592 kfree(cpuc->shared_regs);
4593 cpuc->shared_regs = NULL;
4599 static int intel_pmu_cpu_prepare(int cpu)
4601 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4604 static void flip_smm_bit(void *data)
4606 unsigned long set = *(unsigned long *)data;
4609 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4610 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4612 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4613 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4617 static void intel_pmu_check_num_counters(int *num_counters,
4618 int *num_counters_fixed,
4619 u64 *intel_ctrl, u64 fixed_mask);
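/*
 * Re-read this hybrid PMU's general/fixed counter bitmaps from the
 * architectural PerfMon extension CPUID leaf, then clamp them with
 * intel_pmu_check_num_counters().
 */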
4621 static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
4623 unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF);
4624 unsigned int eax, ebx, ecx, edx;
4626 if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
4627 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
4628 &eax, &ebx, &ecx, &edx);
4629 pmu->num_counters = fls(eax);
4630 pmu->num_counters_fixed = fls(ebx);
4631 intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed,
4632 &pmu->intel_ctrl, ebx);
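/*
 * Bind an onlining CPU to the hybrid PMU instance that matches its core
 * type, dumping the PMU capabilities once for the first CPU of each type.
 */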
4636 static bool init_hybrid_pmu(int cpu)
4638 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4639 u8 cpu_type = get_this_hybrid_cpu_type();
4640 struct x86_hybrid_pmu *pmu = NULL;
4643 if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
4644 cpu_type = x86_pmu.get_hybrid_cpu_type();
4646 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
4647 if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
4648 pmu = &x86_pmu.hybrid_pmu[i];
4652 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
4657 /* Only check and dump the PMU information for the first CPU */
4658 if (!cpumask_empty(&pmu->supported_cpus))
4661 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
4662 update_pmu_cap(pmu);
4664 if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
4667 pr_info("%s PMU driver: ", pmu->name);
4669 if (pmu->intel_cap.pebs_output_pt_available)
4670 pr_cont("PEBS-via-PT ");
4674 x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
4678 cpumask_set_cpu(cpu, &pmu->supported_cpus);
4679 cpuc->pmu = &pmu->pmu;
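/*
 * Hotplug 'starting' callback: set up DS/LBR/TFA state for the CPU and,
 * unless PMU_FL_NO_HT_SHARING is set, share shared_regs (and excl_cntrs)
 * with an already-online HT sibling on the same core.
 */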
4684 static void intel_pmu_cpu_starting(int cpu)
4686 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4687 int core_id = topology_core_id(cpu);
4690 if (is_hybrid() && !init_hybrid_pmu(cpu))
4693 init_debug_store_on_cpu(cpu);
4695 * Deal with CPUs that don't clear their LBRs on power-up.
4697 intel_pmu_lbr_reset();
4699 cpuc->lbr_sel = NULL;
4701 if (x86_pmu.flags & PMU_FL_TFA) {
4702 WARN_ON_ONCE(cpuc->tfa_shadow);
4703 cpuc->tfa_shadow = ~0ULL;
4704 intel_set_tfa(cpuc, false);
4707 if (x86_pmu.version > 1)
4708 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
4711 * Disable perf metrics if any added CPU doesn't support it.
4713 * Turn off the check for a hybrid architecture, because the
4714 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicate
4715 * the architecture features. The perf metrics is a model-specific
4716 * feature for now. The corresponding bit should always be 0 on
4717 * a hybrid platform, e.g., Alder Lake.
4719 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
4720 union perf_capabilities perf_cap;
4722 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
4723 if (!perf_cap.perf_metrics) {
4724 x86_pmu.intel_cap.perf_metrics = 0;
4725 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
4729 if (!cpuc->shared_regs)
4732 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
4733 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4734 struct intel_shared_regs *pc;
4736 pc = per_cpu(cpu_hw_events, i).shared_regs;
4737 if (pc && pc->core_id == core_id) {
4738 cpuc->kfree_on_online[0] = cpuc->shared_regs;
4739 cpuc->shared_regs = pc;
4743 cpuc->shared_regs->core_id = core_id;
4744 cpuc->shared_regs->refcnt++;
4747 if (x86_pmu.lbr_sel_map)
4748 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
4750 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4751 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4752 struct cpu_hw_events *sibling;
4753 struct intel_excl_cntrs *c;
4755 sibling = &per_cpu(cpu_hw_events, i);
4756 c = sibling->excl_cntrs;
4757 if (c && c->core_id == core_id) {
4758 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
4759 cpuc->excl_cntrs = c;
4760 if (!sibling->excl_thread_id)
4761 cpuc->excl_thread_id = 1;
4765 cpuc->excl_cntrs->core_id = core_id;
4766 cpuc->excl_cntrs->refcnt++;
4770 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
4772 struct intel_excl_cntrs *c;
4774 c = cpuc->excl_cntrs;
4776 if (c->core_id == -1 || --c->refcnt == 0)
4778 cpuc->excl_cntrs = NULL;
4781 kfree(cpuc->constraint_list);
4782 cpuc->constraint_list = NULL;
4785 static void intel_pmu_cpu_dying(int cpu)
4787 fini_debug_store_on_cpu(cpu);
4790 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4792 struct intel_shared_regs *pc;
4794 pc = cpuc->shared_regs;
4796 if (pc->core_id == -1 || --pc->refcnt == 0)
4798 cpuc->shared_regs = NULL;
4801 free_excl_cntrs(cpuc);
4804 static void intel_pmu_cpu_dead(int cpu)
4806 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4808 intel_cpuc_finish(cpuc);
4810 if (is_hybrid() && cpuc->pmu)
4811 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
4814 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
4817 intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
4818 intel_pmu_lbr_sched_task(pmu_ctx, sched_in);
4821 static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
4822 struct perf_event_pmu_context *next_epc)
4824 intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
4827 static int intel_pmu_check_period(struct perf_event *event, u64 value)
4829 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
4832 static void intel_aux_output_init(void)
4834 /* Refer also intel_pmu_aux_output_match() */
4835 if (x86_pmu.intel_cap.pebs_output_pt_available)
4836 x86_pmu.assign = intel_pmu_assign_event;
4839 static int intel_pmu_aux_output_match(struct perf_event *event)
4841 /* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */
4842 if (!x86_pmu.intel_cap.pebs_output_pt_available)
4845 return is_intel_pt_event(event);
4848 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
4850 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
4852 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
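/*
 * Extra "config1" format fields for the offcore response, load latency,
 * frontend and snoop response extra registers.
 */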
4855 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
4857 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
4859 PMU_FORMAT_ATTR(frontend, "config1:0-23");
4861 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63");
4863 static struct attribute *intel_arch3_formats_attr[] = {
4864 &format_attr_event.attr,
4865 &format_attr_umask.attr,
4866 &format_attr_edge.attr,
4867 &format_attr_pc.attr,
4868 &format_attr_any.attr,
4869 &format_attr_inv.attr,
4870 &format_attr_cmask.attr,
4874 static struct attribute *hsw_format_attr[] = {
4875 &format_attr_in_tx.attr,
4876 &format_attr_in_tx_cp.attr,
4877 &format_attr_offcore_rsp.attr,
4878 &format_attr_ldlat.attr,
4882 static struct attribute *nhm_format_attr[] = {
4883 &format_attr_offcore_rsp.attr,
4884 &format_attr_ldlat.attr,
4888 static struct attribute *slm_format_attr[] = {
4889 &format_attr_offcore_rsp.attr,
4893 static struct attribute *cmt_format_attr[] = {
4894 &format_attr_offcore_rsp.attr,
4895 &format_attr_ldlat.attr,
4896 &format_attr_snoop_rsp.attr,
4900 static struct attribute *skl_format_attr[] = {
4901 &format_attr_frontend.attr,
4905 static __initconst const struct x86_pmu core_pmu = {
4907 .handle_irq = x86_pmu_handle_irq,
4908 .disable_all = x86_pmu_disable_all,
4909 .enable_all = core_pmu_enable_all,
4910 .enable = core_pmu_enable_event,
4911 .disable = x86_pmu_disable_event,
4912 .hw_config = core_pmu_hw_config,
4913 .schedule_events = x86_schedule_events,
4914 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4915 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4916 .event_map = intel_pmu_event_map,
4917 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4919 .large_pebs_flags = LARGE_PEBS_FLAGS,
4922 * Intel PMCs cannot be accessed sanely above 32-bit width,
4923 * so we install an artificial 1<<31 period regardless of
4924 * the generic event period:
4926 .max_period = (1ULL<<31) - 1,
4927 .get_event_constraints = intel_get_event_constraints,
4928 .put_event_constraints = intel_put_event_constraints,
4929 .event_constraints = intel_core_event_constraints,
4930 .guest_get_msrs = core_guest_get_msrs,
4931 .format_attrs = intel_arch_formats_attr,
4932 .events_sysfs_show = intel_event_sysfs_show,
4935	 * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
4936	 * together with PMU version 1 and thus end up using core_pmu with
4937	 * shared_regs. We need the following callbacks here to allocate
4940 .cpu_prepare = intel_pmu_cpu_prepare,
4941 .cpu_starting = intel_pmu_cpu_starting,
4942 .cpu_dying = intel_pmu_cpu_dying,
4943 .cpu_dead = intel_pmu_cpu_dead,
4945 .check_period = intel_pmu_check_period,
4947 .lbr_reset = intel_pmu_lbr_reset_64,
4948 .lbr_read = intel_pmu_lbr_read_64,
4949 .lbr_save = intel_pmu_lbr_save,
4950 .lbr_restore = intel_pmu_lbr_restore,
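/*
 * Full-featured template for architectural perfmon v2+; intel_pmu_init()
 * further overrides individual callbacks per model.
 */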
4953 static __initconst const struct x86_pmu intel_pmu = {
4955 .handle_irq = intel_pmu_handle_irq,
4956 .disable_all = intel_pmu_disable_all,
4957 .enable_all = intel_pmu_enable_all,
4958 .enable = intel_pmu_enable_event,
4959 .disable = intel_pmu_disable_event,
4960 .add = intel_pmu_add_event,
4961 .del = intel_pmu_del_event,
4962 .read = intel_pmu_read_event,
4963 .set_period = intel_pmu_set_period,
4964 .update = intel_pmu_update,
4965 .hw_config = intel_pmu_hw_config,
4966 .schedule_events = x86_schedule_events,
4967 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4968 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4969 .event_map = intel_pmu_event_map,
4970 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4972 .large_pebs_flags = LARGE_PEBS_FLAGS,
4974 * Intel PMCs cannot be accessed sanely above 32 bit width,
4975 * so we install an artificial 1<<31 period regardless of
4976 * the generic event period:
4978 .max_period = (1ULL << 31) - 1,
4979 .get_event_constraints = intel_get_event_constraints,
4980 .put_event_constraints = intel_put_event_constraints,
4981 .pebs_aliases = intel_pebs_aliases_core2,
4983 .format_attrs = intel_arch3_formats_attr,
4984 .events_sysfs_show = intel_event_sysfs_show,
4986 .cpu_prepare = intel_pmu_cpu_prepare,
4987 .cpu_starting = intel_pmu_cpu_starting,
4988 .cpu_dying = intel_pmu_cpu_dying,
4989 .cpu_dead = intel_pmu_cpu_dead,
4991 .guest_get_msrs = intel_guest_get_msrs,
4992 .sched_task = intel_pmu_sched_task,
4993 .swap_task_ctx = intel_pmu_swap_task_ctx,
4995 .check_period = intel_pmu_check_period,
4997 .aux_output_match = intel_pmu_aux_output_match,
4999 .lbr_reset = intel_pmu_lbr_reset_64,
5000 .lbr_read = intel_pmu_lbr_read_64,
5001 .lbr_save = intel_pmu_lbr_save,
5002 .lbr_restore = intel_pmu_lbr_restore,
5005 * SMM has access to all 4 rings and while traditionally SMM code only
5006 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
5008 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
5009 * between SMM or not, this results in what should be pure userspace
5010 * counters including SMM data.
5012 * This is a clear privilege issue, therefore globally disable
5013 * counting SMM by default.
5015 .attr_freeze_on_smi = 1,
5018 static __init void intel_clovertown_quirk(void)
5021 * PEBS is unreliable due to:
5023 * AJ67 - PEBS may experience CPL leaks
5024 * AJ68 - PEBS PMI may be delayed by one event
5025 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
5026 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
5028 * AJ67 could be worked around by restricting the OS/USR flags.
5029 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
5031 * AJ106 could possibly be worked around by not allowing LBR
5032 * usage from PEBS, including the fixup.
5033 * AJ68 could possibly be worked around by always programming
5034 * a pebs_event_reset[0] value and coping with the lost events.
5036 * But taken together it might just make sense to not enable PEBS on
5039 pr_warn("PEBS disabled due to CPU errata\n");
5041 x86_pmu.pebs_constraints = NULL;
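/*
 * Per model/stepping minimum microcode revisions with working PEBS
 * isolation; intel_check_pebs_isolation() below sets pebs_no_isolation
 * on anything older.
 */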
5044 static const struct x86_cpu_desc isolation_ucodes[] = {
5045 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
5046 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
5047 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015),
5048 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
5049 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
5050 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
5051 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
5052 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010),
5053 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
5054 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
5055 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
5056 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
5057 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
5058 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
5059 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
5060 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
5061 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
5062 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000),
5063 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
5064 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
5065 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
5066 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e),
5067 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e),
5068 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e),
5069 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e),
5070 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e),
5071 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e),
5072 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e),
5073 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e),
5077 static void intel_check_pebs_isolation(void)
5079 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
5082 static __init void intel_pebs_isolation_quirk(void)
5084 WARN_ON_ONCE(x86_pmu.check_microcode);
5085 x86_pmu.check_microcode = intel_check_pebs_isolation;
5086 intel_check_pebs_isolation();
5089 static const struct x86_cpu_desc pebs_ucodes[] = {
5090 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
5091 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
5092 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
5096 static bool intel_snb_pebs_broken(void)
5098 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
5101 static void intel_snb_check_microcode(void)
5103 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
5107	 * Serialized by the microcode lock.
5109 if (x86_pmu.pebs_broken) {
5110 pr_info("PEBS enabled due to microcode update\n");
5111 x86_pmu.pebs_broken = 0;
5113 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
5114 x86_pmu.pebs_broken = 1;
5118 static bool is_lbr_from(unsigned long msr)
5120 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
5122 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
5126	 * Under certain circumstances, accessing certain MSRs may cause #GP.
5127	 * This function tests whether the input MSR can be safely accessed.
5129 static bool check_msr(unsigned long msr, u64 mask)
5131 u64 val_old, val_new, val_tmp;
5134 * Disable the check for real HW, so we don't
5135 * mess with potentially enabled registers:
5137 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
5141 * Read the current value, change it and read it back to see if it
5142 * matches, this is needed to detect certain hardware emulators
5143 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
5145 if (rdmsrl_safe(msr, &val_old))
5149 * Only change the bits which can be updated by wrmsrl.
5151 val_tmp = val_old ^ mask;
5153 if (is_lbr_from(msr))
5154 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
5156 if (wrmsrl_safe(msr, val_tmp) ||
5157 rdmsrl_safe(msr, &val_new))
5161 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
5162 * should equal rdmsrl()'s even with the quirk.
5164 if (val_new != val_tmp)
5167 if (is_lbr_from(msr))
5168 val_old = lbr_from_signext_quirk_wr(val_old);
5170	/* At this point the MSR is known to be safely accessible.
5171 * Restore the old value and return.
5173 wrmsrl(msr, val_old);
5178 static __init void intel_sandybridge_quirk(void)
5180 x86_pmu.check_microcode = intel_snb_check_microcode;
5182 intel_snb_check_microcode();
5186 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
5187 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
5188 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
5189 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
5190 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
5191 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
5192 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
5193 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
5196 static __init void intel_arch_events_quirk(void)
5200	/* disable events reported as not present by CPUID */
5201 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
5202 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
5203 pr_warn("CPUID marked event: \'%s\' unavailable\n",
5204 intel_arch_events_map[bit].name);
5208 static __init void intel_nehalem_quirk(void)
5210 union cpuid10_ebx ebx;
5212 ebx.full = x86_pmu.events_maskl;
5213 if (ebx.split.no_branch_misses_retired) {
5215	 * Erratum AAJ80 detected; work around it by using
5216 * the BR_MISP_EXEC.ANY event. This will over-count
5217 * branch-misses, but it's still much better than the
5218 * architectural event which is often completely bogus:
5220 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
5221 ebx.split.no_branch_misses_retired = 0;
5222 x86_pmu.events_maskl = ebx.full;
5223 pr_info("CPU erratum AAJ80 worked around\n");
5228 * enable software workaround for errata:
5233	 * Only needed when HT is enabled. However, detecting
5234	 * whether HT is enabled is difficult (model specific). So instead,
5235	 * we enable the workaround during early boot, and verify whether
5236 * it is needed in a later initcall phase once we have valid
5237 * topology information to check if HT is actually enabled
5239 static __init void intel_ht_bug(void)
5241 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
5243 x86_pmu.start_scheduling = intel_start_scheduling;
5244 x86_pmu.commit_scheduling = intel_commit_scheduling;
5245 x86_pmu.stop_scheduling = intel_stop_scheduling;
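/*
 * Named event aliases exported through sysfs; userspace (e.g. the perf
 * tool) parses the event=/umask=/... strings back into raw encodings.
 */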
5248 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
5249 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
5251 /* Haswell special events */
5252 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
5253 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
5254 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
5255 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
5256 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
5257 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
5258 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
5259 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
5260 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
5261 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
5262 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
5263 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
5265 static struct attribute *hsw_events_attrs[] = {
5266 EVENT_PTR(td_slots_issued),
5267 EVENT_PTR(td_slots_retired),
5268 EVENT_PTR(td_fetch_bubbles),
5269 EVENT_PTR(td_total_slots),
5270 EVENT_PTR(td_total_slots_scale),
5271 EVENT_PTR(td_recovery_bubbles),
5272 EVENT_PTR(td_recovery_bubbles_scale),
5276 static struct attribute *hsw_mem_events_attrs[] = {
5277 EVENT_PTR(mem_ld_hsw),
5278 EVENT_PTR(mem_st_hsw),
5282 static struct attribute *hsw_tsx_events_attrs[] = {
5283 EVENT_PTR(tx_start),
5284 EVENT_PTR(tx_commit),
5285 EVENT_PTR(tx_abort),
5286 EVENT_PTR(tx_capacity),
5287 EVENT_PTR(tx_conflict),
5288 EVENT_PTR(el_start),
5289 EVENT_PTR(el_commit),
5290 EVENT_PTR(el_abort),
5291 EVENT_PTR(el_capacity),
5292 EVENT_PTR(el_conflict),
5293 EVENT_PTR(cycles_t),
5294 EVENT_PTR(cycles_ct),
5298 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
5299 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
5300 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
5301 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
5303 static struct attribute *icl_events_attrs[] = {
5304 EVENT_PTR(mem_ld_hsw),
5305 EVENT_PTR(mem_st_hsw),
5309 static struct attribute *icl_td_events_attrs[] = {
5311 EVENT_PTR(td_retiring),
5312 EVENT_PTR(td_bad_spec),
5313 EVENT_PTR(td_fe_bound),
5314 EVENT_PTR(td_be_bound),
5318 static struct attribute *icl_tsx_events_attrs[] = {
5319 EVENT_PTR(tx_start),
5320 EVENT_PTR(tx_abort),
5321 EVENT_PTR(tx_commit),
5322 EVENT_PTR(tx_capacity_read),
5323 EVENT_PTR(tx_capacity_write),
5324 EVENT_PTR(tx_conflict),
5325 EVENT_PTR(el_start),
5326 EVENT_PTR(el_abort),
5327 EVENT_PTR(el_commit),
5328 EVENT_PTR(el_capacity_read),
5329 EVENT_PTR(el_capacity_write),
5330 EVENT_PTR(el_conflict),
5331 EVENT_PTR(cycles_t),
5332 EVENT_PTR(cycles_ct),
5337 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
5338 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
5340 static struct attribute *spr_events_attrs[] = {
5341 EVENT_PTR(mem_ld_hsw),
5342 EVENT_PTR(mem_st_spr),
5343 EVENT_PTR(mem_ld_aux),
5347 static struct attribute *spr_td_events_attrs[] = {
5349 EVENT_PTR(td_retiring),
5350 EVENT_PTR(td_bad_spec),
5351 EVENT_PTR(td_fe_bound),
5352 EVENT_PTR(td_be_bound),
5353 EVENT_PTR(td_heavy_ops),
5354 EVENT_PTR(td_br_mispredict),
5355 EVENT_PTR(td_fetch_lat),
5356 EVENT_PTR(td_mem_bound),
5360 static struct attribute *spr_tsx_events_attrs[] = {
5361 EVENT_PTR(tx_start),
5362 EVENT_PTR(tx_abort),
5363 EVENT_PTR(tx_commit),
5364 EVENT_PTR(tx_capacity_read),
5365 EVENT_PTR(tx_capacity_write),
5366 EVENT_PTR(tx_conflict),
5367 EVENT_PTR(cycles_t),
5368 EVENT_PTR(cycles_ct),
5372 static ssize_t freeze_on_smi_show(struct device *cdev,
5373 struct device_attribute *attr,
5376 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
5379 static DEFINE_MUTEX(freeze_on_smi_mutex);
5381 static ssize_t freeze_on_smi_store(struct device *cdev,
5382 struct device_attribute *attr,
5383 const char *buf, size_t count)
5388 ret = kstrtoul(buf, 0, &val);
5395 mutex_lock(&freeze_on_smi_mutex);
5397 if (x86_pmu.attr_freeze_on_smi == val)
5400 x86_pmu.attr_freeze_on_smi = val;
5403 on_each_cpu(flip_smm_bit, &val, 1);
5406 mutex_unlock(&freeze_on_smi_mutex);
5411 static void update_tfa_sched(void *ignored)
5413 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5416 * check if PMC3 is used
5417	 * and if so, force a reschedule for all event types and all contexts
5419 if (test_bit(3, cpuc->active_mask))
5420 perf_pmu_resched(x86_get_pmu(smp_processor_id()));
5423 static ssize_t show_sysctl_tfa(struct device *cdev,
5424 struct device_attribute *attr,
5427 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
5430 static ssize_t set_sysctl_tfa(struct device *cdev,
5431 struct device_attribute *attr,
5432 const char *buf, size_t count)
5437 ret = kstrtobool(buf, &val);
5442 if (val == allow_tsx_force_abort)
5445 allow_tsx_force_abort = val;
5448 on_each_cpu(update_tfa_sched, NULL, 1);
5455 static DEVICE_ATTR_RW(freeze_on_smi);
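/*
 * freeze_on_smi is typically exposed as
 * /sys/bus/event_source/devices/cpu/freeze_on_smi; writing it toggles the
 * FREEZE_IN_SMM bit of MSR_IA32_DEBUGCTLMSR on every CPU via flip_smm_bit().
 */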
5457 static ssize_t branches_show(struct device *cdev,
5458 struct device_attribute *attr,
5461 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
5464 static DEVICE_ATTR_RO(branches);
5466 static struct attribute *lbr_attrs[] = {
5467 &dev_attr_branches.attr,
5471 static char pmu_name_str[30];
5473 static ssize_t pmu_name_show(struct device *cdev,
5474 struct device_attribute *attr,
5477 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
5480 static DEVICE_ATTR_RO(pmu_name);
5482 static struct attribute *intel_pmu_caps_attrs[] = {
5483 &dev_attr_pmu_name.attr,
5487 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
5491 static struct attribute *intel_pmu_attrs[] = {
5492 &dev_attr_freeze_on_smi.attr,
5493 &dev_attr_allow_tsx_force_abort.attr,
5498 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5500 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
5504 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5506 return x86_pmu.pebs ? attr->mode : 0;
5510 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5512 if (attr == &event_attr_mem_ld_aux.attr.attr)
5513 return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
5515 return pebs_is_visible(kobj, attr, i);
5519 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5521 return x86_pmu.lbr_nr ? attr->mode : 0;
5525 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5527 return x86_pmu.version >= 2 ? attr->mode : 0;
5531 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5533 if (attr == &dev_attr_allow_tsx_force_abort.attr)
5534 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
5539 static struct attribute_group group_events_td = {
5543 static struct attribute_group group_events_mem = {
5545 .is_visible = mem_is_visible,
5548 static struct attribute_group group_events_tsx = {
5550 .is_visible = tsx_is_visible,
5553 static struct attribute_group group_caps_gen = {
5555 .attrs = intel_pmu_caps_attrs,
5558 static struct attribute_group group_caps_lbr = {
5561 .is_visible = lbr_is_visible,
5564 static struct attribute_group group_format_extra = {
5566 .is_visible = exra_is_visible,
5569 static struct attribute_group group_format_extra_skl = {
5571 .is_visible = exra_is_visible,
5574 static struct attribute_group group_default = {
5575 .attrs = intel_pmu_attrs,
5576 .is_visible = default_is_visible,
5579 static const struct attribute_group *attr_update[] = {
5585 &group_format_extra,
5586 &group_format_extra_skl,
5591 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
5592 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
5593 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
5594 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
5595 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
5596 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
5597 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
5598 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
5599 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
5601 static struct attribute *adl_hybrid_events_attrs[] = {
5602 EVENT_PTR(slots_adl),
5603 EVENT_PTR(td_retiring_adl),
5604 EVENT_PTR(td_bad_spec_adl),
5605 EVENT_PTR(td_fe_bound_adl),
5606 EVENT_PTR(td_be_bound_adl),
5607 EVENT_PTR(td_heavy_ops_adl),
5608 EVENT_PTR(td_br_mis_adl),
5609 EVENT_PTR(td_fetch_lat_adl),
5610 EVENT_PTR(td_mem_bound_adl),
5614 /* Must be in IDX order */
5615 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
5616 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
5617 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
5619 static struct attribute *adl_hybrid_mem_attrs[] = {
5620 EVENT_PTR(mem_ld_adl),
5621 EVENT_PTR(mem_st_adl),
5622 EVENT_PTR(mem_ld_aux_adl),
5626 static struct attribute *mtl_hybrid_mem_attrs[] = {
5627 EVENT_PTR(mem_ld_adl),
5628 EVENT_PTR(mem_st_adl),
5632 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
5633 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
5634 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
5635 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
5636 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
5637 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
5638 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
5639 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
5641 static struct attribute *adl_hybrid_tsx_attrs[] = {
5642 EVENT_PTR(tx_start_adl),
5643 EVENT_PTR(tx_abort_adl),
5644 EVENT_PTR(tx_commit_adl),
5645 EVENT_PTR(tx_capacity_read_adl),
5646 EVENT_PTR(tx_capacity_write_adl),
5647 EVENT_PTR(tx_conflict_adl),
5648 EVENT_PTR(cycles_t_adl),
5649 EVENT_PTR(cycles_ct_adl),
5653 FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
5654 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
5655 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
5656 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small);
5657 FORMAT_ATTR_HYBRID(frontend, hybrid_big);
5659 #define ADL_HYBRID_RTM_FORMAT_ATTR \
5660 FORMAT_HYBRID_PTR(in_tx), \
5661 FORMAT_HYBRID_PTR(in_tx_cp)
5663 #define ADL_HYBRID_FORMAT_ATTR \
5664 FORMAT_HYBRID_PTR(offcore_rsp), \
5665 FORMAT_HYBRID_PTR(ldlat), \
5666 FORMAT_HYBRID_PTR(frontend)
5668 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
5669 ADL_HYBRID_RTM_FORMAT_ATTR,
5670 ADL_HYBRID_FORMAT_ATTR,
5674 static struct attribute *adl_hybrid_extra_attr[] = {
5675 ADL_HYBRID_FORMAT_ATTR,
5679 FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small);
5681 static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
5682 ADL_HYBRID_RTM_FORMAT_ATTR,
5683 ADL_HYBRID_FORMAT_ATTR,
5684 FORMAT_HYBRID_PTR(snoop_rsp),
5688 static struct attribute *mtl_hybrid_extra_attr[] = {
5689 ADL_HYBRID_FORMAT_ATTR,
5690 FORMAT_HYBRID_PTR(snoop_rsp),
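/*
 * Visibility callbacks for the hybrid attribute groups: hide attributes on
 * PMU instances whose cpu_type does not match, or when the PMU currently
 * has no supported CPU online.
 */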
5694 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
5696 struct device *dev = kobj_to_dev(kobj);
5697 struct x86_hybrid_pmu *pmu =
5698 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5699 struct perf_pmu_events_hybrid_attr *pmu_attr =
5700 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
5702 return pmu->cpu_type & pmu_attr->pmu_type;
5705 static umode_t hybrid_events_is_visible(struct kobject *kobj,
5706 struct attribute *attr, int i)
5708 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
5711 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
5713 int cpu = cpumask_first(&pmu->supported_cpus);
5715 return (cpu >= nr_cpu_ids) ? -1 : cpu;
5718 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
5719 struct attribute *attr, int i)
5721 struct device *dev = kobj_to_dev(kobj);
5722 struct x86_hybrid_pmu *pmu =
5723 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5724 int cpu = hybrid_find_supported_cpu(pmu);
5726 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
5729 static umode_t hybrid_format_is_visible(struct kobject *kobj,
5730 struct attribute *attr, int i)
5732 struct device *dev = kobj_to_dev(kobj);
5733 struct x86_hybrid_pmu *pmu =
5734 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5735 struct perf_pmu_format_hybrid_attr *pmu_attr =
5736 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
5737 int cpu = hybrid_find_supported_cpu(pmu);
5739 return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0;
5742 static struct attribute_group hybrid_group_events_td = {
5744 .is_visible = hybrid_events_is_visible,
5747 static struct attribute_group hybrid_group_events_mem = {
5749 .is_visible = hybrid_events_is_visible,
5752 static struct attribute_group hybrid_group_events_tsx = {
5754 .is_visible = hybrid_tsx_is_visible,
5757 static struct attribute_group hybrid_group_format_extra = {
5759 .is_visible = hybrid_format_is_visible,
5762 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
5763 struct device_attribute *attr,
5766 struct x86_hybrid_pmu *pmu =
5767 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5769 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
5772 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
5773 static struct attribute *intel_hybrid_cpus_attrs[] = {
5774 &dev_attr_cpus.attr,
5778 static struct attribute_group hybrid_group_cpus = {
5779 .attrs = intel_hybrid_cpus_attrs,
5782 static const struct attribute_group *hybrid_attr_update[] = {
5783 &hybrid_group_events_td,
5784 &hybrid_group_events_mem,
5785 &hybrid_group_events_tsx,
5788 &hybrid_group_format_extra,
5794 static struct attribute *empty_attrs;
5796 static void intel_pmu_check_num_counters(int *num_counters,
5797 int *num_counters_fixed,
5798 u64 *intel_ctrl, u64 fixed_mask)
5800 if (*num_counters > INTEL_PMC_MAX_GENERIC) {
5801 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5802 *num_counters, INTEL_PMC_MAX_GENERIC);
5803 *num_counters = INTEL_PMC_MAX_GENERIC;
5805 *intel_ctrl = (1ULL << *num_counters) - 1;
5807 if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
5808 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5809 *num_counters_fixed, INTEL_PMC_MAX_FIXED);
5810 *num_counters_fixed = INTEL_PMC_MAX_FIXED;
5813 *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
5816 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
5818 int num_counters_fixed,
5821 struct event_constraint *c;
5823 if (!event_constraints)
5827	 * The event on fixed counter 2 (REF_CYCLES) only works on that
5828	 * counter, so do not extend its mask to the generic counters.
5830 for_each_event_constraint(c, event_constraints) {
5832 * Don't extend the topdown slots and metrics
5833 * events to the generic counters.
5835 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
5837 * Disable topdown slots and metrics events,
5838 * if slots event is not in CPUID.
5840 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
5842 c->weight = hweight64(c->idxmsk64);
5846 if (c->cmask == FIXED_EVENT_FLAGS) {
5847 /* Disabled fixed counters which are not in CPUID */
5848 c->idxmsk64 &= intel_ctrl;
5851 * Don't extend the pseudo-encoding to the
5854 if (!use_fixed_pseudo_encoding(c->code))
5855 c->idxmsk64 |= (1ULL << num_counters) - 1;
5858 ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
5859 c->weight = hweight64(c->idxmsk64);
5863 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
5865 struct extra_reg *er;
5868	 * Accessing extra MSRs may cause #GP under certain circumstances,
5869	 * e.g. KVM doesn't support offcore events.
5870	 * Check all extra_regs here.
5875 for (er = extra_regs; er->msr; er++) {
5876 er->extra_msr_access = check_msr(er->msr, 0x11UL);
5877 /* Disable LBR select mapping */
5878 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
5879 x86_pmu.lbr_sel_map = NULL;
5883 static void intel_pmu_check_hybrid_pmus(u64 fixed_mask)
5885 struct x86_hybrid_pmu *pmu;
5888 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
5889 pmu = &x86_pmu.hybrid_pmu[i];
5891 intel_pmu_check_num_counters(&pmu->num_counters,
5892 &pmu->num_counters_fixed,
5896 if (pmu->intel_cap.perf_metrics) {
5897 pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
5898 pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS;
5901 if (pmu->intel_cap.pebs_output_pt_available)
5902 pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
5904 intel_pmu_check_event_constraints(pmu->event_constraints,
5906 pmu->num_counters_fixed,
5909 intel_pmu_check_extra_regs(pmu->extra_regs);
5913 static __always_inline bool is_mtl(u8 x86_model)
5915 return (x86_model == INTEL_FAM6_METEORLAKE) ||
5916 (x86_model == INTEL_FAM6_METEORLAKE_L);
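/*
 * Main init: fall back to the P6/KNC/P4 drivers when architectural perfmon
 * is absent, otherwise probe CPUID leaf 0xa, start from the core_pmu or
 * intel_pmu template and apply per-model setup in the big switch below.
 */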
5919 __init int intel_pmu_init(void)
5921 struct attribute **extra_skl_attr = &empty_attrs;
5922 struct attribute **extra_attr = &empty_attrs;
5923 struct attribute **td_attr = &empty_attrs;
5924 struct attribute **mem_attr = &empty_attrs;
5925 struct attribute **tsx_attr = &empty_attrs;
5926 union cpuid10_edx edx;
5927 union cpuid10_eax eax;
5928 union cpuid10_ebx ebx;
5929 unsigned int fixed_mask;
5933 struct x86_hybrid_pmu *pmu;
5935 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
5936 switch (boot_cpu_data.x86) {
5938 return p6_pmu_init();
5940 return knc_pmu_init();
5942 return p4_pmu_init();
5948 * Check whether the Architectural PerfMon supports
5949 * Branch Misses Retired hw_event or not.
5951 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
5952 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
5955 version = eax.split.version_id;
5959 x86_pmu = intel_pmu;
5961 x86_pmu.version = version;
5962 x86_pmu.num_counters = eax.split.num_counters;
5963 x86_pmu.cntval_bits = eax.split.bit_width;
5964 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
5966 x86_pmu.events_maskl = ebx.full;
5967 x86_pmu.events_mask_len = eax.split.mask_length;
5969 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
5970 x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
5973 * Quirk: v2 perfmon does not report fixed-purpose events, so
5974 * assume at least 3 events, when not running in a hypervisor:
5976 if (version > 1 && version < 5) {
5977 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
5979 x86_pmu.num_counters_fixed =
5980 max((int)edx.split.num_counters_fixed, assume);
5982 fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
5983 } else if (version >= 5)
5984 x86_pmu.num_counters_fixed = fls(fixed_mask);
5986 if (boot_cpu_has(X86_FEATURE_PDCM)) {
5989 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
5990 x86_pmu.intel_cap.capabilities = capabilities;
5993 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
5994 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
5995 x86_pmu.lbr_read = intel_pmu_lbr_read_32;
5998 if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
5999 intel_pmu_arch_lbr_init();
6003 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
6006 x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
6007 if (x86_pmu.intel_cap.anythread_deprecated)
6008 pr_cont(" AnyThread deprecated, ");
6012 * Install the hw-cache-events table:
6014 switch (boot_cpu_data.x86_model) {
6015 case INTEL_FAM6_CORE_YONAH:
6016 pr_cont("Core events, ");
6020 case INTEL_FAM6_CORE2_MEROM:
6021 x86_add_quirk(intel_clovertown_quirk);
6024 case INTEL_FAM6_CORE2_MEROM_L:
6025 case INTEL_FAM6_CORE2_PENRYN:
6026 case INTEL_FAM6_CORE2_DUNNINGTON:
6027 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
6028 sizeof(hw_cache_event_ids));
6030 intel_pmu_lbr_init_core();
6032 x86_pmu.event_constraints = intel_core2_event_constraints;
6033 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
6034 pr_cont("Core2 events, ");
6038 case INTEL_FAM6_NEHALEM:
6039 case INTEL_FAM6_NEHALEM_EP:
6040 case INTEL_FAM6_NEHALEM_EX:
6041 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
6042 sizeof(hw_cache_event_ids));
6043 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
6044 sizeof(hw_cache_extra_regs));
6046 intel_pmu_lbr_init_nhm();
6048 x86_pmu.event_constraints = intel_nehalem_event_constraints;
6049 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
6050 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
6051 x86_pmu.extra_regs = intel_nehalem_extra_regs;
6052 x86_pmu.limit_period = nhm_limit_period;
6054 mem_attr = nhm_mem_events_attrs;
6056 /* UOPS_ISSUED.STALLED_CYCLES */
6057 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6058 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6059 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
6060 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6061 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
6063 intel_pmu_pebs_data_source_nhm();
6064 x86_add_quirk(intel_nehalem_quirk);
6065 x86_pmu.pebs_no_tlb = 1;
6066 extra_attr = nhm_format_attr;
6068 pr_cont("Nehalem events, ");
6072 case INTEL_FAM6_ATOM_BONNELL:
6073 case INTEL_FAM6_ATOM_BONNELL_MID:
6074 case INTEL_FAM6_ATOM_SALTWELL:
6075 case INTEL_FAM6_ATOM_SALTWELL_MID:
6076 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
6077 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
6078 sizeof(hw_cache_event_ids));
6080 intel_pmu_lbr_init_atom();
6082 x86_pmu.event_constraints = intel_gen_event_constraints;
6083 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
6084 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
6085 pr_cont("Atom events, ");
6089 case INTEL_FAM6_ATOM_SILVERMONT:
6090 case INTEL_FAM6_ATOM_SILVERMONT_D:
6091 case INTEL_FAM6_ATOM_SILVERMONT_MID:
6092 case INTEL_FAM6_ATOM_AIRMONT:
6093 case INTEL_FAM6_ATOM_AIRMONT_MID:
6094 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
6095 sizeof(hw_cache_event_ids));
6096 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
6097 sizeof(hw_cache_extra_regs));
6099 intel_pmu_lbr_init_slm();
6101 x86_pmu.event_constraints = intel_slm_event_constraints;
6102 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
6103 x86_pmu.extra_regs = intel_slm_extra_regs;
6104 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6105 td_attr = slm_events_attrs;
6106 extra_attr = slm_format_attr;
6107 pr_cont("Silvermont events, ");
6108 name = "silvermont";
6111 case INTEL_FAM6_ATOM_GOLDMONT:
6112 case INTEL_FAM6_ATOM_GOLDMONT_D:
6113 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
6114 sizeof(hw_cache_event_ids));
6115 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
6116 sizeof(hw_cache_extra_regs));
6118 intel_pmu_lbr_init_skl();
6120 x86_pmu.event_constraints = intel_slm_event_constraints;
6121 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
6122 x86_pmu.extra_regs = intel_glm_extra_regs;
6124 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
6125 * for precise cycles.
6126 * :pp is identical to :ppp
6128 x86_pmu.pebs_aliases = NULL;
6129 x86_pmu.pebs_prec_dist = true;
6130 x86_pmu.lbr_pt_coexist = true;
6131 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6132 td_attr = glm_events_attrs;
6133 extra_attr = slm_format_attr;
6134 pr_cont("Goldmont events, ");
6138 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
6139 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
6140 sizeof(hw_cache_event_ids));
6141 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
6142 sizeof(hw_cache_extra_regs));
6144 intel_pmu_lbr_init_skl();
6146 x86_pmu.event_constraints = intel_slm_event_constraints;
6147 x86_pmu.extra_regs = intel_glm_extra_regs;
6149 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
6150 * for precise cycles.
6152 x86_pmu.pebs_aliases = NULL;
6153 x86_pmu.pebs_prec_dist = true;
6154 x86_pmu.lbr_pt_coexist = true;
6155 x86_pmu.pebs_capable = ~0ULL;
6156 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6157 x86_pmu.flags |= PMU_FL_PEBS_ALL;
6158 x86_pmu.get_event_constraints = glp_get_event_constraints;
6159 td_attr = glm_events_attrs;
6160 /* Goldmont Plus has 4-wide pipeline */
6161 event_attr_td_total_slots_scale_glm.event_str = "4";
6162 extra_attr = slm_format_attr;
6163 pr_cont("Goldmont plus events, ");
6164 name = "goldmont_plus";
6167 case INTEL_FAM6_ATOM_TREMONT_D:
6168 case INTEL_FAM6_ATOM_TREMONT:
6169 case INTEL_FAM6_ATOM_TREMONT_L:
6170 x86_pmu.late_ack = true;
6171 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
6172 sizeof(hw_cache_event_ids));
6173 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
6174 sizeof(hw_cache_extra_regs));
6175 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6177 intel_pmu_lbr_init_skl();
6179 x86_pmu.event_constraints = intel_slm_event_constraints;
6180 x86_pmu.extra_regs = intel_tnt_extra_regs;
6182 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
6183 * for precise cycles.
6185 x86_pmu.pebs_aliases = NULL;
6186 x86_pmu.pebs_prec_dist = true;
6187 x86_pmu.lbr_pt_coexist = true;
6188 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6189 x86_pmu.get_event_constraints = tnt_get_event_constraints;
6190 td_attr = tnt_events_attrs;
6191 extra_attr = slm_format_attr;
6192 pr_cont("Tremont events, ");
6196 case INTEL_FAM6_ATOM_GRACEMONT:
6197 x86_pmu.mid_ack = true;
6198 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
6199 sizeof(hw_cache_event_ids));
6200 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
6201 sizeof(hw_cache_extra_regs));
6202 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6204 x86_pmu.event_constraints = intel_slm_event_constraints;
6205 x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
6206 x86_pmu.extra_regs = intel_grt_extra_regs;
6208 x86_pmu.pebs_aliases = NULL;
6209 x86_pmu.pebs_prec_dist = true;
6210 x86_pmu.pebs_block = true;
6211 x86_pmu.lbr_pt_coexist = true;
6212 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6213 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6215 intel_pmu_pebs_data_source_grt();
6216 x86_pmu.pebs_latency_data = adl_latency_data_small;
6217 x86_pmu.get_event_constraints = tnt_get_event_constraints;
6218 x86_pmu.limit_period = spr_limit_period;
6219 td_attr = tnt_events_attrs;
6220 mem_attr = grt_mem_attrs;
6221 extra_attr = nhm_format_attr;
6222 pr_cont("Gracemont events, ");
6226 case INTEL_FAM6_ATOM_CRESTMONT:
6227 case INTEL_FAM6_ATOM_CRESTMONT_X:
6228 x86_pmu.mid_ack = true;
6229 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
6230 sizeof(hw_cache_event_ids));
6231 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
6232 sizeof(hw_cache_extra_regs));
6233 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6235 x86_pmu.event_constraints = intel_slm_event_constraints;
6236 x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
6237 x86_pmu.extra_regs = intel_cmt_extra_regs;
6239 x86_pmu.pebs_aliases = NULL;
6240 x86_pmu.pebs_prec_dist = true;
6241 x86_pmu.lbr_pt_coexist = true;
6242 x86_pmu.pebs_block = true;
6243 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6244 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6246 intel_pmu_pebs_data_source_cmt();
6247 x86_pmu.pebs_latency_data = mtl_latency_data_small;
6248 x86_pmu.get_event_constraints = cmt_get_event_constraints;
6249 x86_pmu.limit_period = spr_limit_period;
6250 td_attr = cmt_events_attrs;
6251 mem_attr = grt_mem_attrs;
6252 extra_attr = cmt_format_attr;
6253 pr_cont("Crestmont events, ");
6257 case INTEL_FAM6_WESTMERE:
6258 case INTEL_FAM6_WESTMERE_EP:
6259 case INTEL_FAM6_WESTMERE_EX:
6260 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
6261 sizeof(hw_cache_event_ids));
6262 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
6263 sizeof(hw_cache_extra_regs));
6265 intel_pmu_lbr_init_nhm();
6267 x86_pmu.event_constraints = intel_westmere_event_constraints;
6268 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
6269 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
6270 x86_pmu.extra_regs = intel_westmere_extra_regs;
6271 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6273 mem_attr = nhm_mem_events_attrs;
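/* With cmask=1 and inv=1, these events count cycles in which fewer than one uop was issued/executed, i.e. stall cycles. */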
6275 /* UOPS_ISSUED.STALLED_CYCLES */
6276 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6277 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6278 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
6279 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6280 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
6282 intel_pmu_pebs_data_source_nhm();
6283 extra_attr = nhm_format_attr;
6284 pr_cont("Westmere events, ");
6288 case INTEL_FAM6_SANDYBRIDGE:
6289 case INTEL_FAM6_SANDYBRIDGE_X:
6290 x86_add_quirk(intel_sandybridge_quirk);
6291 x86_add_quirk(intel_ht_bug);
6292 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
6293 sizeof(hw_cache_event_ids));
6294 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
6295 sizeof(hw_cache_extra_regs));
6297 intel_pmu_lbr_init_snb();
6299 x86_pmu.event_constraints = intel_snb_event_constraints;
6300 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
6301 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
6302 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
6303 x86_pmu.extra_regs = intel_snbep_extra_regs;
6304 else
6305 x86_pmu.extra_regs = intel_snb_extra_regs;
6308 /* all extra regs are per-cpu when HT is on */
6309 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6310 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6312 td_attr = snb_events_attrs;
6313 mem_attr = snb_mem_events_attrs;
6315 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
6316 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6317 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6318 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
6319 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6320 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
6322 extra_attr = nhm_format_attr;
6324 pr_cont("SandyBridge events, ");
6325 name = "sandybridge";
6326 break;
6328 case INTEL_FAM6_IVYBRIDGE:
6329 case INTEL_FAM6_IVYBRIDGE_X:
6330 x86_add_quirk(intel_ht_bug);
6331 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
6332 sizeof(hw_cache_event_ids));
6333 /* dTLB-load-misses on IVB is different than SNB */
6334 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
6336 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
6337 sizeof(hw_cache_extra_regs));
6339 intel_pmu_lbr_init_snb();
6341 x86_pmu.event_constraints = intel_ivb_event_constraints;
6342 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
6343 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6344 x86_pmu.pebs_prec_dist = true;
6345 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
6346 x86_pmu.extra_regs = intel_snbep_extra_regs;
6347 else
6348 x86_pmu.extra_regs = intel_snb_extra_regs;
6349 /* all extra regs are per-cpu when HT is on */
6350 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6351 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6353 td_attr = snb_events_attrs;
6354 mem_attr = snb_mem_events_attrs;
6356 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
6357 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6358 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6360 extra_attr = nhm_format_attr;
6362 pr_cont("IvyBridge events, ");
6367 case INTEL_FAM6_HASWELL:
6368 case INTEL_FAM6_HASWELL_X:
6369 case INTEL_FAM6_HASWELL_L:
6370 case INTEL_FAM6_HASWELL_G:
6371 x86_add_quirk(intel_ht_bug);
6372 x86_add_quirk(intel_pebs_isolation_quirk);
6373 x86_pmu.late_ack = true;
6374 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6375 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6377 intel_pmu_lbr_init_hsw();
6379 x86_pmu.event_constraints = intel_hsw_event_constraints;
6380 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
6381 x86_pmu.extra_regs = intel_snbep_extra_regs;
6382 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6383 x86_pmu.pebs_prec_dist = true;
6384 /* all extra regs are per-cpu when HT is on */
6385 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6386 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6388 x86_pmu.hw_config = hsw_hw_config;
6389 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6390 x86_pmu.lbr_double_abort = true;
6391 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6392 hsw_format_attr : nhm_format_attr;
6393 td_attr = hsw_events_attrs;
6394 mem_attr = hsw_mem_events_attrs;
6395 tsx_attr = hsw_tsx_events_attrs;
6396 pr_cont("Haswell events, ");
6400 case INTEL_FAM6_BROADWELL:
6401 case INTEL_FAM6_BROADWELL_D:
6402 case INTEL_FAM6_BROADWELL_G:
6403 case INTEL_FAM6_BROADWELL_X:
6404 x86_add_quirk(intel_pebs_isolation_quirk);
6405 x86_pmu.late_ack = true;
6406 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6407 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6409 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
6410 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
6411 BDW_L3_MISS|HSW_SNOOP_DRAM;
6412 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
6413 HSW_SNOOP_DRAM;
6414 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
6415 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
6416 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
6417 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
6419 intel_pmu_lbr_init_hsw();
6421 x86_pmu.event_constraints = intel_bdw_event_constraints;
6422 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
6423 x86_pmu.extra_regs = intel_snbep_extra_regs;
6424 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6425 x86_pmu.pebs_prec_dist = true;
6426 /* all extra regs are per-cpu when HT is on */
6427 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6428 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6430 x86_pmu.hw_config = hsw_hw_config;
6431 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6432 x86_pmu.limit_period = bdw_limit_period;
6433 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6434 hsw_format_attr : nhm_format_attr;
6435 td_attr = hsw_events_attrs;
6436 mem_attr = hsw_mem_events_attrs;
6437 tsx_attr = hsw_tsx_events_attrs;
6438 pr_cont("Broadwell events, ");
6442 case INTEL_FAM6_XEON_PHI_KNL:
6443 case INTEL_FAM6_XEON_PHI_KNM:
6444 memcpy(hw_cache_event_ids,
6445 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6446 memcpy(hw_cache_extra_regs,
6447 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6448 intel_pmu_lbr_init_knl();
6450 x86_pmu.event_constraints = intel_slm_event_constraints;
6451 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
6452 x86_pmu.extra_regs = intel_knl_extra_regs;
6454 /* all extra regs are per-cpu when HT is on */
6455 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6456 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6457 extra_attr = slm_format_attr;
6458 pr_cont("Knights Landing/Mill events, ");
6459 name = "knights-landing";
6460 break;
6462 case INTEL_FAM6_SKYLAKE_X:
6463 pmem = true;
6464 fallthrough;
6465 case INTEL_FAM6_SKYLAKE_L:
6466 case INTEL_FAM6_SKYLAKE:
6467 case INTEL_FAM6_KABYLAKE_L:
6468 case INTEL_FAM6_KABYLAKE:
6469 case INTEL_FAM6_COMETLAKE_L:
6470 case INTEL_FAM6_COMETLAKE:
6471 x86_add_quirk(intel_pebs_isolation_quirk);
6472 x86_pmu.late_ack = true;
6473 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6474 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6475 intel_pmu_lbr_init_skl();
6477 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
6478 event_attr_td_recovery_bubbles.event_str_noht =
6479 "event=0xd,umask=0x1,cmask=1";
6480 event_attr_td_recovery_bubbles.event_str_ht =
6481 "event=0xd,umask=0x1,cmask=1,any=1";
6483 x86_pmu.event_constraints = intel_skl_event_constraints;
6484 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
6485 x86_pmu.extra_regs = intel_skl_extra_regs;
6486 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
6487 x86_pmu.pebs_prec_dist = true;
6488 /* all extra regs are per-cpu when HT is on */
6489 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6490 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6492 x86_pmu.hw_config = hsw_hw_config;
6493 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6494 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6495 hsw_format_attr : nhm_format_attr;
6496 extra_skl_attr = skl_format_attr;
6497 td_attr = hsw_events_attrs;
6498 mem_attr = hsw_mem_events_attrs;
6499 tsx_attr = hsw_tsx_events_attrs;
6500 intel_pmu_pebs_data_source_skl(pmem);
6502 /*
6503 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
6504 * TSX force abort hooks are not required on these systems. Only deploy
6505 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
6506 */
6507 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
6508 !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
6509 x86_pmu.flags |= PMU_FL_TFA;
6510 x86_pmu.get_event_constraints = tfa_get_event_constraints;
6511 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
6512 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
6513 }
6515 pr_cont("Skylake events, ");
6519 case INTEL_FAM6_ICELAKE_X:
6520 case INTEL_FAM6_ICELAKE_D:
6521 x86_pmu.pebs_ept = 1;
6522 pmem = true;
6523 fallthrough;
6524 case INTEL_FAM6_ICELAKE_L:
6525 case INTEL_FAM6_ICELAKE:
6526 case INTEL_FAM6_TIGERLAKE_L:
6527 case INTEL_FAM6_TIGERLAKE:
6528 case INTEL_FAM6_ROCKETLAKE:
6529 x86_pmu.late_ack = true;
6530 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6531 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6532 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6533 intel_pmu_lbr_init_skl();
6535 x86_pmu.event_constraints = intel_icl_event_constraints;
6536 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
6537 x86_pmu.extra_regs = intel_icl_extra_regs;
6538 x86_pmu.pebs_aliases = NULL;
6539 x86_pmu.pebs_prec_dist = true;
6540 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6541 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6543 x86_pmu.hw_config = hsw_hw_config;
6544 x86_pmu.get_event_constraints = icl_get_event_constraints;
6545 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6546 hsw_format_attr : nhm_format_attr;
6547 extra_skl_attr = skl_format_attr;
6548 mem_attr = icl_events_attrs;
6549 td_attr = icl_td_events_attrs;
6550 tsx_attr = icl_tsx_events_attrs;
6551 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6552 x86_pmu.lbr_pt_coexist = true;
6553 intel_pmu_pebs_data_source_skl(pmem);
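/* Ice Lake supports the four basic topdown metrics, read through the PERF_METRICS MSR together with the fixed SLOTS counter. */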
6554 x86_pmu.num_topdown_events = 4;
6555 static_call_update(intel_pmu_update_topdown_event,
6556 &icl_update_topdown_event);
6557 static_call_update(intel_pmu_set_topdown_event_period,
6558 &icl_set_topdown_event_period);
6559 pr_cont("Icelake events, ");
6563 case INTEL_FAM6_SAPPHIRERAPIDS_X:
6564 case INTEL_FAM6_EMERALDRAPIDS_X:
6565 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
6566 x86_pmu.extra_regs = intel_spr_extra_regs;
6567 fallthrough;
6568 case INTEL_FAM6_GRANITERAPIDS_X:
6569 case INTEL_FAM6_GRANITERAPIDS_D:
6570 pmem = true;
6571 x86_pmu.late_ack = true;
6572 memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6573 memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6575 x86_pmu.event_constraints = intel_spr_event_constraints;
6576 x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints;
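/* SPR/EMR already selected intel_spr_extra_regs before falling through; only GNR/GNR-D still have extra_regs unset here. */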
6577 if (!x86_pmu.extra_regs)
6578 x86_pmu.extra_regs = intel_gnr_extra_regs;
6579 x86_pmu.limit_period = spr_limit_period;
6580 x86_pmu.pebs_ept = 1;
6581 x86_pmu.pebs_aliases = NULL;
6582 x86_pmu.pebs_prec_dist = true;
6583 x86_pmu.pebs_block = true;
6584 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6585 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6586 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6588 x86_pmu.hw_config = hsw_hw_config;
6589 x86_pmu.get_event_constraints = spr_get_event_constraints;
6590 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6591 hsw_format_attr : nhm_format_attr;
6592 extra_skl_attr = skl_format_attr;
6593 mem_attr = spr_events_attrs;
6594 td_attr = spr_td_events_attrs;
6595 tsx_attr = spr_tsx_events_attrs;
6596 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6597 x86_pmu.lbr_pt_coexist = true;
6598 intel_pmu_pebs_data_source_skl(pmem);
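/* Sapphire Rapids adds four extended topdown metrics on top of the four basic ones. */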
6599 x86_pmu.num_topdown_events = 8;
6600 static_call_update(intel_pmu_update_topdown_event,
6601 &icl_update_topdown_event);
6602 static_call_update(intel_pmu_set_topdown_event_period,
6603 &icl_set_topdown_event_period);
6604 pr_cont("Sapphire Rapids events, ");
6605 name = "sapphire_rapids";
6606 break;
6608 case INTEL_FAM6_ALDERLAKE:
6609 case INTEL_FAM6_ALDERLAKE_L:
6610 case INTEL_FAM6_RAPTORLAKE:
6611 case INTEL_FAM6_RAPTORLAKE_P:
6612 case INTEL_FAM6_RAPTORLAKE_S:
6613 case INTEL_FAM6_METEORLAKE:
6614 case INTEL_FAM6_METEORLAKE_L:
6615 /*
6616 * Alder Lake has 2 types of CPU, core and atom.
6617 *
6618 * Initialize the common PerfMon capabilities here.
6619 */
6620 x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS,
6621 sizeof(struct x86_hybrid_pmu),
6622 GFP_KERNEL);
6623 if (!x86_pmu.hybrid_pmu)
6624 return -ENOMEM;
6625 static_branch_enable(&perf_is_hybrid);
6626 x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
6628 x86_pmu.pebs_aliases = NULL;
6629 x86_pmu.pebs_prec_dist = true;
6630 x86_pmu.pebs_block = true;
6631 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6632 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6633 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6634 x86_pmu.lbr_pt_coexist = true;
6635 x86_pmu.pebs_latency_data = adl_latency_data_small;
6636 x86_pmu.num_topdown_events = 8;
6637 static_call_update(intel_pmu_update_topdown_event,
6638 &adl_update_topdown_event);
6639 static_call_update(intel_pmu_set_topdown_event_period,
6640 &adl_set_topdown_event_period);
6642 x86_pmu.filter = intel_pmu_filter;
6643 x86_pmu.get_event_constraints = adl_get_event_constraints;
6644 x86_pmu.hw_config = adl_hw_config;
6645 x86_pmu.limit_period = spr_limit_period;
6646 x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
6647 /*
6648 * The rtm_abort_event is used to check whether to enable GPRs
6649 * for the RTM abort event. Atom doesn't have the RTM abort
6650 * event. There is no harm in setting it in the common
6651 * x86_pmu.rtm_abort_event.
6652 */
6653 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6655 td_attr = adl_hybrid_events_attrs;
6656 mem_attr = adl_hybrid_mem_attrs;
6657 tsx_attr = adl_hybrid_tsx_attrs;
6658 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6659 adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
6661 /* Initialize big core specific PerfMon capabilities.*/
6662 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
6663 pmu->name = "cpu_core";
6664 pmu->cpu_type = hybrid_big;
6665 pmu->late_ack = true;
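/* On hybrid parts, CPUID leaf 0xA enumerates the common (smallest) counter set; the big cores provide two more GP and one more fixed counter. */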
6666 if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
6667 pmu->num_counters = x86_pmu.num_counters + 2;
6668 pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
6669 } else {
6670 pmu->num_counters = x86_pmu.num_counters;
6671 pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
6672 }
6674 /*
6675 * Quirk: For some Alder Lake machines, when all E-cores are disabled in
6676 * the BIOS, leaf 0xA will enumerate all counters of the P-cores. However,
6677 * X86_FEATURE_HYBRID_CPU is still set. The code above would then
6678 * mistakenly add extra counters for the P-cores. Correct the number of
6679 * counters here.
6680 */
6681 if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) {
6682 pmu->num_counters = x86_pmu.num_counters;
6683 pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
6684 }
6686 pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
6687 pmu->unconstrained = (struct event_constraint)
6688 __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
6689 0, pmu->num_counters, 0, 0);
6690 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
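/* Topdown perf metrics are only implemented on the big cores; PEBS output to Intel PT is only available on the Atom cores. */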
6691 pmu->intel_cap.perf_metrics = 1;
6692 pmu->intel_cap.pebs_output_pt_available = 0;
6694 memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
6695 memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
6696 pmu->event_constraints = intel_spr_event_constraints;
6697 pmu->pebs_constraints = intel_spr_pebs_event_constraints;
6698 pmu->extra_regs = intel_spr_extra_regs;
6700 /* Initialize Atom core specific PerfMon capabilities.*/
6701 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
6702 pmu->name = "cpu_atom";
6703 pmu->cpu_type = hybrid_small;
6704 pmu->mid_ack = true;
6705 pmu->num_counters = x86_pmu.num_counters;
6706 pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
6707 pmu->max_pebs_events = x86_pmu.max_pebs_events;
6708 pmu->unconstrained = (struct event_constraint)
6709 __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
6710 0, pmu->num_counters, 0, 0);
6711 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
6712 pmu->intel_cap.perf_metrics = 0;
6713 pmu->intel_cap.pebs_output_pt_available = 1;
6715 memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
6716 memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
6717 pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6718 pmu->event_constraints = intel_slm_event_constraints;
6719 pmu->pebs_constraints = intel_grt_pebs_event_constraints;
6720 pmu->extra_regs = intel_grt_extra_regs;
6721 if (is_mtl(boot_cpu_data.x86_model)) {
6722 x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_gnr_extra_regs;
6723 x86_pmu.pebs_latency_data = mtl_latency_data_small;
6724 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6725 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
6726 mem_attr = mtl_hybrid_mem_attrs;
6727 intel_pmu_pebs_data_source_mtl();
6728 x86_pmu.get_event_constraints = mtl_get_event_constraints;
6729 pmu->extra_regs = intel_cmt_extra_regs;
6730 pr_cont("Meteorlake Hybrid events, ");
6731 name = "meteorlake_hybrid";
6732 } else {
6733 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
6734 intel_pmu_pebs_data_source_adl();
6735 pr_cont("Alderlake Hybrid events, ");
6736 name = "alderlake_hybrid";
6737 }
6738 break;
6740 default:
6741 switch (x86_pmu.version) {
6742 case 1:
6743 x86_pmu.event_constraints = intel_v1_event_constraints;
6744 pr_cont("generic architected perfmon v1, ");
6745 name = "generic_arch_v1";
6746 break;
6747 case 2:
6748 case 3:
6749 case 4:
6750 /*
6751 * default constraints for v2 and up
6752 */
6753 x86_pmu.event_constraints = intel_gen_event_constraints;
6754 pr_cont("generic architected perfmon, ");
6755 name = "generic_arch_v2+";
6756 break;
6757 default:
6758 /*
6759 * The default constraints for v5 and up can support up to
6760 * 16 fixed counters. For the fixed counters 4 and later,
6761 * the pseudo-encoding is applied.
6762 * The constraints may be cut according to the CPUID enumeration
6763 * by inserting the EVENT_CONSTRAINT_END.
6764 */
6765 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED)
6766 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
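/* A weight of -1 is the end marker (see EVENT_CONSTRAINT_END), trimming the table right after the last enumerated fixed counter. */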
6767 intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1;
6768 x86_pmu.event_constraints = intel_v5_gen_event_constraints;
6769 pr_cont("generic architected perfmon, ");
6770 name = "generic_arch_v5+";
6771 break;
6772 }
6773 }
6775 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
6777 if (!is_hybrid()) {
6778 group_events_td.attrs = td_attr;
6779 group_events_mem.attrs = mem_attr;
6780 group_events_tsx.attrs = tsx_attr;
6781 group_format_extra.attrs = extra_attr;
6782 group_format_extra_skl.attrs = extra_skl_attr;
6784 x86_pmu.attr_update = attr_update;
6785 } else {
6786 hybrid_group_events_td.attrs = td_attr;
6787 hybrid_group_events_mem.attrs = mem_attr;
6788 hybrid_group_events_tsx.attrs = tsx_attr;
6789 hybrid_group_format_extra.attrs = extra_attr;
6791 x86_pmu.attr_update = hybrid_attr_update;
6792 }
6794 intel_pmu_check_num_counters(&x86_pmu.num_counters,
6795 &x86_pmu.num_counters_fixed,
6796 &x86_pmu.intel_ctrl,
6799 /* AnyThread may be deprecated on arch perfmon v5 or later */
6800 if (x86_pmu.intel_cap.anythread_deprecated)
6801 x86_pmu.format_attrs = intel_arch_formats_attr;
6803 intel_pmu_check_event_constraints(x86_pmu.event_constraints,
6804 x86_pmu.num_counters,
6805 x86_pmu.num_counters_fixed,
6806 x86_pmu.intel_ctrl);
6807 /*
6808 * Accessing the LBR MSRs may cause a #GP under certain circumstances.
6809 * Check all LBR MSRs here.
6810 * Disable LBR access if any LBR MSR cannot be accessed.
6811 */
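/* check_msr() toggles the given bits and reads the value back; under a hypervisor this catches LBR MSRs that are not actually implemented. */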
6812 if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
6813 x86_pmu.lbr_nr = 0;
6814 for (i = 0; i < x86_pmu.lbr_nr; i++) {
6815 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
6816 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
6817 x86_pmu.lbr_nr = 0;
6818 }
6820 if (x86_pmu.lbr_nr) {
6821 intel_pmu_lbr_init();
6823 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
6825 /* only support branch_stack snapshot for perfmon >= v2 */
6826 if (x86_pmu.disable_all == intel_pmu_disable_all) {
6827 if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
6828 static_call_update(perf_snapshot_branch_stack,
6829 intel_pmu_snapshot_arch_branch_stack);
6830 } else {
6831 static_call_update(perf_snapshot_branch_stack,
6832 intel_pmu_snapshot_branch_stack);
6833 }
6834 }
6835 }
6837 intel_pmu_check_extra_regs(x86_pmu.extra_regs);
6839 /* Support full width counters using alternative MSR range */
6840 if (x86_pmu.intel_cap.full_width_write) {
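/* Full-width writes go through the MSR_IA32_PMC0 alias, so the sampling period is limited only by the counter width. */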
6841 x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
6842 x86_pmu.perfctr = MSR_IA32_PMC0;
6843 pr_cont("full-width counters, ");
6846 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
6847 x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
6849 if (is_hybrid())
6850 intel_pmu_check_hybrid_pmus((u64)fixed_mask);
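/* The PEBS timing-info capability adds a retire-latency field to PEBS records; flag it so the PEBS code can use it. */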
6852 if (x86_pmu.intel_cap.pebs_timing_info)
6853 x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;
6855 intel_aux_output_init();
6857 return 0;
6858 }
6860 /*
6861 * HT bug: phase 2 init
6862 * Called once we have valid topology information to check
6863 * whether or not HT is enabled
6864 * If HT is off, then we disable the workaround
6865 */
6866 static __init int fixup_ht_bug(void)
6867 {
6868 int c;
6869 /*
6870 * problem not present on this CPU model, nothing to do
6871 */
6872 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
6873 return 0;
6875 if (topology_max_smt_threads() > 1) {
6876 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
6882 hardlockup_detector_perf_stop();
6884 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
6886 x86_pmu.start_scheduling = NULL;
6887 x86_pmu.commit_scheduling = NULL;
6888 x86_pmu.stop_scheduling = NULL;
6890 hardlockup_detector_perf_restart();
6892 for_each_online_cpu(c)
6893 free_excl_cntrs(&per_cpu(cpu_hw_events, c));
6896 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
6899 subsys_initcall(fixup_ht_bug)