4 * Used to coordinate shared registers between HT threads or
5 * among events on a single PMU.
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/stddef.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
16 #include <asm/cpufeature.h>
17 #include <asm/hardirq.h>
20 #include "perf_event.h"
23 * Intel PerfMon, used on Core and later.
25 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
27 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
28 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
29 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
30 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
31 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
32 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
33 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
34 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
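/*
 * Illustrative note, not part of the original file: each map entry above
 * packs the architectural event select into bits 0-7 and the unit mask
 * into bits 8-15 of the raw config, so for example the cache entries
 * could equivalently be written with the X86_CONFIG() helper:
 *
 *	0x412e == X86_CONFIG(.event=0x2e, .umask=0x41)	(LLC misses)
 *	0x4f2e == X86_CONFIG(.event=0x2e, .umask=0x4f)	(LLC references)
 *
 * 0x0300 is a pseudo-encoding with no real event select; it is only
 * satisfied by a fixed counter, see FIXED_EVENT_CONSTRAINT(0x0300, 2)
 * in the constraint tables below.
 */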
37 static struct event_constraint intel_core_event_constraints[] __read_mostly =
39 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
40 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
41 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
42 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
43 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
44 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
48 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
50 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
51 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
52 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
53 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
54 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
55 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
56 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
57 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
58 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
59 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
60 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
61 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
62 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
66 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
68 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
69 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
70 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
71 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
72 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
73 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
74 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
75 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
76 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
77 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
78 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
82 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
84 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
85 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
86 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
90 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
92 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
93 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
94 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
95 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
96 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
97 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
98 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
102 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
104 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
105 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
106 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
107 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
108 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
109 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
110 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
111 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
112 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
113 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
114 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
115 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
119 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
121 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
122 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
123 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
124 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
125 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
126 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
127 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
128 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
129 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
130 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
131 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
132 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
133 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
135 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
136 * siblings; disable these events because they can corrupt unrelated counters.
139 INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
140 INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
141 INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
142 INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
146 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
148 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
149 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
150 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
151 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
155 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
160 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
162 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
163 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
164 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
168 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
170 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
171 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
172 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
173 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
177 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
178 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
179 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
180 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
181 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
185 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
186 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
187 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
188 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
189 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
193 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
194 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
195 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
197 struct attribute *nhm_events_attrs[] = {
198 EVENT_PTR(mem_ld_nhm),
202 struct attribute *snb_events_attrs[] = {
203 EVENT_PTR(mem_ld_snb),
204 EVENT_PTR(mem_st_snb),
208 static struct event_constraint intel_hsw_event_constraints[] = {
209 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
210 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
211 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
212 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
213 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
214 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
215 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
216 INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
217 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
218 INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
219 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
220 INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
224 static u64 intel_pmu_event_map(int hw_event)
226 return intel_perfmon_event_map[hw_event];
229 #define SNB_DMND_DATA_RD (1ULL << 0)
230 #define SNB_DMND_RFO (1ULL << 1)
231 #define SNB_DMND_IFETCH (1ULL << 2)
232 #define SNB_DMND_WB (1ULL << 3)
233 #define SNB_PF_DATA_RD (1ULL << 4)
234 #define SNB_PF_RFO (1ULL << 5)
235 #define SNB_PF_IFETCH (1ULL << 6)
236 #define SNB_LLC_DATA_RD (1ULL << 7)
237 #define SNB_LLC_RFO (1ULL << 8)
238 #define SNB_LLC_IFETCH (1ULL << 9)
239 #define SNB_BUS_LOCKS (1ULL << 10)
240 #define SNB_STRM_ST (1ULL << 11)
241 #define SNB_OTHER (1ULL << 15)
242 #define SNB_RESP_ANY (1ULL << 16)
243 #define SNB_NO_SUPP (1ULL << 17)
244 #define SNB_LLC_HITM (1ULL << 18)
245 #define SNB_LLC_HITE (1ULL << 19)
246 #define SNB_LLC_HITS (1ULL << 20)
247 #define SNB_LLC_HITF (1ULL << 21)
248 #define SNB_LOCAL (1ULL << 22)
249 #define SNB_REMOTE (0xffULL << 23)
250 #define SNB_SNP_NONE (1ULL << 31)
251 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
252 #define SNB_SNP_MISS (1ULL << 33)
253 #define SNB_NO_FWD (1ULL << 34)
254 #define SNB_SNP_FWD (1ULL << 35)
255 #define SNB_HITM (1ULL << 36)
256 #define SNB_NON_DRAM (1ULL << 37)
258 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
259 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
260 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
262 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
263 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
266 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
267 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
269 #define SNB_L3_ACCESS SNB_RESP_ANY
270 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
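/*
 * Worked example (added note, not in the original source): each generic
 * cache entry in the extra_regs table below is simply the OR of a request
 * type and a response type built from the bits above, e.g. the value
 * programmed into MSR_OFFCORE_RSP_x for a demand-read LLC miss is
 *
 *	SNB_DMND_READ | SNB_L3_MISS
 *	  == (SNB_DMND_DATA_RD | SNB_LLC_DATA_RD) | (SNB_DRAM_ANY | SNB_NON_DRAM)
 */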
272 static __initconst const u64 snb_hw_cache_extra_regs
273 [PERF_COUNT_HW_CACHE_MAX]
274 [PERF_COUNT_HW_CACHE_OP_MAX]
275 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
279 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
280 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
283 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
284 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
286 [ C(OP_PREFETCH) ] = {
287 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
288 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
293 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
294 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
297 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
298 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
300 [ C(OP_PREFETCH) ] = {
301 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
302 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
307 static __initconst const u64 snb_hw_cache_event_ids
308 [PERF_COUNT_HW_CACHE_MAX]
309 [PERF_COUNT_HW_CACHE_OP_MAX]
310 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
314 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
315 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
318 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
319 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
321 [ C(OP_PREFETCH) ] = {
322 [ C(RESULT_ACCESS) ] = 0x0,
323 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
328 [ C(RESULT_ACCESS) ] = 0x0,
329 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
332 [ C(RESULT_ACCESS) ] = -1,
333 [ C(RESULT_MISS) ] = -1,
335 [ C(OP_PREFETCH) ] = {
336 [ C(RESULT_ACCESS) ] = 0x0,
337 [ C(RESULT_MISS) ] = 0x0,
342 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
343 [ C(RESULT_ACCESS) ] = 0x01b7,
344 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
345 [ C(RESULT_MISS) ] = 0x01b7,
348 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
349 [ C(RESULT_ACCESS) ] = 0x01b7,
350 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
351 [ C(RESULT_MISS) ] = 0x01b7,
353 [ C(OP_PREFETCH) ] = {
354 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
355 [ C(RESULT_ACCESS) ] = 0x01b7,
356 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
357 [ C(RESULT_MISS) ] = 0x01b7,
362 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
363 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
366 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
367 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
369 [ C(OP_PREFETCH) ] = {
370 [ C(RESULT_ACCESS) ] = 0x0,
371 [ C(RESULT_MISS) ] = 0x0,
376 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
377 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
380 [ C(RESULT_ACCESS) ] = -1,
381 [ C(RESULT_MISS) ] = -1,
383 [ C(OP_PREFETCH) ] = {
384 [ C(RESULT_ACCESS) ] = -1,
385 [ C(RESULT_MISS) ] = -1,
390 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
391 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
394 [ C(RESULT_ACCESS) ] = -1,
395 [ C(RESULT_MISS) ] = -1,
397 [ C(OP_PREFETCH) ] = {
398 [ C(RESULT_ACCESS) ] = -1,
399 [ C(RESULT_MISS) ] = -1,
404 [ C(RESULT_ACCESS) ] = 0x01b7,
405 [ C(RESULT_MISS) ] = 0x01b7,
408 [ C(RESULT_ACCESS) ] = 0x01b7,
409 [ C(RESULT_MISS) ] = 0x01b7,
411 [ C(OP_PREFETCH) ] = {
412 [ C(RESULT_ACCESS) ] = 0x01b7,
413 [ C(RESULT_MISS) ] = 0x01b7,
419 static __initconst const u64 westmere_hw_cache_event_ids
420 [PERF_COUNT_HW_CACHE_MAX]
421 [PERF_COUNT_HW_CACHE_OP_MAX]
422 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
426 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
427 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
430 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
431 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
433 [ C(OP_PREFETCH) ] = {
434 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
435 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
440 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
441 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
444 [ C(RESULT_ACCESS) ] = -1,
445 [ C(RESULT_MISS) ] = -1,
447 [ C(OP_PREFETCH) ] = {
448 [ C(RESULT_ACCESS) ] = 0x0,
449 [ C(RESULT_MISS) ] = 0x0,
454 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
455 [ C(RESULT_ACCESS) ] = 0x01b7,
456 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
457 [ C(RESULT_MISS) ] = 0x01b7,
460 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
464 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
465 [ C(RESULT_ACCESS) ] = 0x01b7,
466 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
467 [ C(RESULT_MISS) ] = 0x01b7,
469 [ C(OP_PREFETCH) ] = {
470 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
471 [ C(RESULT_ACCESS) ] = 0x01b7,
472 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
473 [ C(RESULT_MISS) ] = 0x01b7,
478 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
479 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
482 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
483 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
485 [ C(OP_PREFETCH) ] = {
486 [ C(RESULT_ACCESS) ] = 0x0,
487 [ C(RESULT_MISS) ] = 0x0,
492 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
493 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
496 [ C(RESULT_ACCESS) ] = -1,
497 [ C(RESULT_MISS) ] = -1,
499 [ C(OP_PREFETCH) ] = {
500 [ C(RESULT_ACCESS) ] = -1,
501 [ C(RESULT_MISS) ] = -1,
506 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
507 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
510 [ C(RESULT_ACCESS) ] = -1,
511 [ C(RESULT_MISS) ] = -1,
513 [ C(OP_PREFETCH) ] = {
514 [ C(RESULT_ACCESS) ] = -1,
515 [ C(RESULT_MISS) ] = -1,
520 [ C(RESULT_ACCESS) ] = 0x01b7,
521 [ C(RESULT_MISS) ] = 0x01b7,
524 [ C(RESULT_ACCESS) ] = 0x01b7,
525 [ C(RESULT_MISS) ] = 0x01b7,
527 [ C(OP_PREFETCH) ] = {
528 [ C(RESULT_ACCESS) ] = 0x01b7,
529 [ C(RESULT_MISS) ] = 0x01b7,
535 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
536 * See IA32 SDM Vol 3B 30.6.1.3
539 #define NHM_DMND_DATA_RD (1 << 0)
540 #define NHM_DMND_RFO (1 << 1)
541 #define NHM_DMND_IFETCH (1 << 2)
542 #define NHM_DMND_WB (1 << 3)
543 #define NHM_PF_DATA_RD (1 << 4)
544 #define NHM_PF_DATA_RFO (1 << 5)
545 #define NHM_PF_IFETCH (1 << 6)
546 #define NHM_OFFCORE_OTHER (1 << 7)
547 #define NHM_UNCORE_HIT (1 << 8)
548 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
549 #define NHM_OTHER_CORE_HITM (1 << 10)
551 #define NHM_REMOTE_CACHE_FWD (1 << 12)
552 #define NHM_REMOTE_DRAM (1 << 13)
553 #define NHM_LOCAL_DRAM (1 << 14)
554 #define NHM_NON_DRAM (1 << 15)
556 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
557 #define NHM_REMOTE (NHM_REMOTE_DRAM)
559 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
560 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
561 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
563 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
564 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
565 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
567 static __initconst const u64 nehalem_hw_cache_extra_regs
568 [PERF_COUNT_HW_CACHE_MAX]
569 [PERF_COUNT_HW_CACHE_OP_MAX]
570 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
574 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
575 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
578 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
579 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
581 [ C(OP_PREFETCH) ] = {
582 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
583 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
588 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
589 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
592 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
593 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
595 [ C(OP_PREFETCH) ] = {
596 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
597 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
602 static __initconst const u64 nehalem_hw_cache_event_ids
603 [PERF_COUNT_HW_CACHE_MAX]
604 [PERF_COUNT_HW_CACHE_OP_MAX]
605 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
609 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
610 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
613 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
614 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
616 [ C(OP_PREFETCH) ] = {
617 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
618 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
623 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
624 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
627 [ C(RESULT_ACCESS) ] = -1,
628 [ C(RESULT_MISS) ] = -1,
630 [ C(OP_PREFETCH) ] = {
631 [ C(RESULT_ACCESS) ] = 0x0,
632 [ C(RESULT_MISS) ] = 0x0,
637 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
638 [ C(RESULT_ACCESS) ] = 0x01b7,
639 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
640 [ C(RESULT_MISS) ] = 0x01b7,
643 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
647 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
648 [ C(RESULT_ACCESS) ] = 0x01b7,
649 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
650 [ C(RESULT_MISS) ] = 0x01b7,
652 [ C(OP_PREFETCH) ] = {
653 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
654 [ C(RESULT_ACCESS) ] = 0x01b7,
655 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
656 [ C(RESULT_MISS) ] = 0x01b7,
661 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
662 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
665 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
666 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
668 [ C(OP_PREFETCH) ] = {
669 [ C(RESULT_ACCESS) ] = 0x0,
670 [ C(RESULT_MISS) ] = 0x0,
675 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
676 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
679 [ C(RESULT_ACCESS) ] = -1,
680 [ C(RESULT_MISS) ] = -1,
682 [ C(OP_PREFETCH) ] = {
683 [ C(RESULT_ACCESS) ] = -1,
684 [ C(RESULT_MISS) ] = -1,
689 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
690 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
693 [ C(RESULT_ACCESS) ] = -1,
694 [ C(RESULT_MISS) ] = -1,
696 [ C(OP_PREFETCH) ] = {
697 [ C(RESULT_ACCESS) ] = -1,
698 [ C(RESULT_MISS) ] = -1,
703 [ C(RESULT_ACCESS) ] = 0x01b7,
704 [ C(RESULT_MISS) ] = 0x01b7,
707 [ C(RESULT_ACCESS) ] = 0x01b7,
708 [ C(RESULT_MISS) ] = 0x01b7,
710 [ C(OP_PREFETCH) ] = {
711 [ C(RESULT_ACCESS) ] = 0x01b7,
712 [ C(RESULT_MISS) ] = 0x01b7,
717 static __initconst const u64 core2_hw_cache_event_ids
718 [PERF_COUNT_HW_CACHE_MAX]
719 [PERF_COUNT_HW_CACHE_OP_MAX]
720 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
724 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
725 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
728 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
729 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
731 [ C(OP_PREFETCH) ] = {
732 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
733 [ C(RESULT_MISS) ] = 0,
738 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
739 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
742 [ C(RESULT_ACCESS) ] = -1,
743 [ C(RESULT_MISS) ] = -1,
745 [ C(OP_PREFETCH) ] = {
746 [ C(RESULT_ACCESS) ] = 0,
747 [ C(RESULT_MISS) ] = 0,
752 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
753 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
756 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
757 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
759 [ C(OP_PREFETCH) ] = {
760 [ C(RESULT_ACCESS) ] = 0,
761 [ C(RESULT_MISS) ] = 0,
766 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
767 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
770 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
771 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
773 [ C(OP_PREFETCH) ] = {
774 [ C(RESULT_ACCESS) ] = 0,
775 [ C(RESULT_MISS) ] = 0,
780 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
781 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
784 [ C(RESULT_ACCESS) ] = -1,
785 [ C(RESULT_MISS) ] = -1,
787 [ C(OP_PREFETCH) ] = {
788 [ C(RESULT_ACCESS) ] = -1,
789 [ C(RESULT_MISS) ] = -1,
794 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
795 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
798 [ C(RESULT_ACCESS) ] = -1,
799 [ C(RESULT_MISS) ] = -1,
801 [ C(OP_PREFETCH) ] = {
802 [ C(RESULT_ACCESS) ] = -1,
803 [ C(RESULT_MISS) ] = -1,
808 static __initconst const u64 atom_hw_cache_event_ids
809 [PERF_COUNT_HW_CACHE_MAX]
810 [PERF_COUNT_HW_CACHE_OP_MAX]
811 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
815 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
816 [ C(RESULT_MISS) ] = 0,
819 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
820 [ C(RESULT_MISS) ] = 0,
822 [ C(OP_PREFETCH) ] = {
823 [ C(RESULT_ACCESS) ] = 0x0,
824 [ C(RESULT_MISS) ] = 0,
829 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
830 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
833 [ C(RESULT_ACCESS) ] = -1,
834 [ C(RESULT_MISS) ] = -1,
836 [ C(OP_PREFETCH) ] = {
837 [ C(RESULT_ACCESS) ] = 0,
838 [ C(RESULT_MISS) ] = 0,
843 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
844 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
847 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
848 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
850 [ C(OP_PREFETCH) ] = {
851 [ C(RESULT_ACCESS) ] = 0,
852 [ C(RESULT_MISS) ] = 0,
857 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
858 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
861 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
862 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
864 [ C(OP_PREFETCH) ] = {
865 [ C(RESULT_ACCESS) ] = 0,
866 [ C(RESULT_MISS) ] = 0,
871 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
872 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
875 [ C(RESULT_ACCESS) ] = -1,
876 [ C(RESULT_MISS) ] = -1,
878 [ C(OP_PREFETCH) ] = {
879 [ C(RESULT_ACCESS) ] = -1,
880 [ C(RESULT_MISS) ] = -1,
885 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
886 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
889 [ C(RESULT_ACCESS) ] = -1,
890 [ C(RESULT_MISS) ] = -1,
892 [ C(OP_PREFETCH) ] = {
893 [ C(RESULT_ACCESS) ] = -1,
894 [ C(RESULT_MISS) ] = -1,
899 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
901 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
902 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
903 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
907 #define SLM_DMND_READ SNB_DMND_DATA_RD
908 #define SLM_DMND_WRITE SNB_DMND_RFO
909 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
911 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
912 #define SLM_LLC_ACCESS SNB_RESP_ANY
913 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
915 static __initconst const u64 slm_hw_cache_extra_regs
916 [PERF_COUNT_HW_CACHE_MAX]
917 [PERF_COUNT_HW_CACHE_OP_MAX]
918 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
922 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
923 [ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS,
926 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
927 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
929 [ C(OP_PREFETCH) ] = {
930 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
931 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
936 static __initconst const u64 slm_hw_cache_event_ids
937 [PERF_COUNT_HW_CACHE_MAX]
938 [PERF_COUNT_HW_CACHE_OP_MAX]
939 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
943 [ C(RESULT_ACCESS) ] = 0,
944 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
947 [ C(RESULT_ACCESS) ] = 0,
948 [ C(RESULT_MISS) ] = 0,
950 [ C(OP_PREFETCH) ] = {
951 [ C(RESULT_ACCESS) ] = 0,
952 [ C(RESULT_MISS) ] = 0,
957 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
958 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
961 [ C(RESULT_ACCESS) ] = -1,
962 [ C(RESULT_MISS) ] = -1,
964 [ C(OP_PREFETCH) ] = {
965 [ C(RESULT_ACCESS) ] = 0,
966 [ C(RESULT_MISS) ] = 0,
971 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
972 [ C(RESULT_ACCESS) ] = 0x01b7,
973 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
974 [ C(RESULT_MISS) ] = 0x01b7,
977 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
978 [ C(RESULT_ACCESS) ] = 0x01b7,
979 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
980 [ C(RESULT_MISS) ] = 0x01b7,
982 [ C(OP_PREFETCH) ] = {
983 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
984 [ C(RESULT_ACCESS) ] = 0x01b7,
985 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
986 [ C(RESULT_MISS) ] = 0x01b7,
991 [ C(RESULT_ACCESS) ] = 0,
992 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
995 [ C(RESULT_ACCESS) ] = 0,
996 [ C(RESULT_MISS) ] = 0,
998 [ C(OP_PREFETCH) ] = {
999 [ C(RESULT_ACCESS) ] = 0,
1000 [ C(RESULT_MISS) ] = 0,
1005 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1006 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1009 [ C(RESULT_ACCESS) ] = -1,
1010 [ C(RESULT_MISS) ] = -1,
1012 [ C(OP_PREFETCH) ] = {
1013 [ C(RESULT_ACCESS) ] = -1,
1014 [ C(RESULT_MISS) ] = -1,
1019 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1020 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1023 [ C(RESULT_ACCESS) ] = -1,
1024 [ C(RESULT_MISS) ] = -1,
1026 [ C(OP_PREFETCH) ] = {
1027 [ C(RESULT_ACCESS) ] = -1,
1028 [ C(RESULT_MISS) ] = -1,
1033 static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
1035 /* user explicitly requested branch sampling */
1036 if (has_branch_stack(event))
1039 /* implicit branch sampling to correct PEBS skid */
1040 if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
1041 x86_pmu.intel_cap.pebs_format < 2)
1047 static void intel_pmu_disable_all(void)
1049 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1051 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1053 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1054 intel_pmu_disable_bts();
1056 intel_pmu_pebs_disable_all();
1057 intel_pmu_lbr_disable_all();
1060 static void intel_pmu_enable_all(int added)
1062 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1064 intel_pmu_pebs_enable_all();
1065 intel_pmu_lbr_enable_all();
1066 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1067 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1069 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1070 struct perf_event *event =
1071 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1073 if (WARN_ON_ONCE(!event))
1076 intel_pmu_enable_bts(event->hw.config);
1082 * Intel Errata AAK100 (model 26)
1083 * Intel Errata AAP53 (model 30)
1084 * Intel Errata BD53 (model 44)
1086 * The official story:
1087 * These chips need to be 'reset' when adding counters by programming the
1088 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
1089 * in sequence on the same PMC or on different PMCs.
1091 * In practice it appears some of these events do in fact count, and
1092 * we need to program all 4 events.
1094 static void intel_pmu_nhm_workaround(void)
1096 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1097 static const unsigned long nhm_magic[4] = {
1103 struct perf_event *event;
1107 * The erratum requires the following steps:
1108 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
1109 * 2) Configure 4 PERFEVTSELx with the magic events and clear
1110 * the corresponding PMCx;
1111 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
1112 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
1113 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
1117 * The real steps we choose are a little different from above.
1118 * A) To reduce MSR operations, we don't run step 1) as they
1119 * are already cleared before this function is called;
1120 * B) Call x86_perf_event_update to save PMCx before configuring
1121 * PERFEVTSELx with magic number;
1122 * C) With step 5), we do clear only when the PERFEVTSELx is
1123 * not used currently.
1124 * D) Call x86_perf_event_set_period to restore PMCx;
1127 /* We always operate 4 pairs of PERF Counters */
1128 for (i = 0; i < 4; i++) {
1129 event = cpuc->events[i];
1131 x86_perf_event_update(event);
1134 for (i = 0; i < 4; i++) {
1135 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
1136 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
1139 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
1140 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
1142 for (i = 0; i < 4; i++) {
1143 event = cpuc->events[i];
1146 x86_perf_event_set_period(event);
1147 __x86_pmu_enable_event(&event->hw,
1148 ARCH_PERFMON_EVENTSEL_ENABLE);
1150 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
1154 static void intel_pmu_nhm_enable_all(int added)
1157 intel_pmu_nhm_workaround();
1158 intel_pmu_enable_all(added);
1161 static inline u64 intel_pmu_get_status(void)
1165 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1170 static inline void intel_pmu_ack_status(u64 ack)
1172 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1175 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
1177 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1180 mask = 0xfULL << (idx * 4);
1182 rdmsrl(hwc->config_base, ctrl_val);
1184 wrmsrl(hwc->config_base, ctrl_val);
1187 static inline bool event_is_checkpointed(struct perf_event *event)
1189 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
1192 static void intel_pmu_disable_event(struct perf_event *event)
1194 struct hw_perf_event *hwc = &event->hw;
1195 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1197 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1198 intel_pmu_disable_bts();
1199 intel_pmu_drain_bts_buffer();
1203 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
1204 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
1205 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
1208 * must be disabled before any actual event
1209 * because any event may be combined with LBR
1211 if (intel_pmu_needs_lbr_smpl(event))
1212 intel_pmu_lbr_disable(event);
1214 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1215 intel_pmu_disable_fixed(hwc);
1219 x86_pmu_disable_event(event);
1221 if (unlikely(event->attr.precise_ip))
1222 intel_pmu_pebs_disable(event);
1225 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
1227 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1228 u64 ctrl_val, bits, mask;
1231 * Enable IRQ generation (0x8),
1232 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1236 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1238 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1242 * ANY bit is supported in v3 and up
1244 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1248 mask = 0xfULL << (idx * 4);
1250 rdmsrl(hwc->config_base, ctrl_val);
1253 wrmsrl(hwc->config_base, ctrl_val);
1256 static void intel_pmu_enable_event(struct perf_event *event)
1258 struct hw_perf_event *hwc = &event->hw;
1259 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1261 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1262 if (!__this_cpu_read(cpu_hw_events.enabled))
1265 intel_pmu_enable_bts(hwc->config);
1269 * must be enabled before any actual event
1270 * because any event may be combined with LBR
1272 if (intel_pmu_needs_lbr_smpl(event))
1273 intel_pmu_lbr_enable(event);
1275 if (event->attr.exclude_host)
1276 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
1277 if (event->attr.exclude_guest)
1278 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
1280 if (unlikely(event_is_checkpointed(event)))
1281 cpuc->intel_cp_status |= (1ull << hwc->idx);
1283 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1284 intel_pmu_enable_fixed(hwc);
1288 if (unlikely(event->attr.precise_ip))
1289 intel_pmu_pebs_enable(event);
1291 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1295 * Save and restart an expired event. Called by NMI contexts,
1296 * so it has to be careful about preempting normal event ops:
1298 int intel_pmu_save_and_restart(struct perf_event *event)
1300 x86_perf_event_update(event);
1302 * For a checkpointed counter always reset back to 0. This
1303 * avoids a situation where the counter overflows, aborts the
1304 * transaction and is then set back to shortly before the
1305 * overflow, and overflows and aborts again.
1307 if (unlikely(event_is_checkpointed(event))) {
1308 /* No race with NMIs because the counter should not be armed */
1309 wrmsrl(event->hw.event_base, 0);
1310 local64_set(&event->hw.prev_count, 0);
1312 return x86_perf_event_set_period(event);
1315 static void intel_pmu_reset(void)
1317 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
1318 unsigned long flags;
1321 if (!x86_pmu.num_counters)
1324 local_irq_save(flags);
1326 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1328 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1329 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1330 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
1332 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
1333 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1336 ds->bts_index = ds->bts_buffer_base;
1338 local_irq_restore(flags);
1342 * This handler is triggered by the local APIC, so the APIC IRQ handling
1345 static int intel_pmu_handle_irq(struct pt_regs *regs)
1347 struct perf_sample_data data;
1348 struct cpu_hw_events *cpuc;
1353 cpuc = &__get_cpu_var(cpu_hw_events);
1356 * No known reason to not always do late ACK,
1357 * but just in case do it opt-in.
1359 if (!x86_pmu.late_ack)
1360 apic_write(APIC_LVTPC, APIC_DM_NMI);
1361 intel_pmu_disable_all();
1362 handled = intel_pmu_drain_bts_buffer();
1363 status = intel_pmu_get_status();
1369 intel_pmu_ack_status(status);
1370 if (++loops > 100) {
1371 static bool warned = false;
1373 WARN(1, "perfevents: irq loop stuck!\n");
1374 perf_event_print_debug();
1381 inc_irq_stat(apic_perf_irqs);
1383 intel_pmu_lbr_read();
1386 * CondChgd bit 63 doesn't mean any overflow status. Ignore
1387 * and clear the bit.
1389 if (__test_and_clear_bit(63, (unsigned long *)&status)) {
1395 * PEBS overflow sets bit 62 in the global status register
1397 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1399 x86_pmu.drain_pebs(regs);
1403 * Checkpointed counters can lead to 'spurious' PMIs because the
1404 * rollback caused by the PMI will have cleared the overflow status
1405 * bit. Therefore always force probe these counters.
1407 status |= cpuc->intel_cp_status;
1409 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1410 struct perf_event *event = cpuc->events[bit];
1414 if (!test_bit(bit, cpuc->active_mask))
1417 if (!intel_pmu_save_and_restart(event))
1420 perf_sample_data_init(&data, 0, event->hw.last_period);
1422 if (has_branch_stack(event))
1423 data.br_stack = &cpuc->lbr_stack;
1425 if (perf_event_overflow(event, &data, regs))
1426 x86_pmu_stop(event, 0);
1430 * Repeat if there is more work to be done:
1432 status = intel_pmu_get_status();
1437 intel_pmu_enable_all(0);
1439 * Only unmask the NMI after the overflow counters
1440 * have been reset. This avoids spurious NMIs on Haswell CPUs.
1443 if (x86_pmu.late_ack)
1444 apic_write(APIC_LVTPC, APIC_DM_NMI);
1448 static struct event_constraint *
1449 intel_bts_constraints(struct perf_event *event)
1451 struct hw_perf_event *hwc = &event->hw;
1452 unsigned int hw_event, bts_event;
1454 if (event->attr.freq)
1457 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1458 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1460 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
1461 return &bts_constraint;
1466 static int intel_alt_er(int idx)
1468 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1471 if (idx == EXTRA_REG_RSP_0)
1472 return EXTRA_REG_RSP_1;
1474 if (idx == EXTRA_REG_RSP_1)
1475 return EXTRA_REG_RSP_0;
1480 static void intel_fixup_er(struct perf_event *event, int idx)
1482 event->hw.extra_reg.idx = idx;
1484 if (idx == EXTRA_REG_RSP_0) {
1485 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1486 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
1487 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1488 } else if (idx == EXTRA_REG_RSP_1) {
1489 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1490 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
1491 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1496 * manage allocation of shared extra msr for certain events
1499 * per-cpu: to be shared between the various events on a single PMU
1500 * per-core: per-cpu + shared by HT threads
1502 static struct event_constraint *
1503 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1504 struct perf_event *event,
1505 struct hw_perf_event_extra *reg)
1507 struct event_constraint *c = &emptyconstraint;
1508 struct er_account *era;
1509 unsigned long flags;
1513 * reg->alloc can be set due to existing state, so for fake cpuc we
1514 * need to ignore this, otherwise we might fail to allocate proper fake
1515 * state for this extra reg constraint. Also see the comment below.
1517 if (reg->alloc && !cpuc->is_fake)
1518 return NULL; /* call x86_get_event_constraint() */
1521 era = &cpuc->shared_regs->regs[idx];
1523 * we use spin_lock_irqsave() to avoid lockdep issues when
1524 * passing a fake cpuc
1526 raw_spin_lock_irqsave(&era->lock, flags);
1528 if (!atomic_read(&era->ref) || era->config == reg->config) {
1531 * If it's a fake cpuc -- as per validate_{group,event}() we
1532 * shouldn't touch event state and we can avoid doing so
1533 * since both will only call get_event_constraints() once
1534 * on each event, this avoids the need for reg->alloc.
1536 * Not doing the ER fixup will only result in era->reg being
1537 * wrong, but since we won't actually try and program hardware
1538 * this isn't a problem either.
1540 if (!cpuc->is_fake) {
1541 if (idx != reg->idx)
1542 intel_fixup_er(event, idx);
1545 * x86_schedule_events() can call get_event_constraints()
1546 * multiple times on events in the case of incremental
1547 * scheduling(). reg->alloc ensures we only do the ER allocation once.
1553 /* lock in msr value */
1554 era->config = reg->config;
1555 era->reg = reg->reg;
1558 atomic_inc(&era->ref);
1561 * need to call x86_get_event_constraint()
1562 * to check if associated event has constraints
1566 idx = intel_alt_er(idx);
1567 if (idx != reg->idx) {
1568 raw_spin_unlock_irqrestore(&era->lock, flags);
1572 raw_spin_unlock_irqrestore(&era->lock, flags);
1578 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1579 struct hw_perf_event_extra *reg)
1581 struct er_account *era;
1584 * Only put constraint if extra reg was actually allocated. Also takes
1585 * care of events which do not use an extra shared reg.
1587 * Also, if this is a fake cpuc we shouldn't touch any event state
1588 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1589 * either since it'll be thrown out.
1591 if (!reg->alloc || cpuc->is_fake)
1594 era = &cpuc->shared_regs->regs[reg->idx];
1596 /* one fewer user */
1597 atomic_dec(&era->ref);
1599 /* allocate again next time */
1603 static struct event_constraint *
1604 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1605 struct perf_event *event)
1607 struct event_constraint *c = NULL, *d;
1608 struct hw_perf_event_extra *xreg, *breg;
1610 xreg = &event->hw.extra_reg;
1611 if (xreg->idx != EXTRA_REG_NONE) {
1612 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1613 if (c == &emptyconstraint)
1616 breg = &event->hw.branch_reg;
1617 if (breg->idx != EXTRA_REG_NONE) {
1618 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1619 if (d == &emptyconstraint) {
1620 __intel_shared_reg_put_constraints(cpuc, xreg);
1627 struct event_constraint *
1628 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1630 struct event_constraint *c;
1632 if (x86_pmu.event_constraints) {
1633 for_each_event_constraint(c, x86_pmu.event_constraints) {
1634 if ((event->hw.config & c->cmask) == c->code) {
1635 event->hw.flags |= c->flags;
1641 return &unconstrained;
1644 static struct event_constraint *
1645 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1647 struct event_constraint *c;
1649 c = intel_bts_constraints(event);
1653 c = intel_pebs_constraints(event);
1657 c = intel_shared_regs_constraints(cpuc, event);
1661 return x86_get_event_constraints(cpuc, event);
1665 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1666 struct perf_event *event)
1668 struct hw_perf_event_extra *reg;
1670 reg = &event->hw.extra_reg;
1671 if (reg->idx != EXTRA_REG_NONE)
1672 __intel_shared_reg_put_constraints(cpuc, reg);
1674 reg = &event->hw.branch_reg;
1675 if (reg->idx != EXTRA_REG_NONE)
1676 __intel_shared_reg_put_constraints(cpuc, reg);
1679 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1680 struct perf_event *event)
1682 intel_put_shared_regs_event_constraints(cpuc, event);
1685 static void intel_pebs_aliases_core2(struct perf_event *event)
1687 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1689 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1690 * (0x003c) so that we can use it with PEBS.
1692 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1693 * PEBS capable. However we can use INST_RETIRED.ANY_P
1694 * (0x00c0), which is a PEBS capable event, to get the same count.
1697 * INST_RETIRED.ANY_P counts the number of cycles that retire
1698 * CNTMASK instructions. By setting CNTMASK to a value (16)
1699 * larger than the maximum number of instructions that can be
1700 * retired per cycle (4) and then inverting the condition, we
1701 * count all cycles that retire 16 or fewer instructions, which is every cycle.
1704 * Thereby we gain a PEBS capable cycle counter.
1706 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1708 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1709 event->hw.config = alt_config;
1713 static void intel_pebs_aliases_snb(struct perf_event *event)
1715 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1717 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1718 * (0x003c) so that we can use it with PEBS.
1720 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1721 * PEBS capable. However we can use UOPS_RETIRED.ALL
1722 * (0x01c2), which is a PEBS capable event, to get the same count.
1725 * UOPS_RETIRED.ALL counts the number of cycles that retire
1726 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1727 * larger than the maximum number of micro-ops that can be
1728 * retired per cycle (4) and then inverting the condition, we
1729 * count all cycles that retire 16 or fewer micro-ops, which is every cycle.
1732 * Thereby we gain a PEBS capable cycle counter.
1734 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
1736 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1737 event->hw.config = alt_config;
1741 static int intel_pmu_hw_config(struct perf_event *event)
1743 int ret = x86_pmu_hw_config(event);
1748 if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1749 x86_pmu.pebs_aliases(event);
1751 if (intel_pmu_needs_lbr_smpl(event)) {
1752 ret = intel_pmu_setup_lbr_filter(event);
1757 if (event->attr.type != PERF_TYPE_RAW)
1760 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1763 if (x86_pmu.version < 3)
1766 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1769 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
1774 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1776 if (x86_pmu.guest_get_msrs)
1777 return x86_pmu.guest_get_msrs(nr);
1781 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
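/*
 * Hedged usage sketch (not part of this file): a hypervisor such as KVM
 * can use the exported helper to obtain the per-MSR host/guest values and
 * switch them around VM entry/exit, roughly along these lines; the
 * load_guest_msr() helper below is hypothetical and stands in for whatever
 * atomic MSR-switching mechanism the hypervisor provides:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		load_guest_msr(msrs[i].msr, msrs[i].guest, msrs[i].host);
 */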
1783 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1785 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1786 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1788 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1789 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1790 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1792 * If a PMU counter has PEBS enabled, it is not enough to disable the counter
1793 * on guest entry since a PEBS memory write can overshoot guest entry
1794 * and corrupt guest memory. Disabling PEBS solves the problem.
1796 arr[1].msr = MSR_IA32_PEBS_ENABLE;
1797 arr[1].host = cpuc->pebs_enabled;
1804 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1806 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1807 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1810 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1811 struct perf_event *event = cpuc->events[idx];
1813 arr[idx].msr = x86_pmu_config_addr(idx);
1814 arr[idx].host = arr[idx].guest = 0;
1816 if (!test_bit(idx, cpuc->active_mask))
1819 arr[idx].host = arr[idx].guest =
1820 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1822 if (event->attr.exclude_host)
1823 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1824 else if (event->attr.exclude_guest)
1825 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1828 *nr = x86_pmu.num_counters;
1832 static void core_pmu_enable_event(struct perf_event *event)
1834 if (!event->attr.exclude_host)
1835 x86_pmu_enable_event(event);
1838 static void core_pmu_enable_all(int added)
1840 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1843 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1844 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
1846 if (!test_bit(idx, cpuc->active_mask) ||
1847 cpuc->events[idx]->attr.exclude_host)
1850 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1854 static int hsw_hw_config(struct perf_event *event)
1856 int ret = intel_pmu_hw_config(event);
1860 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
1862 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
1865 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
1866 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid this combination.
1869 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
1870 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
1871 event->attr.precise_ip > 0))
1874 if (event_is_checkpointed(event)) {
1876 * Sampling of checkpointed events can cause situations where
1877 * the CPU constantly aborts because of an overflow, which is
1878 * then checkpointed back and ignored. Forbid checkpointing for sampling.
1881 * But still allow a long sampling period, so that perf stat from KVM works.
1884 if (event->attr.sample_period > 0 &&
1885 event->attr.sample_period < 0x7fffffff)
1891 static struct event_constraint counter2_constraint =
1892 EVENT_CONSTRAINT(0, 0x4, 0);
1894 static struct event_constraint *
1895 hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1897 struct event_constraint *c = intel_get_event_constraints(cpuc, event);
1899 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
1900 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
1901 if (c->idxmsk64 & (1U << 2))
1902 return &counter2_constraint;
1903 return &emptyconstraint;
1909 PMU_FORMAT_ATTR(event, "config:0-7" );
1910 PMU_FORMAT_ATTR(umask, "config:8-15" );
1911 PMU_FORMAT_ATTR(edge, "config:18" );
1912 PMU_FORMAT_ATTR(pc, "config:19" );
1913 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
1914 PMU_FORMAT_ATTR(inv, "config:23" );
1915 PMU_FORMAT_ATTR(cmask, "config:24-31" );
1916 PMU_FORMAT_ATTR(in_tx, "config:32");
1917 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
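/*
 * Added note for illustration (not in the original): each PMU_FORMAT_ATTR()
 * above is exported through the attribute groups below as a file under
 * /sys/bus/event_source/devices/cpu/format/, which tells the perf tool how
 * to map a named field onto the raw config. For example, a raw event such as
 *
 *	perf stat -e cpu/event=0x3c,in_tx=1/
 *
 * places 0x3c into config bits 0-7 and sets config bit 32, matching the
 * "event" and "in_tx" declarations here.
 */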
1919 static struct attribute *intel_arch_formats_attr[] = {
1920 &format_attr_event.attr,
1921 &format_attr_umask.attr,
1922 &format_attr_edge.attr,
1923 &format_attr_pc.attr,
1924 &format_attr_inv.attr,
1925 &format_attr_cmask.attr,
1929 ssize_t intel_event_sysfs_show(char *page, u64 config)
1931 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
1933 return x86_event_sysfs_show(page, config, event);
1936 static __initconst const struct x86_pmu core_pmu = {
1938 .handle_irq = x86_pmu_handle_irq,
1939 .disable_all = x86_pmu_disable_all,
1940 .enable_all = core_pmu_enable_all,
1941 .enable = core_pmu_enable_event,
1942 .disable = x86_pmu_disable_event,
1943 .hw_config = x86_pmu_hw_config,
1944 .schedule_events = x86_schedule_events,
1945 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1946 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1947 .event_map = intel_pmu_event_map,
1948 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1951 * Intel PMCs cannot be accessed sanely above 32 bit width,
1952 * so we install an artificial 1<<31 period regardless of
1953 * the generic event period:
1955 .max_period = (1ULL << 31) - 1,
1956 .get_event_constraints = intel_get_event_constraints,
1957 .put_event_constraints = intel_put_event_constraints,
1958 .event_constraints = intel_core_event_constraints,
1959 .guest_get_msrs = core_guest_get_msrs,
1960 .format_attrs = intel_arch_formats_attr,
1961 .events_sysfs_show = intel_event_sysfs_show,
1964 struct intel_shared_regs *allocate_shared_regs(int cpu)
1966 struct intel_shared_regs *regs;
1969 regs = kzalloc_node(sizeof(struct intel_shared_regs),
1970 GFP_KERNEL, cpu_to_node(cpu));
1973 * initialize the locks to keep lockdep happy
1975 for (i = 0; i < EXTRA_REG_MAX; i++)
1976 raw_spin_lock_init(®s->regs[i].lock);
1983 static int intel_pmu_cpu_prepare(int cpu)
1985 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1987 if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
1990 cpuc->shared_regs = allocate_shared_regs(cpu);
1991 if (!cpuc->shared_regs)
1997 static void intel_pmu_cpu_starting(int cpu)
1999 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2000 int core_id = topology_core_id(cpu);
2003 init_debug_store_on_cpu(cpu);
2005 * Deal with CPUs that don't clear their LBRs on power-up.
2007 intel_pmu_lbr_reset();
2009 cpuc->lbr_sel = NULL;
2011 if (!cpuc->shared_regs)
2014 if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
2015 for_each_cpu(i, topology_thread_cpumask(cpu)) {
2016 struct intel_shared_regs *pc;
2018 pc = per_cpu(cpu_hw_events, i).shared_regs;
2019 if (pc && pc->core_id == core_id) {
2020 cpuc->kfree_on_online = cpuc->shared_regs;
2021 cpuc->shared_regs = pc;
2025 cpuc->shared_regs->core_id = core_id;
2026 cpuc->shared_regs->refcnt++;
2029 if (x86_pmu.lbr_sel_map)
2030 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
2033 static void intel_pmu_cpu_dying(int cpu)
2035 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2036 struct intel_shared_regs *pc;
2038 pc = cpuc->shared_regs;
2040 if (pc->core_id == -1 || --pc->refcnt == 0)
2042 cpuc->shared_regs = NULL;
2045 fini_debug_store_on_cpu(cpu);
2048 static void intel_pmu_flush_branch_stack(void)
2051 * The Intel LBR does not tag entries with the
2052 * PID of the current task, so it needs to be flushed on context switch.
2054 * For now, we simply reset it.
2057 intel_pmu_lbr_reset();
2060 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
2062 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
2064 static struct attribute *intel_arch3_formats_attr[] = {
2065 &format_attr_event.attr,
2066 &format_attr_umask.attr,
2067 &format_attr_edge.attr,
2068 &format_attr_pc.attr,
2069 &format_attr_any.attr,
2070 &format_attr_inv.attr,
2071 &format_attr_cmask.attr,
2072 &format_attr_in_tx.attr,
2073 &format_attr_in_tx_cp.attr,
2075 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
2076 &format_attr_ldlat.attr, /* PEBS load latency */
2080 static __initconst const struct x86_pmu intel_pmu = {
2082 .handle_irq = intel_pmu_handle_irq,
2083 .disable_all = intel_pmu_disable_all,
2084 .enable_all = intel_pmu_enable_all,
2085 .enable = intel_pmu_enable_event,
2086 .disable = intel_pmu_disable_event,
2087 .hw_config = intel_pmu_hw_config,
2088 .schedule_events = x86_schedule_events,
2089 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2090 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2091 .event_map = intel_pmu_event_map,
2092 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2095 * Intel PMCs cannot be accessed sanely above 32 bit width,
2096 * so we install an artificial 1<<31 period regardless of
2097 * the generic event period:
2099 .max_period = (1ULL << 31) - 1,
2100 .get_event_constraints = intel_get_event_constraints,
2101 .put_event_constraints = intel_put_event_constraints,
2102 .pebs_aliases = intel_pebs_aliases_core2,
2104 .format_attrs = intel_arch3_formats_attr,
2105 .events_sysfs_show = intel_event_sysfs_show,
2107 .cpu_prepare = intel_pmu_cpu_prepare,
2108 .cpu_starting = intel_pmu_cpu_starting,
2109 .cpu_dying = intel_pmu_cpu_dying,
2110 .guest_get_msrs = intel_guest_get_msrs,
2111 .flush_branch_stack = intel_pmu_flush_branch_stack,
2114 static __init void intel_clovertown_quirk(void)
2117 * PEBS is unreliable due to:
2119 * AJ67 - PEBS may experience CPL leaks
2120 * AJ68 - PEBS PMI may be delayed by one event
2121 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
2122 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
2124 * AJ67 could be worked around by restricting the OS/USR flags.
2125 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
2127 * AJ106 could possibly be worked around by not allowing LBR
2128 * usage from PEBS, including the fixup.
2129 * AJ68 could possibly be worked around by always programming
2130 * a pebs_event_reset[0] value and coping with the lost events.
2132 * But taken together it might just make sense to not enable PEBS on these chips.
2135 pr_warn("PEBS disabled due to CPU errata\n");
2137 x86_pmu.pebs_constraints = NULL;
2140 static int intel_snb_pebs_broken(int cpu)
2142 u32 rev = UINT_MAX; /* default to broken for unknown models */
2144 switch (cpu_data(cpu).x86_model) {
2149 case 45: /* SNB-EP */
2150 switch (cpu_data(cpu).x86_mask) {
2151 case 6: rev = 0x618; break;
2152 case 7: rev = 0x70c; break;
2156 return (cpu_data(cpu).microcode < rev);
2159 static void intel_snb_check_microcode(void)
2161 int pebs_broken = 0;
2165 for_each_online_cpu(cpu) {
2166 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
2171 if (pebs_broken == x86_pmu.pebs_broken)
2175 * Serialized by the microcode lock..
2177 if (x86_pmu.pebs_broken) {
2178 pr_info("PEBS enabled due to microcode update\n");
2179 x86_pmu.pebs_broken = 0;
2181 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
2182 x86_pmu.pebs_broken = 1;
2186 static __init void intel_sandybridge_quirk(void)
2188 x86_pmu.check_microcode = intel_snb_check_microcode;
2189 intel_snb_check_microcode();
2192 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
2193 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
2194 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
2195 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
2196 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
2197 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
2198 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
2199 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
2202 static __init void intel_arch_events_quirk(void)
2206 /* disable events reported as not present by cpuid */
2207 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
2208 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
2209 pr_warn("CPUID marked event: \'%s\' unavailable\n",
2210 intel_arch_events_map[bit].name);
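/*
 * Annotation: events_mask mirrors CPUID.0AH:EBX, where a set bit means the
 * corresponding architectural event is NOT available. Zeroing its entry in
 * intel_perfmon_event_map makes the generic setup path reject the event,
 * since a zero event_map() result is treated as unsupported.
 */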
2214 static __init void intel_nehalem_quirk(void)
2216 union cpuid10_ebx ebx;
2218 ebx.full = x86_pmu.events_maskl;
2219 if (ebx.split.no_branch_misses_retired) {
2221 * Erratum AAJ80 detected, we work around it by using
2222 * the BR_MISP_EXEC.ANY event. This will over-count
2223 * branch-misses, but it's still much better than the
2224 * architectural event which is often completely bogus:
2226 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
2227 ebx.split.no_branch_misses_retired = 0;
2228 x86_pmu.events_maskl = ebx.full;
2229 pr_info("CPU erratum AAJ80 worked around\n");
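/*
 * Annotation: 0x7f89 encodes event 0x89 (BR_MISP_EXEC) with umask 0x7f,
 * i.e. all mispredicted branches executed rather than retired, which is
 * why it over-counts compared to the architectural branch-misses event.
 */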
2233 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
2234 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
2236 /* Haswell special events */
2237 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
2238 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
2239 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
2240 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
2241 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
2242 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
2243 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
2244 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
2245 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
2246 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
2247 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
2248 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
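/*
 * Annotation: in_tx=1 restricts CPU_CLK_UNHALTED (event 0x3c) to cycles
 * spent inside TSX transactions; cycles-ct additionally sets in_tx_cp=1 to
 * use the checkpointed count, so cycles from aborted transactions are
 * rolled back rather than included.
 */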
2250 static struct attribute *hsw_events_attrs[] = {
2251 EVENT_PTR(tx_start),
2252 EVENT_PTR(tx_commit),
2253 EVENT_PTR(tx_abort),
2254 EVENT_PTR(tx_capacity),
2255 EVENT_PTR(tx_conflict),
2256 EVENT_PTR(el_start),
2257 EVENT_PTR(el_commit),
2258 EVENT_PTR(el_abort),
2259 EVENT_PTR(el_capacity),
2260 EVENT_PTR(el_conflict),
2261 EVENT_PTR(cycles_t),
2262 EVENT_PTR(cycles_ct),
2263 EVENT_PTR(mem_ld_hsw),
2264 EVENT_PTR(mem_st_hsw),
2268 __init int intel_pmu_init(void)
2270 union cpuid10_edx edx;
2271 union cpuid10_eax eax;
2272 union cpuid10_ebx ebx;
2273 struct event_constraint *c;
2274 unsigned int unused;
2277 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2278 switch (boot_cpu_data.x86) {
2280 return p6_pmu_init();
2282 return knc_pmu_init();
2284 return p4_pmu_init();
2290 * Check whether the Architectural PerfMon supports
2291 * Branch Misses Retired hw_event or not.
2293 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
2294 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
2297 version = eax.split.version_id;
2301 x86_pmu = intel_pmu;
2303 x86_pmu.version = version;
2304 x86_pmu.num_counters = eax.split.num_counters;
2305 x86_pmu.cntval_bits = eax.split.bit_width;
2306 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
2308 x86_pmu.events_maskl = ebx.full;
2309 x86_pmu.events_mask_len = eax.split.mask_length;
2311 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
2314 * Quirk: v2 perfmon does not report fixed-purpose events, so
2315 * assume at least 3 events:
2318 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
2320 if (boot_cpu_has(X86_FEATURE_PDCM)) {
2323 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
2324 x86_pmu.intel_cap.capabilities = capabilities;
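/*
 * Annotation: IA32_PERF_CAPABILITIES (available when CPUID reports PDCM)
 * describes, among other things, the PEBS record format and whether
 * full-width counter writes are supported; the full_width_write bit is
 * consumed at the end of this function.
 */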
2329 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
2332 * Install the hw-cache-events table:
2334 switch (boot_cpu_data.x86_model) {
2335 case 14: /* 65 nm core solo/duo, "Yonah" */
2336 pr_cont("Core events, ");
2339 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2340 x86_add_quirk(intel_clovertown_quirk);
2341 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2342 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2343 case 29: /* six-core 45 nm xeon "Dunnington" */
2344 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2345 sizeof(hw_cache_event_ids));
2347 intel_pmu_lbr_init_core();
2349 x86_pmu.event_constraints = intel_core2_event_constraints;
2350 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
2351 pr_cont("Core2 events, ");
2354 case 26: /* 45 nm nehalem, "Bloomfield" */
2355 case 30: /* 45 nm nehalem, "Lynnfield" */
2356 case 46: /* 45 nm nehalem-ex, "Beckton" */
2357 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2358 sizeof(hw_cache_event_ids));
2359 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2360 sizeof(hw_cache_extra_regs));
2362 intel_pmu_lbr_init_nhm();
2364 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2365 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
2366 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2367 x86_pmu.extra_regs = intel_nehalem_extra_regs;
2369 x86_pmu.cpu_events = nhm_events_attrs;
2371 /* UOPS_ISSUED.STALLED_CYCLES */
2372 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2373 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2374 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2375 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2376 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
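/*
 * Annotation: cmask=1 together with inv=1 turns these uop-count events
 * into "cycles in which fewer than one uop was issued/executed", i.e.
 * stall cycles, which is what the generic STALLED_CYCLES_* events expect.
 * The Westmere and SandyBridge branches below use the same trick.
 */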
2378 x86_add_quirk(intel_nehalem_quirk);
2380 pr_cont("Nehalem events, ");
2384 case 38: /* Lincroft */
2385 case 39: /* Penwell */
2386 case 53: /* Cloverview */
2387 case 54: /* Cedarview */
2388 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2389 sizeof(hw_cache_event_ids));
2391 intel_pmu_lbr_init_atom();
2393 x86_pmu.event_constraints = intel_gen_event_constraints;
2394 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
2395 pr_cont("Atom events, ");
2398 case 55: /* Atom 22nm "Silvermont" */
2399 case 77: /* Avoton "Silvermont" */
2400 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2401 sizeof(hw_cache_event_ids));
2402 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
2403 sizeof(hw_cache_extra_regs));
2405 intel_pmu_lbr_init_atom();
2407 x86_pmu.event_constraints = intel_slm_event_constraints;
2408 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
2409 x86_pmu.extra_regs = intel_slm_extra_regs;
2410 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2411 pr_cont("Silvermont events, ");
2414 case 37: /* 32 nm nehalem, "Clarkdale" */
2415 case 44: /* 32 nm nehalem, "Gulftown" */
2416 case 47: /* 32 nm Xeon E7 */
2417 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2418 sizeof(hw_cache_event_ids));
2419 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2420 sizeof(hw_cache_extra_regs));
2422 intel_pmu_lbr_init_nhm();
2424 x86_pmu.event_constraints = intel_westmere_event_constraints;
2425 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2426 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
2427 x86_pmu.extra_regs = intel_westmere_extra_regs;
2428 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2430 x86_pmu.cpu_events = nhm_events_attrs;
2432 /* UOPS_ISSUED.STALLED_CYCLES */
2433 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2434 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2435 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2436 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2437 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2439 pr_cont("Westmere events, ");
2442 case 42: /* SandyBridge */
2443 case 45: /* SandyBridge, "Romley-EP" */
2444 x86_add_quirk(intel_sandybridge_quirk);
2445 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2446 sizeof(hw_cache_event_ids));
2447 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2448 sizeof(hw_cache_extra_regs));
2450 intel_pmu_lbr_init_snb();
2452 x86_pmu.event_constraints = intel_snb_event_constraints;
2453 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
2454 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2455 if (boot_cpu_data.x86_model == 45)
2456 x86_pmu.extra_regs = intel_snbep_extra_regs;
2458 x86_pmu.extra_regs = intel_snb_extra_regs;
2459 /* all extra regs are per-cpu when HT is on */
2460 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2461 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2463 x86_pmu.cpu_events = snb_events_attrs;
2465 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2466 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2467 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2468 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
2469 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2470 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
2472 pr_cont("SandyBridge events, ");
2474 case 58: /* IvyBridge */
2475 case 62: /* IvyBridge EP */
2476 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2477 sizeof(hw_cache_event_ids));
2478 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2479 sizeof(hw_cache_extra_regs));
2481 intel_pmu_lbr_init_snb();
2483 x86_pmu.event_constraints = intel_ivb_event_constraints;
2484 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
2485 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2486 if (boot_cpu_data.x86_model == 62)
2487 x86_pmu.extra_regs = intel_snbep_extra_regs;
2489 x86_pmu.extra_regs = intel_snb_extra_regs;
2490 /* all extra regs are per-cpu when HT is on */
2491 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2492 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2494 x86_pmu.cpu_events = snb_events_attrs;
2496 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2497 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2498 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2500 pr_cont("IvyBridge events, ");
2504 case 60: /* Haswell Client */
2509 x86_pmu.late_ack = true;
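/*
 * Annotation: late_ack defers the APIC LVTPC acknowledgement to the end of
 * the PMI handler; on Haswell this reportedly avoids losing or getting
 * spurious PMIs while the handler is still running.
 */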
2510 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2511 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2513 intel_pmu_lbr_init_snb();
2515 x86_pmu.event_constraints = intel_hsw_event_constraints;
2516 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
2517 x86_pmu.extra_regs = intel_snb_extra_regs;
2518 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2519 /* all extra regs are per-cpu when HT is on */
2520 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2521 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2523 x86_pmu.hw_config = hsw_hw_config;
2524 x86_pmu.get_event_constraints = hsw_get_event_constraints;
2525 x86_pmu.cpu_events = hsw_events_attrs;
2526 x86_pmu.lbr_double_abort = true;
2527 pr_cont("Haswell events, ");
2531 switch (x86_pmu.version) {
2533 x86_pmu.event_constraints = intel_v1_event_constraints;
2534 pr_cont("generic architected perfmon v1, ");
2538 * default constraints for v2 and up
2540 x86_pmu.event_constraints = intel_gen_event_constraints;
2541 pr_cont("generic architected perfmon, ");
2546 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
2547 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2548 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
2549 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
2551 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
2553 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
2554 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2555 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
2556 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
2559 x86_pmu.intel_ctrl |=
2560 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
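/*
 * Annotation: intel_ctrl is the enable mask later written to
 * MSR_CORE_PERF_GLOBAL_CTRL; generic counters occupy the low bits and the
 * fixed counters start at bit INTEL_PMC_IDX_FIXED (32), hence the shift.
 */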
2562 if (x86_pmu.event_constraints) {
2564 * event on fixed counter2 (REF_CYCLES) only works on this
2565 * counter, so do not extend mask to generic counters
2567 for_each_event_constraint(c, x86_pmu.event_constraints) {
2568 if (c->cmask != FIXED_EVENT_FLAGS
2569 || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
2573 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
2574 c->weight += x86_pmu.num_counters;
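/*
 * Annotation: fixed-counter constraints other than REF_CYCLES (skipped by
 * the test above) are widened here so the same event can also be scheduled
 * on any generic counter when its fixed counter is already taken.
 */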
2578 /* Support full width counters using alternative MSR range */
2579 if (x86_pmu.intel_cap.full_width_write) {
2580 x86_pmu.max_period = x86_pmu.cntval_mask;
2581 x86_pmu.perfctr = MSR_IA32_PMC0;
2582 pr_cont("full-width counters, ");
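/*
 * Annotation: the legacy MSR_ARCH_PERFMON_PERFCTR0 interface only takes
 * 32-bit, sign-extended writes, which is why max_period defaults to
 * 2^31 - 1 above. When IA32_PERF_CAPABILITIES advertises full-width
 * writes, the alternative MSR_IA32_PMC0 range accepts the counter's full
 * bit width, so max_period can be raised to cntval_mask.
 */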