#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

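/*
 * Each map entry is a raw PERFEVTSEL descriptor: (unit mask << 8) |
 * event select. For example, 0x412e is the architectural "LLC Misses"
 * event: event select 0x2e qualified by unit mask 0x41.
 */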
static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

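/*
 * The second argument of these constraint macros is a bitmask of the
 * general-purpose counters the event may be scheduled on: 0x1 means
 * PMC0 only, 0x2 means PMC1 only, 0x3 means either PMC0 or PMC1.
 */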
static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
	INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};

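/*
 * Note that INTEL_UEVENT_CONSTRAINT matches on the unit mask as well as
 * the event select, so above only the 0x01c0 encoding of INST_RETIRED
 * (PREC_DIST) is pinned to counter 1.
 */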
static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

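/*
 * In the hw-cache-events tables below, an entry of -1 marks an
 * op/result combination the hardware cannot count at all, while 0
 * marks one we have no event for; anything else is a raw
 * (unit mask << 8) | event-select descriptor.
 */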
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	/*
	 * TBD: Need Off-core Response Performance Monitoring support
	 */
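	/*
	 * 0x01b7 and 0x01bb below select OFFCORE_RESPONSE_0/1; the actual
	 * request/response qualifiers come from the separate
	 * MSR_OFFCORE_RSP registers rather than from the event code.
	 */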
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
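	/* Each magic value is EN|OS|USR (0x430000) plus an event select. */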
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as the MSRs
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not currently used;
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate on 4 pairs of PERF counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

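/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL
 * at bit offset idx * 4: bit 0 enables ring-0 counting, bit 1 ring-3
 * counting, bit 2 is the any-thread qualifier (v3 and up) and bit 3
 * requests a PMI on overflow. E.g. fixed counter 1 counting in all rings
 * with PMI enabled has 0xb in bits 7:4.
 */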
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * The ANY bit is supported in v3 and up.
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
		checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register:
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}

static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

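	/*
	 * BTS can stand in for branch-instruction sampling only when
	 * every single branch is wanted, i.e. a sample period of 1:
	 */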
	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}

static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip &&
	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P with a counter mask counts cycles in
		 * which at least CNTMASK instructions retire. By setting
		 * CNTMASK to a value (16) larger than the maximum number of
		 * instructions that can retire per cycle (4) and then
		 * inverting the condition, we count all cycles that retire
		 * fewer than 16 instructions, which is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
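		/*
		 * Decoded, 0x108000c0 is: CMASK (bits 31:24) = 0x10, the
		 * INV flag (bit 23) set, unit mask 0x00 and event select
		 * 0xc0 (INST_RETIRED.ANY_P).
		 */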
		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32-bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};

static void intel_pmu_cpu_starting(int cpu)
{
	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();
}

static void intel_pmu_cpu_dying(int cpu)
{
	fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
	.name			= "intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32-bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,

	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
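	/*
	 * CPUID leaf 0xa, EAX layout: bits 7:0 hold the architectural
	 * perfmon version, bits 15:8 the number of general-purpose
	 * counters, bits 23:16 the counter bit width and bits 31:24 the
	 * length of the EBX event-availability bit vector.
	 */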
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version		= version;
	x86_pmu.num_counters	= eax.split.num_counters;
	x86_pmu.cntval_bits	= eax.split.bit_width;
	x86_pmu.cntval_mask	= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_pmu.quirks = intel_clovertown_quirks;
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_events;
		pr_cont("SandyBridge events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */