#ifdef CONFIG_CPU_SUP_INTEL

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE
/*
 * pebs_record_32 for P4 and Core is not supported:

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */
struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};
struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};
/*
 * A debug store configuration.
 *
 * We only support architectures that use 64-bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};
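/*
 * Point the DS_AREA MSR of @cpu at its debug_store so the hardware
 * writes BTS/PEBS records into the buffers described above.
 */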
static void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}
static void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
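/*
 * Allocate the per-cpu PEBS buffer and program its base, index, limit
 * and interrupt threshold in the debug_store.  The threshold is a
 * single record, so the PMI is raised for every PEBS write.
 */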
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}
static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}
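/*
 * Allocate the per-cpu BTS buffer.  Unlike PEBS, the interrupt
 * threshold is placed a number of records below the end of the buffer
 * so the PMI has room to drain it before the buffer overflows.
 */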
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}
static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}
static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds;

	ds = kzalloc(sizeof(*ds), GFP_KERNEL);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}
static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}
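/*
 * Tear everything down: detach the DS area from all online CPUs, then
 * free the PEBS/BTS buffers and the debug_store itself for every
 * possible CPU.
 */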
static void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}
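/*
 * Reserve BTS/PEBS resources for all CPUs.  BTS and PEBS failures are
 * tracked separately so that losing one facility does not disable the
 * other; only when both fail is the debug_store freed again.
 */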
static int reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return 0;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return 0;
}
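/*
 * BTS
 *
 * BTS has no counter of its own; it is scheduled on a fixed pseudo
 * counter index, expressed by the constraint below.
 */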
static struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
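/*
 * Drain the BTS buffer: emit one from/to branch sample per record into
 * the perf ring buffer, then reset the hardware index to the start of
 * the buffer.
 */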
static int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs.ip     = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;

	return 1;
}
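/*
 * PEBS
 *
 * Only the events below may use PEBS; the second argument is the mask
 * of counters the event may be scheduled on (PMC0 only on Core,
 * PMC0-3 on Nehalem).
 */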
static struct event_constraint intel_core_pebs_events[] = {
	PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
	PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
	PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_pebs_events[] = {
	PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
	PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
	PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
	PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
	EVENT_CONSTRAINT_END
};
static struct event_constraint *
intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}
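/*
 * For precise_ip > 1 on trap-like PEBS the LBR is needed to rewind
 * from the reported (trap) address to the instruction that caused the
 * event; see intel_pmu_pebs_fixup_ip().
 */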
static void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
	WARN_ON_ONCE(cpuc->enabled);

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_enable(event);
}
static void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_disable(event);
}
static void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}
static void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
#include <asm/insn.h>

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
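/*
 * With a trap-like PEBS assist the reported IP points to the
 * instruction *after* the one that caused the event.  Starting from
 * the last LBR branch target, decode forward until we reach that IP;
 * the previous instruction is the real sample point, which makes the
 * sample PERF_EFLAGS_EXACT.
 */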
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		regs->ip = from;
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

		kernel_insn_init(&insn, kaddr);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		regs->ip = old_to;
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
static int intel_pmu_save_and_restart(struct perf_event *event);
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.ip = pebs->ip;
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (perf_event_overflow(event, 1, &data, &regs))
		x86_pmu_stop(event, 0);
}
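/*
 * Core 2: a single PEBS-capable counter (PMC0) and no status bits in
 * the record, so every record in the buffer belongs to that one event.
 */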
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > 1);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}
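/*
 * Nehalem: up to MAX_PEBS_EVENTS counters may use PEBS and each record
 * carries a status word saying which counter(s) it belongs to, so
 * route every record to the matching event.
 */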
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= MAX_PEBS_EVENTS)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}
/*
 * BTS, PEBS probe and setup
 */

static void intel_ds_init(void)
{
	/*
	 * No support for 32-bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			x86_pmu.pebs_constraints = intel_core_pebs_events;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}
#else /* CONFIG_CPU_SUP_INTEL */

static int reserve_ds_buffers(void)
{
	return 0;
}

static void release_ds_buffers(void)
{
}

#endif /* CONFIG_CPU_SUP_INTEL */