// SPDX-License-Identifier: GPL-2.0-only
#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>

#include "../perf_event.h"

static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;

/* AMD Event 0xFFF: Merge. Used with Large Increment per Cycle events */
#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)

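/*
 * Usage sketch (illustrative, not part of the original file): a Large
 * Increment per Cycle event occupies an even/odd counter pair. The even
 * counter carries the actual event while the odd counter is programmed
 * with the Merge event defined above, roughly:
 *
 *	wrmsrl(x86_pmu_config_addr(idx + 1), AMD_MERGE_EVENT_ENABLE);
 *	wrmsrl(x86_pmu_config_addr(idx), hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 *
 * The generic x86 code performs this via x86_pmu.perf_ctr_pair_en, which
 * amd_core_pmu_init() below sets to AMD_MERGE_EVENT_ENABLE.
 */
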
static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback          */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 amd_hw_cache_event_ids_f17h
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
		[C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
		[C(RESULT_MISS)]   = 0,
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
		[C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
		[C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
		[C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
		[C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
};

/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
};

static u64 amd_pmu_event_map(int hw_event)
{
	if (boot_cpu_data.x86 >= 0x17)
		return amd_f17h_perfmon_event_map[hw_event];

	return amd_perfmon_event_map[hw_event];
}

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}

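/*
 * Worked example (illustrative): for counter index 3 the offsets computed
 * above yield the following MSR pairs:
 *
 *	legacy:       eventsel = 0xc0010000 + 3, perfctr = 0xc0010004 + 3
 *	PERFCTR_CORE: eventsel = 0xc0010200 + 6, perfctr = 0xc0010201 + 6
 *
 * i.e. the offset is "index" on legacy CPUs and "index << 1" when the core
 * performance counter extensions are present.
 */
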
/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
{
	if (!(x86_pmu.flags & PMU_FL_PAIR))
		return false;

	switch (amd_get_event_code(hwc)) {
	case 0x003:	return true;	/* Retired SSE/AVX FLOPs */
	default:	return false;
	}
}

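/*
 * Worked example (illustrative): a 12-bit AMD event select keeps bits 7:0
 * of the event code in config[7:0] and bits 11:8 in config[35:32]. An
 * event code such as 0x1C0 (see the family 15h table below) is therefore
 * encoded as config = (0x1ULL << 32) | 0xC0, and amd_get_event_code()
 * folds it back:
 *
 *	((config >> 24) & 0x0f00) | (config & 0x00ff) == 0x1C0
 */
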
static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))
		event->hw.flags |= PERF_X86_EVENT_PAIR;

	return 0;
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, Hypertransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints()
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}

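/*
 * Worked example (illustrative): assume two cores share one NB and core 0
 * has already claimed nb->owners[0] for an NB event. When core 1 schedules
 * its own NB event, the cmpxchg() on slot 0 fails (old != event), the loop
 * moves on and claims slot 1, so each core ends up reading a distinct
 * shared counter.
 */
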
static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (!x86_pmu.amd_nb_constraints)
		return 0;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return -ENOMEM;

	return 0;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	if (!x86_pmu.amd_nb_constraints)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			*onln = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (!x86_pmu.amd_nb_constraints)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can result in what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
#define OVERFLOW_WAIT_COUNT	50

static void amd_pmu_wait_on_overflow(int idx)
{
	unsigned int i;
	u64 counter;

	/*
	 * Wait for the counter to be reset if it has overflowed. This loop
	 * should exit very, very quickly, but just in case, don't wait
	 * forever...
	 */
	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
		rdmsrl(x86_pmu_event_addr(idx), counter);
		if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
			break;

		/* Might be in IRQ context, so can't sleep */
		udelay(1);
	}
}

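/*
 * Worked example (illustrative): with 48-bit counters, a sampling event
 * with period 0x1000 is programmed to (1ULL << 48) - 0x1000 =
 * 0xfffffffff000, whose bit 47 is set. On overflow the counter wraps to a
 * small value with bit 47 clear, and the bit stays clear until the NMI
 * handler reprograms the counter, which is exactly what the loop above
 * polls for.
 */
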
static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	x86_pmu_disable_all();

	/*
	 * This shouldn't be called from NMI context, but add a safeguard here
	 * to return, since if we're in NMI context we can't wait for an NMI
	 * to reset an overflowed counter value.
	 */
	if (in_nmi())
		return;

	/*
	 * Check each counter for overflow and wait for it to be reset by the
	 * NMI if it has overflowed. This relies on the fact that all active
	 * counters are always enabled when this function is called and
	 * ARCH_PERFMON_EVENTSEL_INT is always set.
	 */
	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_wait_on_overflow(idx);
	}
}

static void amd_pmu_disable_event(struct perf_event *event)
{
	x86_pmu_disable_event(event);

	/*
	 * This can be called from NMI context (via x86_pmu_stop). The counter
	 * may have overflowed, but either way, we'll never see it get reset
	 * by the NMI if we're already in the NMI. And the NMI latency support
	 * below will take care of any pending NMI that might have been
	 * generated by the overflow.
	 */
	if (in_nmi())
		return;

	amd_pmu_wait_on_overflow(event->hw.idx);
}

/*
 * Because of NMI latency, if multiple PMC counters are active or other sources
 * of NMIs are received, the perf NMI handler can handle one or more overflowed
 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 * back-to-back NMI support won't be active. This PMC handler needs to take into
 * account that this can occur, otherwise this could result in unknown NMI
 * messages being issued. Examples of this are PMC overflow while in the NMI
 * handler when multiple PMCs are active or PMC overflow while handling some
 * other source of an NMI.
 *
 * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
 * received during this window will be claimed. This prevents extending the
 * window past when it is possible that latent NMIs should be received. The
 * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
 * handled a counter. When an un-handled NMI is received, it will be claimed
 * only if arriving within that window.
 */
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	int handled;

	/* Process any counter overflows */
	handled = x86_pmu_handle_irq(regs);

	/*
	 * If a counter was handled, record a timestamp such that un-handled
	 * NMIs will be claimed if arriving within that window.
	 */
	if (handled) {
		this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);

		return handled;
	}

	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
		return NMI_DONE;

	return NMI_HANDLED;
}

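/*
 * Example timeline (illustrative): with a 100ms window (see
 * amd_core_pmu_init() below), handling a counter at time t sets
 * perf_nmi_tstamp = t + 100ms. An otherwise-unhandled NMI arriving at
 * t + 50ms is claimed (NMI_HANDLED); one arriving at t + 150ms is left
 * for other handlers (NMI_DONE).
 */
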
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

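/*
 * Usage sketch (illustrative): these format attributes are exported under
 * /sys/bus/event_source/devices/cpu/format/ and let user space encode raw
 * events symbolically, e.g.:
 *
 *	perf stat -e cpu/event=0x76,umask=0x00/ -- sleep 1
 *
 * where "event" fills config bits 0-7 and 32-35 and "umask" fills bits
 * 8-15, matching the strings above.
 */
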
/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

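/*
 * The masks above are counter bitmasks: amd_f15_PMC20 (0x07) allows
 * PERF_CTL[2:0] and amd_f15_PMC53 (0x38) allows PERF_CTL[5:3], while
 * amd_f15_PMC30 (0x09) allows PERF_CTL[3] and PERF_CTL[0]. The latter
 * partially overlaps the others without being a subset of either, which
 * is why it must be declared with EVENT_CONSTRAINT_OVERLAP.
 */
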
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* moved to uncore.c */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static struct event_constraint pair_constraint;

static struct event_constraint *
amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (amd_is_pair_event_code(hwc))
		return &pair_constraint;

	return &unconstrained;
}

static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_counter_pair(hwc))
		--cpuc->n_pair;
}

static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= amd_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.addr_offset		= amd_pmu_addr_offset,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,
	.events_sysfs_show	= amd_event_sysfs_show,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,

	.amd_nb_constraints	= 1,
};

static int __init amd_core_pmu_init(void)
{
	u64 even_ctr_mask = 0ULL;
	int i;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return 0;

	/* Avoid calculating the value each time in the NMI handler */
	perf_nmi_window = msecs_to_jiffies(100);

	/*
	 * If core performance counter extensions exist, we must use
	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
	 * amd_pmu_addr_offset().
	 */
	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
	/*
	 * AMD Core perfctr has separate MSRs for the NB events, see
	 * the amd/uncore.c driver.
	 */
	x86_pmu.amd_nb_constraints = 0;

	if (boot_cpu_data.x86 == 0x15) {
		pr_cont("Fam15h ");
		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
	}
	if (boot_cpu_data.x86 >= 0x17) {
		pr_cont("Fam17h+ ");
		/*
		 * Family 17h and compatibles have constraints for Large
		 * Increment per Cycle events: they may only be assigned an
		 * even numbered counter that has a consecutive adjacent odd
		 * numbered counter following it.
		 */
		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
			even_ctr_mask |= 1 << i;

		pair_constraint = (struct event_constraint)
				    __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
						       x86_pmu.num_counters / 2, 0,
						       PERF_X86_EVENT_PAIR);

		x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
		x86_pmu.put_event_constraints = amd_put_event_constraints_f17h;
		x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE;
		x86_pmu.flags |= PMU_FL_PAIR;
	}

	pr_cont("core perfctr, ");
	return 0;
}

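/*
 * Worked example (illustrative): with AMD64_NUM_COUNTERS_CORE == 6, the
 * loop above yields even_ctr_mask == 0x15 (counters 0, 2 and 4), and
 * pair_constraint allows at most num_counters / 2 == 3 Large Increment
 * per Cycle events to be scheduled at once.
 */
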
__init int amd_pmu_init(void)
{
	int ret;

	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	ret = amd_core_pmu_init();
	if (ret)
		return ret;

	if (num_possible_cpus() == 1) {
		/*
		 * No point in allocating data structures to serialize
		 * against other CPUs, when there is only the one CPU.
		 */
		x86_pmu.amd_nb_constraints = 0;
	}

	if (boot_cpu_data.x86 >= 0x17)
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
	else
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));

	return 0;
}

void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	amd_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	amd_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);