#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */
#define INTEL_PMC_MAX_GENERIC				32
#define INTEL_PMC_MAX_FIXED				3
#define INTEL_PMC_IDX_FIXED				32

#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187
#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
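/*
 * Illustrative sketch (not part of this header): a raw event select value
 * is built by OR-ing the fields above and is then written to one of the
 * MSR_ARCH_PERFMON_EVENTSELx registers by the PMU driver.  For example,
 * counting an event 0xc5 / umask 0x00 (the encoding is only assumed here
 * for illustration) in both user and kernel mode could look like:
 *
 *	u64 evtsel = (0xc5 & ARCH_PERFMON_EVENTSEL_EVENT)	 |
 *		     ((0x00 << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
 *		     ARCH_PERFMON_EVENTSEL_USR			 |
 *		     ARCH_PERFMON_EVENTSEL_OS			 |
 *		     ARCH_PERFMON_EVENTSEL_ENABLE;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, evtsel);
 */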
#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7
/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};
union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};
union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};
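/*
 * Illustrative sketch (assumption, not part of this header): the unions
 * above map directly onto the registers returned by CPUID leaf 0xa, so
 * enumeration code can do something along the lines of:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_edx edx;
 *	unsigned int ebx, unused;
 *
 *	cpuid(0xa, &eax.full, &ebx, &unused, &edx.full);
 *	pr_info("PMU v%d: %d GP counters x %d bits, %d fixed counters\n",
 *		eax.split.version_id, eax.split.num_counters,
 *		eax.split.bit_width, edx.split.num_counters_fixed);
 */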
/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */
/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
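/*
 * Illustrative sketch (assumption, not part of this header): each fixed
 * counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL starting
 * at bit 4*i (bit 0: count ring 0, bit 1: count rings 1-3, bit 3: PMI on
 * overflow).  Enabling FIXED_CTR1 (unhalted core cycles) for kernel and
 * user mode with overflow interrupts could look like:
 *
 *	u64 ctrl;
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	ctrl |= (0x3ULL | 0x8ULL) << (4 * 1);
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 */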
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)
/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F
/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL
/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
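/*
 * Illustrative sketch (assumption, not part of this header): arming IBS op
 * sampling roughly means writing a max count plus the enable bit into the
 * IBS op control MSR.  The stored max count is in units of 16 ops (the low
 * 4 bits are ignored), so a ~64k-op sampling period could be set up as:
 *
 *	u64 period = 0x10000;
 *	u64 config = ((period >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE;
 *
 *	wrmsrl(MSR_AMD64_IBSOPCTL, config);
 *
 * (MSR_AMD64_IBSOPCTL comes from <asm/msr-index.h>.)
 */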
#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
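/*
 * Illustrative sketch (assumption, not part of this header): callers
 * typically gate IBS setup on the capability bits returned above, e.g.:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (!(caps & IBS_CAPS_AVAIL))
 *		return -ENODEV;
 *	if (caps & IBS_CAPS_OPCNT)
 *		pr_info("IBS supports counting dispatched ops\n");
 */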
#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>
/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}
struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
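/*
 * Illustrative sketch (assumption, not part of this header): a hypervisor
 * such as KVM can use the list returned by perf_guest_get_msrs() to switch
 * PMU MSRs around guest entry, roughly:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		wrmsrl(msrs[i].msr, msrs[i].guest);  // write .host back on exit
 */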
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */