// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example, Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance, the AMD RDPMC instruction uses 0000_0003h in ECX to
 *      access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism,
 *      except that it also supports fixed counters. idx can be used as an
 *      index into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to the PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between pmc and
 *      perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
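
/*
 * Worked example (illustrative only, not tied to one CPU model): with 8 gp
 * counters and 3 fixed counters, the global PMC indices (kvm_pmc.idx) would
 * be 0..7 for the gp counters and INTEL_PMC_IDX_FIXED..INTEL_PMC_IDX_FIXED+2
 * (i.e. 32..34) for the fixed counters, while a guest RDPMC selects a fixed
 * counter by setting bit 30 of the index in ECX.
 */
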
static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;

#define KVM_X86_PMU_OP(func)						     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,			     \
				*(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));

#define __KVM_X86_PMU_OP(func) \
	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) \
	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}
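
/*
 * Illustrative note: once kvm_pmu_ops_update() has run, the static calls
 * declared above replace indirect branches through kvm_pmu_ops.  A caller
 * such as kvm_pmu_refresh() further down simply does
 *
 *	static_call(kvm_x86_pmu_refresh)(vcpu);
 *
 * and the call is patched to dispatch to whichever vendor (Intel or AMD)
 * implementation was registered; this is the standard static_call()
 * mechanism set up by DEFINE_STATIC_CALL_NULL() above.
 */
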
static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc);
}

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Ignore counters that have been reprogrammed already. */
	if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi))
		return;

	__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

	if (!pmc->intr)
		return;

	/*
	 * Inject the PMI. If the vCPU was in guest mode when the NMI arrived,
	 * the PMI can be injected on the next guest-mode entry. Otherwise we
	 * can't be sure that the vCPU wasn't executing the HLT instruction at
	 * the time of the VM-exit and thus won't re-enter guest mode until it
	 * is woken up. So we should wake it, but that is impossible from NMI
	 * context. Do it from irq work instead.
	 */
	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
	else
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;

	__kvm_perf_overflow(pmc, true);
}

static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  u64 config, bool exclude_user,
				  bool exclude_kernel, bool intr)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	if (type == PERF_TYPE_HARDWARE && config >= PERF_COUNT_HW_MAX)
		return;

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
	    guest_cpuid_is_intel(pmc->vcpu)) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->is_paused = false;
	pmc->intr = intr;
}

static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event || pmc->is_paused)
		return;

	/* Update the counter; reset the event value to avoid redundant accumulation. */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* Recalibrate the sample period and check whether it's accepted by the perf core. */
	if (perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* Reuse the perf_event, as pmc_reprogram_counter() does. */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	return true;
}

static int cmp_u64(const void *pa, const void *pb)
{
	u64 a = *(u64 *)pa;
	u64 b = *(u64 *)pb;

	return (a > b) - (a < b);
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	u64 config;
	u32 type = PERF_TYPE_RAW;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;

		if (bsearch(&key, filter->events, filter->nevents,
			    sizeof(__u64), cmp_u64))
			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
		else
			allow_event = filter->action == KVM_PMU_EVENT_DENY;
	}
	if (!allow_event)
		return;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = static_call(kvm_x86_pmu_pmc_perf_hw_id)(pmc);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & pmu->raw_event_mask;

	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT);
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
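
/*
 * Worked example (illustrative values): eventsel == 0x004300c0 has ENABLE
 * (bit 22), OS (bit 17) and USR (bit 16) set, with event select 0xc0 and
 * unit mask 0x00, i.e. "instructions retired" counted at any CPL.  Since no
 * edge/invert/cmask/TSX bits are set, reprogram_gp_counter() asks the vendor
 * pmc_perf_hw_id() hook for a generic id and would typically end up with
 * PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS rather than PERF_TYPE_RAW.
 */
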
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
	}

	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      static_call(kvm_x86_pmu_pmc_perf_hw_id)(pmc),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in()
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* Check whether idx is a valid index for accessing the PMU. */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
}

/*
 * Refresh the PMU configuration.  This function is generally called when the
 * underlying settings change (such as a change of the vCPU's PMU-related
 * CPUID), which should happen rarely.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	static_call(kvm_x86_pmu_refresh)(vcpu);
}
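
/*
 * Note (for orientation, not derived from this file alone): a refresh is
 * typically reached from kvm_pmu_init() below and when userspace updates the
 * guest CPUID (e.g. via the KVM_SET_CPUID2 ioctl), at which point the vendor
 * implementation re-reads the PMU-related CPUID leaves and resizes/masks the
 * counter state accordingly.
 */
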
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	static_call(kvm_x86_pmu_reset)(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	static_call(kvm_x86_pmu_init)(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 prev_count;

	prev_count = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);

	reprogram_counter(pmu, pmc->idx);
	if (pmc->counter < prev_count)
		__kvm_perf_overflow(pmc, false);
}

static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
					     unsigned int perf_hw_id)
{
	u64 old_eventsel = pmc->eventsel;
	unsigned int config;

	pmc->eventsel &= (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK);
	config = static_call(kvm_x86_pmu_pmc_perf_hw_id)(pmc);
	pmc->eventsel = old_eventsel;
	return config == perf_hw_id;
}

static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config = pmc->current_config;

	if (pmc_is_gp(pmc)) {
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		select_os = config & 0x1;
		select_user = config & 0x2;
	}

	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}

void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert, and CMASK bits. */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
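
/*
 * Illustrative userspace usage of the PMU event filter set below (a sketch
 * only; "vm_fd" stands in for the caller's VM file descriptor, and the event
 * encoding is just an example, e.g. 0x003c ~ unhalted core cycles):
 *
 *	struct kvm_pmu_event_filter *f;
 *
 *	f = calloc(1, sizeof(*f) + sizeof(__u64));
 *	f->action    = KVM_PMU_EVENT_DENY;
 *	f->nevents   = 1;
 *	f->events[0] = 0x003c;
 *	ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
 */
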
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	/*
	 * Sort the in-kernel list so that we can search it with bsearch.
	 */
	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}