// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However AMD doesn't support fixed-counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance AMD RDPMC uses 0000_0003h in ECX to access
 *      C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
 *      that it also supports fixed counters. idx can be used as an index
 *      into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between pmc and
 *      perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */

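/*
 * Example of the three index types (assuming an Intel guest with four gp
 * and three fixed counters): gp counter 2 is MSR_IA32_PERFCTR2, has
 * RDPMC idx 2 and pmc->idx 2; fixed counter 1 is MSR_CORE_PERF_FIXED_CTR1,
 * has RDPMC idx ((1u << 30) | 1) and pmc->idx (INTEL_PMC_IDX_FIXED + 1).
 */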
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

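/*
 * Record an overflow of @pmc: flag the counter in global_status, ask the
 * vCPU to reprogram it via KVM_REQ_PMU and, if the counter is configured
 * to raise an interrupt, deliver (or schedule delivery of) a PMI.
 */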
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Ignore counters that have been reprogrammed already. */
	if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi))
		return;

	__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

	if (!pmc->intr)
		return;

	/*
	 * Inject PMI. If the vCPU was in guest mode when the NMI arrived,
	 * the PMI can be injected on guest-mode re-entry. Otherwise we
	 * can't be sure the vCPU wasn't executing the HLT instruction at
	 * the time of the VM-exit, and it may not re-enter guest mode
	 * until woken up. So we should wake it, but that is impossible
	 * from NMI context. Do it from irq work instead.
	 */
	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
	else
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

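/*
 * perf overflow callback for the host perf_event backing a vPMC.  This
 * typically runs from the host PMI handler, i.e. in NMI context, which is
 * why __kvm_perf_overflow() may defer the wakeup to irq_work.
 */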
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;

	__kvm_perf_overflow(pmc, true);
}

static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->is_paused = false;
}

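/*
 * Pause the host perf_event and fold its current count into pmc->counter,
 * so the vPMC's value stays correct while the event is reconfigured.
 */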
static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event || pmc->is_paused)
		return;

	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
	if (perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* reuse perf_event to serve as pmc_reprogram_counter() does */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	return true;
}

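/*
 * (Re)program a general purpose counter from its guest-visible EVENTSEL
 * value: apply the VM's PMU event filter, then map the raw event to a
 * generic perf hardware event when possible, falling back to PERF_TYPE_RAW.
 */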
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	int i;
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		for (i = 0; i < filter->nevents; i++)
			if (filter->events[i] ==
			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
				break;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    i == filter->nevents)
			allow_event = false;
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    i < filter->nevents)
			allow_event = false;
	}
	if (!allow_event)
		return;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

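/*
 * (Re)program a fixed counter from its 4-bit field in FIXED_CTR_CTRL:
 * bits 0-1 select the privilege levels to count (OS and/or USR) and bit 3
 * enables a PMI on overflow.
 */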
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
	}

	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

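/*
 * Handle a deferred KVM_REQ_PMU request: reprogram every counter flagged
 * in reprogram_pmi and, if a cleanup pass was scheduled, release the
 * perf_events of vPMCs that went unused during the last time slice.
 */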
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* Check if idx is a valid index to access the PMU. */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

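/*
 * The VMware backdoor exposes pseudo PMCs through RDPMC: the raw host TSC,
 * "real time" (boot-based time in nanoseconds) and "apparent time" (real
 * time adjusted by the guest's kvmclock offset).
 */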
static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

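/*
 * Emulate RDPMC: bit 31 of the guest's ECX selects "fast" mode, which
 * returns only the low 32 bits of the counter; the remaining bits are
 * translated to a vPMC by vendor code.  A non-zero return value tells the
 * caller to inject #GP.
 */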
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

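/* Deliver a PMI to the guest via the local APIC's LVTPC entry. */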
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		if (kvm_x86_ops.pmu_ops->deliver_pmi)
			kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}

/*
 * Refresh PMU settings. This function is generally called when underlying
 * settings change (such as the guest's PMU CPUID being updated), which
 * should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops.pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops.pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	if (kvm_x86_ops.pmu_ops->cleanup)
		kvm_x86_ops.pmu_ops->cleanup(vcpu);

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

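/*
 * Increment a vPMC by one, e.g. when KVM emulates an event the counter is
 * programmed to count, wrapping at the counter's width and signalling an
 * overflow when it wraps.
 */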
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 prev_count;

	prev_count = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);

	reprogram_counter(pmu, pmc->idx);
	if (pmc->counter < prev_count)
		__kvm_perf_overflow(pmc, false);
}

static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
					     unsigned int perf_hw_id)
{
	u64 old_eventsel = pmc->eventsel;
	unsigned int config;

	pmc->eventsel &= (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK);
	config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
	pmc->eventsel = old_eventsel;
	return config == perf_hw_id;
}

static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config = pmc->current_config;

	if (pmc_is_gp(pmc)) {
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		select_os = config & 0x1;
		select_user = config & 0x2;
	}

	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}

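/*
 * Called when KVM emulates something the guest may be counting (e.g. a
 * retired instruction or branch): bump every enabled vPMC whose selected
 * event and privilege level match @perf_hw_id and the current CPL.
 */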
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);

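/*
 * Install a new PMU event filter for the VM (KVM_SET_PMU_EVENT_FILTER).
 * The previous filter, if any, is only freed after an SRCU grace period so
 * that readers holding kvm->srcu never see a stale pointer.
 */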
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}