KVM: x86/pmu: Prevent zero period event from being repeatedly released
[platform/kernel/linux-starfive.git] arch/x86/kvm/pmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);

static const struct x86_cpu_id vmx_icl_pebs_cpu[] = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
        {}
};

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index used to access perf counters (PMC):
 *     1. MSR (named msr): For example, Intel has MSR_IA32_PERFCTRn and AMD
 *        has MSR_K7_PERFCTRn and, for families 15H and later,
 *        MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
 *        aliased to MSR_K7_PERFCTRn.
 *     2. MSR Index (named idx): This is normally used by the RDPMC
 *        instruction. For instance, the AMD RDPMC instruction uses
 *        0000_0003h in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel
 *        has a similar mechanism, except that it also supports fixed
 *        counters. idx can be used as an index into the gp and fixed
 *        counters.
 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *        code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *        all perf counters (both gp and fixed). The mapping between pmc
 *        and perf counters is as follows:
 *        * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *                 [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
 *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
 */
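/*
 * For example, on Intel, fixed counter 0 is fixed_counters[0] in
 * "struct kvm_pmu", and its global PMC index (kvm_pmc.idx) is
 * INTEL_PMC_IDX_FIXED + 0, i.e. 32.
 */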

static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;

#define KVM_X86_PMU_OP(func)                                         \
        DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,                          \
                                *(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
        memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));

#define __KVM_X86_PMU_OP(func) \
        static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) \
        WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
        return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc);
}

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
        struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        kvm_pmu_deliver_pmi(vcpu);
}

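/*
 * Record the overflow in the guest's global status and pend a PMI for the
 * vCPU.  A PEBS overflow sets GLOBAL_STATUS_BUFFER_OVF instead of the
 * counter's own bit, and is skipped entirely for emulated (non-PMI)
 * overflow of a PEBS event.  See the comment below for why irq_work is
 * needed when running in NMI context.
 */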
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        bool skip_pmi = false;

        if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
                if (!in_pmi) {
                        /*
                         * TODO: KVM is currently _choosing_ not to generate
                         * records for emulated instructions, avoiding a
                         * BUFFER_OVF PMI when there are no records.  Strictly
                         * speaking, records should be generated in the right
                         * context as well, to improve sampling accuracy.
                         */
                        skip_pmi = true;
                } else {
                        /* Indicate PEBS overflow PMI to guest. */
                        skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
                                                      (unsigned long *)&pmu->global_status);
                }
        } else {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
        }

        if (!pmc->intr || skip_pmi)
                return;

        /*
         * Inject PMI.  If the vCPU was in guest mode during the NMI, the PMI
         * can be injected on guest-mode re-entry.  Otherwise we can't be sure
         * the vCPU wasn't executing a hlt instruction at the time of VM-exit,
         * in which case it won't re-enter guest mode until woken.  It needs a
         * wakeup, but that is impossible from NMI context, so do it from
         * irq work instead.
         */
        if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
                irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
        else
                kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

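/*
 * Overflow callback for the host perf_event; invoked by perf, potentially
 * from NMI context.
 */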
static void kvm_perf_overflow(struct perf_event *perf_event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;

        /*
         * Ignore overflow events for counters that are scheduled to be
         * reprogrammed, e.g. if a PMI for the previous event races with KVM's
         * handling of a related guest WRMSR.
         */
        if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
                return;

        __kvm_perf_overflow(pmc, true);

        kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

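/*
 * Create a host perf_event that mirrors the guest's counter configuration.
 * The event is created pinned and counting only guest time (exclude_host),
 * with the sample period derived from the current counter value so that the
 * host event overflows exactly when the guest counter would.
 */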
static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
                                 bool exclude_user, bool exclude_kernel,
                                 bool intr)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = type,
                .size = sizeof(attr),
                .pinned = true,
                .exclude_idle = true,
                .exclude_host = 1,
                .exclude_user = exclude_user,
                .exclude_kernel = exclude_kernel,
                .config = config,
        };
        bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);

        attr.sample_period = get_sample_period(pmc, pmc->counter);

        if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
            guest_cpuid_is_intel(pmc->vcpu)) {
                /*
                 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
                 * period. Just clear the sample period so at least
                 * allocating the counter doesn't fail.
                 */
                attr.sample_period = 0;
        }
        if (pebs) {
                /*
                 * A non-zero precision level turns the ordinary guest event
                 * into a guest PEBS event, which makes the host PEBS PMI
                 * handler determine whether a PEBS overflow PMI comes from
                 * the host counters or the guest.
                 *
                 * For most PEBS hardware events, a difference in the software
                 * precision levels of guest and host PEBS events does not
                 * affect the accuracy of the PEBS profiling result, because
                 * the "event IP" in the PEBS record is calibrated on the
                 * guest side.
                 *
                 * On Icelake everything is fine. Other hardware (GLC+, TNT+)
                 * that could possibly care here is unsupported and needs
                 * changes.
                 */
                attr.precise_ip = 1;
                if (x86_match_cpu(vmx_icl_pebs_cpu) && pmc->idx == 32)
                        attr.precise_ip = 3;
        }

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event)) {
                pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
                            PTR_ERR(event), pmc->idx);
                return PTR_ERR(event);
        }

        pmc->perf_event = event;
        pmc_to_pmu(pmc)->event_count++;
        pmc->is_paused = false;
        pmc->intr = intr || pebs;
        return 0;
}

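/*
 * Pause the host event and fold its accumulated count into the guest
 * counter value, truncated to the counter's width.
 */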
static void pmc_pause_counter(struct kvm_pmc *pmc)
{
        u64 counter = pmc->counter;

        if (!pmc->perf_event || pmc->is_paused)
                return;

        /* update counter, reset event value to avoid redundant accumulation */
        counter += perf_event_pause(pmc->perf_event, true);
        pmc->counter = counter & pmc_bitmask(pmc);
        pmc->is_paused = true;
}

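/*
 * Try to reuse the existing perf_event instead of releasing and recreating
 * it.  The sample period is refreshed only for sampling events;
 * perf_event_period() rejects a zero period, so calling it for a
 * non-sampling (zero period) event would fail here and cause the event to
 * be released and recreated on every reprogram (see the commit "KVM:
 * x86/pmu: Prevent zero period event from being repeatedly released").
 */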
static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
        if (!pmc->perf_event)
                return false;

        /* recalibrate sample period and check if it's accepted by perf core */
        if (is_sampling_event(pmc->perf_event) &&
            perf_event_period(pmc->perf_event,
                              get_sample_period(pmc, pmc->counter)))
                return false;

        if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
            (!!pmc->perf_event->attr.precise_ip))
                return false;

        /* reuse the perf_event, as pmc_reprogram_counter() would otherwise recreate it */
        perf_event_enable(pmc->perf_event);
        pmc->is_paused = false;

        return true;
}

static int cmp_u64(const void *pa, const void *pb)
{
        u64 a = *(u64 *)pa;
        u64 b = *(u64 *)pb;

        return (a > b) - (a < b);
}

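/*
 * Returns true if the event is allowed to be programmed: the vendor code
 * must recognize the hardware event, and, if userspace installed a PMU
 * event filter, the event must pass it.  GP counters are matched against
 * the filter's sorted event list; fixed counters are matched against the
 * fixed counter bitmap.
 */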
static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{
        struct kvm_pmu_event_filter *filter;
        struct kvm *kvm = pmc->vcpu->kvm;
        bool allow_event = true;
        __u64 key;
        int idx;

        if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
                return false;

        filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
        if (!filter)
                goto out;

        if (pmc_is_gp(pmc)) {
                key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
                if (bsearch(&key, filter->events, filter->nevents,
                            sizeof(__u64), cmp_u64))
                        allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
                else
                        allow_event = filter->action == KVM_PMU_EVENT_DENY;
        } else {
                idx = pmc->idx - INTEL_PMC_IDX_FIXED;
                if (filter->action == KVM_PMU_EVENT_DENY &&
                    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
                        allow_event = false;
                if (filter->action == KVM_PMU_EVENT_ALLOW &&
                    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
                        allow_event = false;
        }

out:
        return allow_event;
}

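/*
 * (Re)program the host perf_event backing the guest counter: pause the
 * current event, bail if the counter is disabled or filtered out, emulate a
 * pending overflow if the counter wrapped, and then either resume the
 * existing event (if the configuration is unchanged) or release it and
 * create a new one.
 */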
static void reprogram_counter(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        u64 eventsel = pmc->eventsel;
        u64 new_config = eventsel;
        u8 fixed_ctr_ctrl;

        pmc_pause_counter(pmc);

        if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc))
                goto reprogram_complete;

        if (!check_pmu_event_filter(pmc))
                goto reprogram_complete;

        if (pmc->counter < pmc->prev_counter)
                __kvm_perf_overflow(pmc, false);

        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
                printk_once("kvm pmu: pin control bit is ignored\n");

        if (pmc_is_fixed(pmc)) {
                fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
                                                  pmc->idx - INTEL_PMC_IDX_FIXED);
                if (fixed_ctr_ctrl & 0x1)
                        eventsel |= ARCH_PERFMON_EVENTSEL_OS;
                if (fixed_ctr_ctrl & 0x2)
                        eventsel |= ARCH_PERFMON_EVENTSEL_USR;
                if (fixed_ctr_ctrl & 0x8)
                        eventsel |= ARCH_PERFMON_EVENTSEL_INT;
                new_config = (u64)fixed_ctr_ctrl;
        }

        if (pmc->current_config == new_config && pmc_resume_counter(pmc))
                goto reprogram_complete;

        pmc_release_perf_event(pmc);

        pmc->current_config = new_config;

        /*
         * If reprogramming fails, e.g. due to contention, leave the counter's
         * reprogram bit set, i.e. opportunistically try again on the next PMU
         * refresh.  Don't make a new request as doing so can stall the guest
         * if reprogramming repeatedly fails.
         */
        if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
                                  (eventsel & pmu->raw_event_mask),
                                  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                                  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                                  eventsel & ARCH_PERFMON_EVENTSEL_INT))
                return;

reprogram_complete:
        clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
        pmc->prev_counter = 0;
}

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int bit;

        for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
                struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);

                if (unlikely(!pmc)) {
                        clear_bit(bit, pmu->reprogram_pmi);
                        continue;
                }

                reprogram_counter(pmc);
        }

        /*
         * Unused perf_events are only released if the corresponding MSRs
         * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
         * triggers KVM_REQ_PMU if cleanup is needed.
         */
        if (unlikely(pmu->need_cleanup))
                kvm_pmu_cleanup(vcpu);
}

/* Check whether idx is a valid index for accessing the PMU via RDPMC. */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
        return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
        switch (pmc_idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                return true;
        }
        return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        u64 ctr_val;

        switch (idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
                ctr_val = rdtsc();
                break;
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
                ctr_val = ktime_get_boottime_ns();
                break;
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                ctr_val = ktime_get_boottime_ns() +
                        vcpu->kvm->arch.kvmclock_offset;
                break;
        default:
                return 1;
        }

        *data = ctr_val;
        return 0;
}

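/*
 * Emulate RDPMC.  Bit 31 of ECX selects "fast" mode, which truncates the
 * result to 32 bits.  The read is rejected (return 1) if the vCPU is in
 * protected mode at CPL > 0 with CR4.PCE clear, as on hardware.
 */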
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        bool fast_mode = idx & (1u << 31);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 mask = fast_mode ? ~0u : ~0ull;

        if (!pmu->version)
                return 1;

        if (is_vmware_backdoor_pmc(idx))
                return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

        pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
        if (!pmc)
                return 1;

        if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
            (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
            (kvm_read_cr0(vcpu) & X86_CR0_PE))
                return 1;

        *data = pmc_read_counter(pmc) & mask;
        return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
        if (lapic_in_kernel(vcpu)) {
                static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
        }
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
                static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);

        if (pmc)
                __set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
        return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
}

/*
 * Refresh PMU settings.  This function is generally called when the
 * underlying settings change (such as a guest-visible change to the PMU
 * CPUID leaves), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
        static_call(kvm_x86_pmu_refresh)(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        irq_work_sync(&pmu->irq_work);
        static_call(kvm_x86_pmu_reset)(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        memset(pmu, 0, sizeof(*pmu));
        static_call(kvm_x86_pmu_init)(vcpu);
        init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
        pmu->event_count = 0;
        pmu->need_cleanup = false;
        kvm_pmu_refresh(vcpu);
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = NULL;
        DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
        int i;

        pmu->need_cleanup = false;

        bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
                      pmu->pmc_in_use, X86_PMC_IDX_MAX);

        for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
                pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

                if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
                        pmc_stop_counter(pmc);
        }

        static_call_cond(kvm_x86_pmu_cleanup)(vcpu);

        bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_pmu_reset(vcpu);
}

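/*
 * Emulate a single increment of the counter, e.g. for an instruction
 * emulated by KVM, and queue the counter for reprogramming so that any
 * resulting overflow is detected and delivered to the guest.
 */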
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
        pmc->prev_counter = pmc->counter;
        pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
        kvm_pmu_request_counter_reprogam(pmc);
}

static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
        unsigned int perf_hw_id)
{
        return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
                AMD64_RAW_EVENT_MASK_NB);
}

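/*
 * Check whether the counter is configured to count at the current privilege
 * level: OS (CPL 0) or USR (CPL > 0).  For fixed counters the equivalent
 * enable bits live in the FIXED_CTR_CTRL MSR field.
 */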
static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
        bool select_os, select_user;
        u64 config;

        if (pmc_is_gp(pmc)) {
                config = pmc->eventsel;
                select_os = config & ARCH_PERFMON_EVENTSEL_OS;
                select_user = config & ARCH_PERFMON_EVENTSEL_USR;
        } else {
                config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
                                          pmc->idx - INTEL_PMC_IDX_FIXED);
                select_os = config & 0x1;
                select_user = config & 0x2;
        }

        return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}

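/*
 * Increment every enabled, in-use counter that is programmed to count the
 * given generic perf hardware event, e.g. to account for an instruction or
 * branch emulated by KVM on behalf of the guest.
 */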
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int i;

        for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
                pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

                if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
                        continue;

                /* Ignore checks for edge detect, pin control, invert and CMASK bits */
                if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
                        kvm_pmu_incr_counter(pmc);
        }
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);

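/*
 * Handle KVM_SET_PMU_EVENT_FILTER: validate and install a userspace-supplied
 * event filter, then force all vCPUs to reprogram their counters so the new
 * filter takes effect immediately.
 */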
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
        struct kvm_pmu_event_filter tmp, *filter;
        struct kvm_vcpu *vcpu;
        unsigned long i;
        size_t size;
        int r;

        if (copy_from_user(&tmp, argp, sizeof(tmp)))
                return -EFAULT;

        if (tmp.action != KVM_PMU_EVENT_ALLOW &&
            tmp.action != KVM_PMU_EVENT_DENY)
                return -EINVAL;

        if (tmp.flags != 0)
                return -EINVAL;

        if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
                return -E2BIG;

        size = struct_size(filter, events, tmp.nevents);
        filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
        if (!filter)
                return -ENOMEM;

        r = -EFAULT;
        if (copy_from_user(filter, argp, size))
                goto cleanup;

        /* Ensure nevents can't be changed between the user copies. */
        *filter = tmp;

        /* Sort the in-kernel list so that we can search it with bsearch. */
        sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);

        mutex_lock(&kvm->lock);
        filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
                                     mutex_is_locked(&kvm->lock));
        synchronize_srcu_expedited(&kvm->srcu);

        BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
                     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));

        kvm_for_each_vcpu(i, vcpu, kvm)
                atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);

        kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);

        mutex_unlock(&kvm->lock);

        r = 0;
cleanup:
        kfree(filter);
        return r;
}