arch/x86/kvm/svm/pmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
        PMU_TYPE_COUNTER = 0,
        PMU_TYPE_EVNTSEL,
};

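/*
 * Map a raw counter index to its kvm_pmc.  array_index_nospec() clamps
 * the index after the bounds check so that a mispredicted branch cannot
 * be used to speculatively read past gp_counters[] (Spectre-v1 hardening).
 */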
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        unsigned int num_counters = pmu->nr_arch_gp_counters;

        if (pmc_idx >= num_counters)
                return NULL;

        return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}

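/*
 * Translate a PMU MSR to its kvm_pmc.  The legacy K7 MSRs alias the first
 * four PERFCTR_CORE counters; the caller states whether it expects an
 * event select or a counter MSR so that mismatched accesses are rejected.
 */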
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
{
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
        unsigned int idx;

        if (!vcpu->kvm->arch.enable_pmu)
                return NULL;

        switch (msr) {
        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
                if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                        return NULL;
                /*
                 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
                 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
                 */
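                /*
                 * For example, MSR_F15H_PERF_CTL0 is 0xc0010200 (even) and
                 * MSR_F15H_PERF_CTR0 is 0xc0010201 (odd); the low address
                 * bit alone distinguishes event selects from counters.
                 */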
                idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
                if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
                        return NULL;
                break;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
                idx = msr - MSR_K7_EVNTSEL0;
                break;
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
                idx = msr - MSR_K7_PERFCTR0;
                break;
        default:
                return NULL;
        }

        return amd_pmc_idx_to_pmc(pmu, idx);
}

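/*
 * Unlike Intel, AMD provides no CPUID mask for hiding individual
 * architectural events, so every event the guest can encode is treated
 * as available.
 */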
static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
        return true;
}

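/*
 * RDPMC takes the counter index in ECX; the upper two bits are type/flag
 * bits (on Intel, e.g., bit 30 selects the fixed counters).  AMD has no
 * fixed counters, so the bits are masked off before range-checking the
 * GP counter index.
 */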
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        idx &= ~(3u << 30);

        return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
        unsigned int idx, u64 *mask)
{
        return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
}

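/* Resolve an MSR to a PMC: try it as a counter first, then as an event select. */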
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

        return pmc;
}

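/*
 * MSR validity is tiered: the legacy K7 MSRs exist whenever a vPMU is
 * present (version > 0), the PERFCTR_CORE CTL/CTR pairs require the
 * corresponding CPUID bit, and the PerfMonV2 global MSRs (plus CTL/CTR
 * pairs beyond the first six) require a v2 PMU.
 */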
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        switch (msr) {
        case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
                return pmu->version > 0;
        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
                return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
                return pmu->version > 1;
        default:
                if (msr > MSR_F15H_PERF_CTR5 &&
                    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
                        return pmu->version > 1;
                break;
        }

        return amd_msr_idx_to_pmc(vcpu, msr);
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                msr_info->data = pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                msr_info->data = pmc->eventsel;
                return 0;
        }

        return 1;
}

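/*
 * Counter writes go through pmc_write_counter(), which truncates the
 * value to the counter width and refreshes the perf sample period.
 * Event-select writes strip reserved bits and, only on a real change,
 * request reprogramming of the backing perf event.
 */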
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc_write_counter(pmc, data);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                data &= ~pmu->reserved_bits;
                if (data != pmc->eventsel) {
                        pmc->eventsel = data;
                        kvm_pmu_request_counter_reprogram(pmc);
                }
                return 0;
        }

        return 1;
}

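/*
 * Refresh vPMU state from guest CPUID.  PerfMonV2 guests advertise their
 * core PMC count in leaf 0x80000022, PERFCTR_CORE guests get six counters
 * (AMD64_NUM_COUNTERS_CORE), legacy guests four (AMD64_NUM_COUNTERS); the
 * result is capped at what host perf actually supports.
 */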
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        union cpuid_0x80000022_ebx ebx;

        pmu->version = 1;
        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFMON_V2)) {
                pmu->version = 2;
                /*
                 * Note, PERFMON_V2 is also in 0x80000022.0x0, i.e. the guest
                 * CPUID entry is guaranteed to be non-NULL.
                 */
                BUILD_BUG_ON(x86_feature_cpuid(X86_FEATURE_PERFMON_V2).function != 0x80000022 ||
                             x86_feature_cpuid(X86_FEATURE_PERFMON_V2).index);
                ebx.full = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0)->ebx;
                pmu->nr_arch_gp_counters = ebx.split.num_core_pmc;
        } else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
        } else {
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
        }

        pmu->nr_arch_gp_counters = min_t(unsigned int, pmu->nr_arch_gp_counters,
                                         kvm_pmu_cap.num_counters_gp);

        if (pmu->version > 1) {
                pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
                pmu->global_status_mask = pmu->global_ctrl_mask;
        }

        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
        pmu->reserved_bits = 0xfffffff000280000ull;
        pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
        /* Not applicable to AMD; clear them to avoid any fallout. */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->nr_arch_fixed_counters = 0;
        bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

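/*
 * One-time vCPU init: the BUILD_BUG_ONs guard against KVM_AMD_PMC_MAX_GENERIC
 * outgrowing the limits it must fit within, then each potential GP counter
 * gets its type, owning vCPU, and index.
 */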
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
        BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);

        for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }
}

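/*
 * Stop the perf event backing each counter and zero all counter and
 * global state; invoked when the vCPU's PMU is reset (e.g. on vCPU
 * RESET/INIT).
 */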
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
        }

        pmu->global_ctrl = pmu->global_status = 0;
}

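/*
 * Dispatch table wired into the common x86 vPMU code; __initdata because
 * the structure is copied into kvm_pmu_ops during module init.
 */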
struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .hw_event_available = amd_hw_event_available,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
        .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
        .is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
        .is_valid_msr = amd_is_valid_msr,
        .get_msr = amd_pmu_get_msr,
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,
        .reset = amd_pmu_reset,
        .EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
        .MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
        .MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
};