arch/x86/kvm/cpuid.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include <asm/sgx.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);

u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
        int feature_bit = 0;
        u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

        xstate_bv &= XFEATURE_MASK_EXTEND;
        while (xstate_bv) {
                if (xstate_bv & 0x1) {
                        u32 eax, ebx, ecx, edx, offset;
                        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
                        /* ECX[1]: 64B alignment in compacted form */
                        if (compacted)
                                offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
                        else
                                offset = ebx;
                        ret = max(ret, offset + eax);
                }

                xstate_bv >>= 1;
                feature_bit++;
        }

        return ret;
}
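
/*
 * Editor's illustration (not part of the kernel source): with only the AVX
 * bit (bit 2) set in xstate_bv, hardware typically reports CPUID.0xD.2 as
 * EAX = 256 (component size) and EBX = 576 (standard-format offset).  ret
 * starts at 512 + 64 = 576 (legacy area plus XSAVE header), so both the
 * standard size (EBX + EAX) and the compacted size (ret, rounded up to a
 * 64-byte boundary when ECX[1] is set, plus EAX) come to 832 bytes here.
 */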

/*
 * This one is tied to SSB in the user API, and not
 * visible in /proc/cpuinfo.
 */
#define KVM_X86_FEATURE_PSFD            (13*32+28) /* Predictive Store Forwarding Disable */

#define F feature_bit
#define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)
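
/*
 * Editor's illustration (not part of the kernel source): F(XSAVE) expands to
 * feature_bit(XSAVE), i.e. the mask of X86_FEATURE_XSAVE within its 32-bit
 * leaf word, and is unconditional.  SF(SGX1) yields the same mask only if
 * the host itself has X86_FEATURE_SGX1, and 0 otherwise, so scattered
 * features are never advertised beyond host support.
 */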


static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
        struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *e;
        int i;

        for (i = 0; i < nent; i++) {
                e = &entries[i];

                if (e->function == function &&
                    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
                        return e;
        }

        return NULL;
}
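
/*
 * Editor's illustration (not part of the kernel source): leaf 0x7 entries
 * carry KVM_CPUID_FLAG_SIGNIFCANT_INDEX (the misspelling is baked into the
 * UAPI), so cpuid_entry2_find(entries, nent, 7, 1) matches only the entry
 * whose index is 1.  Leaf 0x1 lacks the flag, so any index argument matches
 * its single entry.
 */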

static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
                           struct kvm_cpuid_entry2 *entries,
                           int nent)
{
        struct kvm_cpuid_entry2 *best;
        u64 xfeatures;

        /*
         * KVM's canonical address checks assume a 48-bit or 57-bit virtual
         * address width; reject any other non-zero value.
         */
        best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
        if (best) {
                int vaddr_bits = (best->eax & 0xff00) >> 8;

                if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
                        return -EINVAL;
        }

        /*
         * Exposing dynamic xfeatures to the guest requires additional
         * enabling in the FPU, e.g. to expand the guest XSAVE state size.
         */
        best = cpuid_entry2_find(entries, nent, 0xd, 0);
        if (!best)
                return 0;

        xfeatures = best->eax | ((u64)best->edx << 32);
        xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
        if (!xfeatures)
                return 0;

        return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
}

/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
                                 int nent)
{
        struct kvm_cpuid_entry2 *orig;
        int i;

        if (nent != vcpu->arch.cpuid_nent)
                return -EINVAL;

        for (i = 0; i < nent; i++) {
                orig = &vcpu->arch.cpuid_entries[i];
                if (e2[i].function != orig->function ||
                    e2[i].index != orig->index ||
                    e2[i].flags != orig->flags ||
                    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
                    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
                        return -EINVAL;
        }

        return 0;
}

static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
{
        u32 function;
        struct kvm_cpuid_entry2 *entry;

        vcpu->arch.kvm_cpuid_base = 0;

        for_each_possible_hypervisor_cpuid_base(function) {
                entry = kvm_find_cpuid_entry(vcpu, function, 0);

                if (entry) {
                        u32 signature[3];

                        signature[0] = entry->ebx;
                        signature[1] = entry->ecx;
                        signature[2] = entry->edx;

                        BUILD_BUG_ON(sizeof(signature) > sizeof(KVM_SIGNATURE));
                        if (!memcmp(signature, KVM_SIGNATURE, sizeof(signature))) {
                                vcpu->arch.kvm_cpuid_base = function;
                                break;
                        }
                }
        }
}
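
/*
 * Editor's illustration (not part of the kernel source): the 12-byte
 * KVM_SIGNATURE "KVMKVMKVM\0\0\0" is returned in the base leaf's output
 * registers, normally at 0x40000000:
 *
 *   EBX = 0x4b4d564b  "KVMK"
 *   ECX = 0x564b4d56  "VMKV"
 *   EDX = 0x0000004d  "M\0\0\0"
 */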

static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
                                              struct kvm_cpuid_entry2 *entries, int nent)
{
        u32 base = vcpu->arch.kvm_cpuid_base;

        if (!base)
                return NULL;

        return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES, 0);
}

static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
        return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
                                             vcpu->arch.cpuid_nent);
}

void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);

        /*
         * save the feature bitmap to avoid cpuid lookup for every PV
         * operation
         */
        if (best)
                vcpu->arch.pv_cpuid.features = best->eax;
}

/*
 * Calculate guest's supported XCR0 taking into account guest CPUID data and
 * supported_xcr0 (comprised of host configuration and KVM_SUPPORTED_XCR0).
 */
static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
{
        struct kvm_cpuid_entry2 *best;

        best = cpuid_entry2_find(entries, nent, 0xd, 0);
        if (!best)
                return 0;

        return (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
}
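
/*
 * Editor's illustration (not part of the kernel source): CPUID.0xD.0 splits
 * the XCR0 mask across EAX (bits 31:0) and EDX (bits 63:32).  If the guest
 * entry has EAX = 0x207 (x87 | SSE | AVX | PKRU) and EDX = 0, the rebuilt
 * mask is 0x207; intersecting with supported_xcr0 guarantees the guest is
 * never offered an xfeature the host and KVM cannot back.
 */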

static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
                                       int nent)
{
        struct kvm_cpuid_entry2 *best;
        u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);

        best = cpuid_entry2_find(entries, nent, 1, 0);
        if (best) {
                /* Update OSXSAVE bit */
                if (boot_cpu_has(X86_FEATURE_XSAVE))
                        cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
                                   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));

                cpuid_entry_change(best, X86_FEATURE_APIC,
                           vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
        }

        best = cpuid_entry2_find(entries, nent, 7, 0);
        if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
                cpuid_entry_change(best, X86_FEATURE_OSPKE,
                                   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));

        best = cpuid_entry2_find(entries, nent, 0xD, 0);
        if (best)
                best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

        best = cpuid_entry2_find(entries, nent, 0xD, 1);
        if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
                     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

        best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
        if (kvm_hlt_in_guest(vcpu->kvm) && best &&
                (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
                best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

        if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
                best = cpuid_entry2_find(entries, nent, 0x1, 0);
                if (best)
                        cpuid_entry_change(best, X86_FEATURE_MWAIT,
                                           vcpu->arch.ia32_misc_enable_msr &
                                           MSR_IA32_MISC_ENABLE_MWAIT);
        }

        /*
         * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
         * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
         * requested XCR0 value.  The enclave's XFRM must be a subset of XCR0
         * at the time of EENTER, thus adjust the allowed XFRM by the guest's
         * supported XCR0.  Similar to XCR0 handling, FP and SSE are forced to
         * '1' even on CPUs that don't support XSAVE.
         */
        best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
        if (best) {
                best->ecx &= guest_supported_xcr0 & 0xffffffff;
                best->edx &= guest_supported_xcr0 >> 32;
                best->ecx |= XFEATURE_MASK_FPSSE;
        }
}

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
        __kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);

static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct kvm_cpuid_entry2 *best;
        u64 guest_supported_xcr0;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (best && apic) {
                if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;

                kvm_apic_set_version(vcpu);
        }

        guest_supported_xcr0 =
                cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);

        vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;

        kvm_update_pv_runtime(vcpu);

        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
        vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

        kvm_pmu_refresh(vcpu);
        vcpu->arch.cr4_guest_rsvd_bits =
            __cr4_reserved_bits(guest_cpuid_has, vcpu);

        kvm_hv_set_cpuid(vcpu);

        /* Invoke the vendor callback only after the above state is updated. */
        static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);

        /*
         * Refresh the MMU last, as it consumes any vendor-specific
         * adjustments to the reserved GPA bits made by the callback above.
         */
        kvm_mmu_after_set_cpuid(vcpu);
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;
not_found:
        return 36;
}

/*
 * This "raw" version returns the reserved GPA bits without any adjustments for
 * encryption technologies that usurp bits.  The raw mask should be used if and
 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
 */
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
{
        return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}
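
/*
 * Editor's illustration (not part of the kernel source): for a vCPU with
 * MAXPHYADDR = 48, rsvd_bits(48, 63) evaluates to 0xffff000000000000, i.e.
 * any GPA with one of bits 63:48 set is reserved.
 */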

static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
                        int nent)
{
        int r;

        __kvm_update_cpuid_runtime(vcpu, e2, nent);

        /*
         * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
         * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
         * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
         * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
         * the core vCPU model on the fly. It would've been better to forbid any
         * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
         * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
         * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
         * whether the supplied CPUID data is equal to what's already set.
         */
        if (vcpu->arch.last_vmentry_cpu != -1) {
                r = kvm_cpuid_check_equal(vcpu, e2, nent);
                if (r)
                        return r;

                kvfree(e2);
                return 0;
        }

        r = kvm_check_cpuid(vcpu, e2, nent);
        if (r)
                return r;

        kvfree(vcpu->arch.cpuid_entries);
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = nent;

        kvm_update_kvm_cpuid_base(vcpu);
        kvm_vcpu_after_set_cpuid(vcpu);

        return 0;
}

/*
 * Legacy ioctl: convert the old kvm_cpuid_entry format, which lacks the
 * index and flags fields, into kvm_cpuid_entry2.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *e = NULL;
        struct kvm_cpuid_entry2 *e2 = NULL;

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                return -E2BIG;

        if (cpuid->nent) {
                e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
                if (IS_ERR(e))
                        return PTR_ERR(e);

                e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
                if (!e2) {
                        r = -ENOMEM;
                        goto out_free_cpuid;
                }
        }
        for (i = 0; i < cpuid->nent; i++) {
                e2[i].function = e[i].function;
                e2[i].eax = e[i].eax;
                e2[i].ebx = e[i].ebx;
                e2[i].ecx = e[i].ecx;
                e2[i].edx = e[i].edx;
                e2[i].index = 0;
                e2[i].flags = 0;
                e2[i].padding[0] = 0;
                e2[i].padding[1] = 0;
                e2[i].padding[2] = 0;
        }

        r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
        if (r)
                kvfree(e2);

out_free_cpuid:
        kvfree(e);

        return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        struct kvm_cpuid_entry2 *e2 = NULL;
        int r;

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                return -E2BIG;

        if (cpuid->nent) {
                e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
                if (IS_ERR(e2))
                        return PTR_ERR(e2);
        }

        r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
        if (r)
                kvfree(e2);

        return r;
}
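
/*
 * Editor's illustration (a userspace sketch, not part of the kernel source):
 * a VMM commonly feeds KVM_GET_SUPPORTED_CPUID straight back into
 * KVM_SET_CPUID2, e.g. (error handling omitted, MAX_ENT is arbitrary):
 *
 *     #define MAX_ENT 256
 *     struct kvm_cpuid2 *cpuid;
 *
 *     cpuid = calloc(1, sizeof(*cpuid) +
 *                       MAX_ENT * sizeof(struct kvm_cpuid_entry2));
 *     cpuid->nent = MAX_ENT;
 *     ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);  // /dev/kvm fd
 *     ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);          // per-vCPU fd
 */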

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}

/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
        const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
        struct kvm_cpuid_entry2 entry;

        reverse_cpuid_check(leaf);

        cpuid_count(cpuid.function, cpuid.index,
                    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

        kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

static __always_inline
void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
        /* Use kvm_cpu_cap_mask for non-scattered leafs. */
        BUILD_BUG_ON(leaf < NCAPINTS);

        kvm_cpu_caps[leaf] = mask;

        __kvm_cpu_cap_mask(leaf);
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
        /* Use kvm_cpu_cap_init_scattered for scattered leafs. */
        BUILD_BUG_ON(leaf >= NCAPINTS);

        kvm_cpu_caps[leaf] &= mask;

        __kvm_cpu_cap_mask(leaf);
}
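
/*
 * Editor's illustration (not part of the kernel source): a call such as
 * kvm_cpu_cap_mask(CPUID_1_ECX, F(XSAVE) | F(AVX)) ANDs KVM's allowlist
 * into the host capabilities copied from boot_cpu_data below, after which
 * __kvm_cpu_cap_mask() ANDs in raw CPUID output.  A feature is therefore
 * advertised only if the host kernel, KVM, and the hardware all support it.
 */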

void kvm_set_cpu_caps(void)
{
#ifdef CONFIG_X86_64
        unsigned int f_gbpages = F(GBPAGES);
        unsigned int f_lm = F(LM);
        unsigned int f_xfd = F(XFD);
#else
        unsigned int f_gbpages = 0;
        unsigned int f_lm = 0;
        unsigned int f_xfd = 0;
#endif
        memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

        BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
                     sizeof(boot_cpu_data.x86_capability));

        memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
               sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));

        kvm_cpu_cap_mask(CPUID_1_ECX,
                /*
                 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
                 * advertised to guests via CPUID!
                 */
                F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
                F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
                F(F16C) | F(RDRAND)
        );
        /* KVM emulates x2apic in software irrespective of host support. */
        kvm_cpu_cap_set(X86_FEATURE_X2APIC);

        kvm_cpu_cap_mask(CPUID_1_EDX,
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */
        );

        kvm_cpu_cap_mask(CPUID_7_0_EBX,
                F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
                F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
                F(RTM) | F(ZERO_FCS_FDS) | 0 /* MPX */ | F(AVX512F) |
                F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
                F(CLFLUSHOPT) | F(CLWB) | 0 /* INTEL_PT */ | F(AVX512PF) |
                F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
                F(AVX512VL));

        kvm_cpu_cap_mask(CPUID_7_ECX,
                F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ | F(RDPID) |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
                F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /* WAITPKG */ |
                F(SGX_LC) | F(BUS_LOCK_DETECT)
        );
        /* Set LA57 based on hardware capability. */
        if (cpuid_ecx(7) & F(LA57))
                kvm_cpu_cap_set(X86_FEATURE_LA57);

        /*
         * PKU not yet implemented for shadow paging and requires OSPKE
         * to be set on the host. Clear it if that is not the case
         */
        if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
                kvm_cpu_cap_clear(X86_FEATURE_PKU);

        kvm_cpu_cap_mask(CPUID_7_EDX,
                F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
                F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
                F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
                F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
                F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16)
        );

        /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
        kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
        kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

        if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
                kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
        if (boot_cpu_has(X86_FEATURE_STIBP))
                kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
        if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);

        kvm_cpu_cap_mask(CPUID_7_1_EAX,
                F(AVX_VNNI) | F(AVX512_BF16)
        );

        kvm_cpu_cap_mask(CPUID_D_1_EAX,
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
        );

        kvm_cpu_cap_init_scattered(CPUID_12_EAX,
                SF(SGX1) | SF(SGX2)
        );

        kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
                0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
                F(TOPOEXT) | 0 /* PERFCTR_CORE */
        );

        kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
        );

        if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
                kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

        kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
                F(CLZERO) | F(XSAVEERPTR) |
                F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
                F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
                __feature_bit(KVM_X86_FEATURE_PSFD)
        );

        /*
         * AMD has separate bits for each SPEC_CTRL bit.
         * arch/x86/kernel/cpu/bugs.c is kind enough to
         * record that in cpufeatures so use them.
         */
        if (boot_cpu_has(X86_FEATURE_IBPB))
                kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
        if (boot_cpu_has(X86_FEATURE_IBRS))
                kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
        if (boot_cpu_has(X86_FEATURE_STIBP))
                kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
        if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
        /*
         * The preference is to use SPEC CTRL MSR instead of the
         * VIRT_SPEC MSR.
         */
        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

        /*
         * Hide all SVM features by default, SVM will set the cap bits for
         * features it emulates and/or exposes for L1.
         */
        kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

        kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
                0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
                F(SME_COHERENT));

        kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN)
        );

        /*
         * Hide RDTSCP and RDPID if either feature is reported as supported but
         * probing MSR_TSC_AUX failed.  This is purely a sanity check and
         * should never happen, but the guest will likely crash if RDTSCP or
         * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
         * the past.  For example, the sanity check may fire if this instance of
         * KVM is running as L1 on top of an older, broken KVM.
         */
        if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
                     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
                     !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
                kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
                kvm_cpu_cap_clear(X86_FEATURE_RDPID);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);

struct kvm_cpuid_array {
        struct kvm_cpuid_entry2 *entries;
        int maxnent;
        int nent;
};

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
                                              u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *entry;

        if (array->nent >= array->maxnent)
                return NULL;

        entry = &array->entries[array->nent++];

        entry->function = function;
        entry->index = index;
        entry->flags = 0;

        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

        switch (function) {
        case 4:
        case 7:
        case 0xb:
        case 0xd:
        case 0xf:
        case 0x10:
        case 0x12:
        case 0x14:
        case 0x17:
        case 0x18:
        case 0x1d:
        case 0x1e:
        case 0x1f:
        case 0x8000001d:
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                break;
        }

        return entry;
}

static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
        struct kvm_cpuid_entry2 *entry;

        if (array->nent >= array->maxnent)
                return -E2BIG;

        entry = &array->entries[array->nent];
        entry->function = func;
        entry->index = 0;
        entry->flags = 0;

        switch (func) {
        case 0:
                entry->eax = 7;
                ++array->nent;
                break;
        case 1:
                entry->ecx = F(MOVBE);
                ++array->nent;
                break;
        case 7:
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                entry->eax = 0;
                if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
                        entry->ecx = F(RDPID);
                ++array->nent;
                break;
        default:
                break;
        }

        return 0;
}

static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
        struct kvm_cpuid_entry2 *entry;
        int r, i, max_idx;

        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();

        r = -E2BIG;

        entry = do_host_cpuid(array, function, 0);
        if (!entry)
                goto out;

        switch (function) {
        case 0:
                /* Limited to the highest leaf implemented in KVM. */
                entry->eax = min(entry->eax, 0x1fU);
                break;
        case 1:
                cpuid_entry_override(entry, CPUID_1_EDX);
                cpuid_entry_override(entry, CPUID_1_ECX);
                break;
        case 2:
                /*
                 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
                 * CPUID(function=2, index=0) may return different results each
                 * time, with the least-significant byte in EAX enumerating the
                 * number of times software should do CPUID(2, 0).
                 *
                 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
                 * idiotic.  Intel's SDM states that EAX & 0xff "will always
                 * return 01H. Software should ignore this value and not
                 * interpret it as an informational descriptor", while AMD's
                 * APM states that CPUID(2) is reserved.
                 *
                 * WARN if a Frankenstein CPU that supports virtualization and
                 * a stateful CPUID.0x2 is encountered.
                 */
                WARN_ON_ONCE((entry->eax & 0xff) > 1);
                break;
        /* Functions 4 and 0x8000001d take an additional index. */
        case 4:
        case 0x8000001d:
                /*
                 * Read entries until the cache type in the previous entry is
                 * zero, i.e. indicates an invalid entry.
                 */
                for (i = 1; entry->eax & 0x1f; ++i) {
                        entry = do_host_cpuid(array, function, i);
                        if (!entry)
                                goto out;
                }
                break;
        case 6: /* Thermal management */
                entry->eax = 0x4; /* allow ARAT */
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        /* Function 7 takes an additional index. */
        case 7:
                entry->eax = min(entry->eax, 1u);
                cpuid_entry_override(entry, CPUID_7_0_EBX);
                cpuid_entry_override(entry, CPUID_7_ECX);
                cpuid_entry_override(entry, CPUID_7_EDX);

                /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
                if (entry->eax == 1) {
                        entry = do_host_cpuid(array, function, 1);
                        if (!entry)
                                goto out;

                        cpuid_entry_override(entry, CPUID_7_1_EAX);
                        entry->ebx = 0;
                        entry->ecx = 0;
                        entry->edx = 0;
                }
                break;
        case 9:
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                struct x86_pmu_capability cap;
                union cpuid10_eax eax;
                union cpuid10_edx edx;

                perf_get_x86_pmu_capability(&cap);

                /*
                 * The guest architectural PMU is only supported if an
                 * architectural PMU exists on the host and the module
                 * parameters allow it.
                 */
                if (!cap.version || !enable_pmu)
                        memset(&cap, 0, sizeof(cap));

                eax.split.version_id = min(cap.version, 2);
                eax.split.num_counters = cap.num_counters_gp;
                eax.split.bit_width = cap.bit_width_gp;
                eax.split.mask_length = cap.events_mask_len;

                edx.split.num_counters_fixed =
                        min(cap.num_counters_fixed, KVM_PMC_MAX_FIXED);
                edx.split.bit_width_fixed = cap.bit_width_fixed;
                if (cap.version)
                        edx.split.anythread_deprecated = 1;
                edx.split.reserved1 = 0;
                edx.split.reserved2 = 0;

                entry->eax = eax.full;
                entry->ebx = cap.events_mask;
                entry->ecx = 0;
                entry->edx = edx.full;
                break;
        }
        /*
         * Per Intel's SDM, leaf 0x1f is a superset of leaf 0xb, thus both can
         * be handled by common code.
         */
        case 0x1f:
        case 0xb:
                /*
                 * Populate entries until the level type (ECX[15:8]) of the
                 * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
                 * the starting entry, filled by the primary do_host_cpuid().
                 */
                for (i = 1; entry->ecx & 0xff00; ++i) {
                        entry = do_host_cpuid(array, function, i);
                        if (!entry)
                                goto out;
                }
                break;
        case 0xd: {
                u64 permitted_xcr0 = supported_xcr0 & xstate_get_guest_group_perm();
                u64 permitted_xss = supported_xss;

                entry->eax &= permitted_xcr0;
                entry->ebx = xstate_required_size(permitted_xcr0, false);
                entry->ecx = entry->ebx;
                entry->edx &= permitted_xcr0 >> 32;
                if (!permitted_xcr0)
                        break;

                entry = do_host_cpuid(array, function, 1);
                if (!entry)
                        goto out;

                cpuid_entry_override(entry, CPUID_D_1_EAX);
                if (entry->eax & (F(XSAVES)|F(XSAVEC)))
                        entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
                                                          true);
                else {
                        WARN_ON_ONCE(permitted_xss != 0);
                        entry->ebx = 0;
                }
                entry->ecx &= permitted_xss;
                entry->edx &= permitted_xss >> 32;

                for (i = 2; i < 64; ++i) {
                        bool s_state;
                        if (permitted_xcr0 & BIT_ULL(i))
                                s_state = false;
                        else if (permitted_xss & BIT_ULL(i))
                                s_state = true;
                        else
                                continue;

                        entry = do_host_cpuid(array, function, i);
                        if (!entry)
                                goto out;

                        /*
                         * The supported check above should have filtered out
                         * invalid sub-leafs.  Only valid sub-leafs should
                         * reach this point, and they should have a non-zero
                         * save state size.  Furthermore, check whether the
                         * processor agrees with permitted_xcr0/permitted_xss
                         * on whether this is an XCR0- or IA32_XSS-managed area.
                         */
                        if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
                                --array->nent;
                                continue;
                        }

                        if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
                                entry->ecx &= ~BIT_ULL(2);
                        entry->edx = 0;
                }
                break;
        }
        case 0x12:
                /* Intel SGX */
                if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                /*
                 * Index 0: Sub-features, MISCSELECT (a.k.a. extended features)
                 * and max enclave sizes.  The SGX sub-features and MISCSELECT
                 * are restricted by kernel and KVM capabilities (like most
                 * feature flags), while enclave size is unrestricted.
                 */
                cpuid_entry_override(entry, CPUID_12_EAX);
                entry->ebx &= SGX_MISC_EXINFO;

                entry = do_host_cpuid(array, function, 1);
                if (!entry)
                        goto out;

                /*
                 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
                 * feature flags.  Advertise all supported flags, including
                 * privileged attributes that require explicit opt-in from
                 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
                 * expected to derive it from supported XCR0.
                 */
                entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
                              SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY |
                              SGX_ATTR_KSS;
                entry->ebx &= 0;
                break;
        /* Intel PT */
        case 0x14:
                if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
                        if (!do_host_cpuid(array, function, i))
                                goto out;
                }
                break;
        /* Intel AMX TILE */
        case 0x1d:
                if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
                        if (!do_host_cpuid(array, function, i))
                                goto out;
                }
                break;
        case 0x1e: /* TMUL information */
                if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }
                break;
        case KVM_CPUID_SIGNATURE: {
                const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
                break;
        }
        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT) |
                             (1 << KVM_FEATURE_PV_TLB_FLUSH) |
                             (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
                             (1 << KVM_FEATURE_PV_SEND_IPI) |
                             (1 << KVM_FEATURE_POLL_CONTROL) |
                             (1 << KVM_FEATURE_PV_SCHED_YIELD) |
                             (1 << KVM_FEATURE_ASYNC_PF_INT);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001f);
                break;
        case 0x80000001:
                cpuid_entry_override(entry, CPUID_8000_0001_EDX);
                cpuid_entry_override(entry, CPUID_8000_0001_ECX);
                break;
        case 0x80000006:
                /* L2 cache and TLB: pass through host info. */
                break;
        case 0x80000007: /* Advanced power management */
                /* invariant TSC is CPUID.80000007H:EDX[8] */
                entry->edx &= (1 << 8);
                /* mask against host */
                entry->edx &= boot_cpu_data.x86_power;
                entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0x80000008: {
                unsigned g_phys_as = (entry->eax >> 16) & 0xff;
                unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned phys_as = entry->eax & 0xff;

                /*
                 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
                 * the guest operates in the same PA space as the host, i.e.
                 * reductions in MAXPHYADDR for memory encryption affect shadow
                 * paging, too.
                 *
                 * If TDP is enabled but an explicit guest MAXPHYADDR is not
                 * provided, use the raw bare metal MAXPHYADDR as reductions to
                 * the HPAs do not affect GPAs.
                 */
                if (!tdp_enabled)
                        g_phys_as = boot_cpu_data.x86_phys_bits;
                else if (!g_phys_as)
                        g_phys_as = phys_as;

                entry->eax = g_phys_as | (virt_as << 8);
                entry->edx = 0;
                cpuid_entry_override(entry, CPUID_8000_0008_EBX);
                break;
        }
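        /*
         * Editor's illustration (not part of the kernel source): if the host
         * reports EAX = 0x3030 for the 0x80000008 case above (48 virtual and
         * 48 physical bits, no guest MAXPHYADDR) and TDP is enabled, g_phys_as
         * falls back to phys_as and the guest sees EAX = (48 << 8) | 48, i.e.
         * 0x3030 again.
         */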
        case 0x8000000A:
                if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }
                entry->eax = 1; /* SVM revision 1 */
                entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
                                   ASID emulation to nested SVM */
                entry->ecx = 0; /* Reserved */
                cpuid_entry_override(entry, CPUID_8000_000A_EDX);
                break;
        case 0x80000019:
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
        case 0x8000001e:
                break;
        case 0x8000001F:
                if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                } else {
                        cpuid_entry_override(entry, CPUID_8000_001F_EAX);

                        /*
                         * Enumerate '0' for "PA bits reduction", the adjusted
                         * MAXPHYADDR is enumerated directly (see 0x80000008).
                         */
                        entry->ebx &= ~GENMASK(11, 6);
                }
                break;
        /* Add support for Centaur's CPUID instruction */
        case 0xC0000000:
                /* Just support up to 0xC0000004 now */
                entry->eax = min(entry->eax, 0xC0000004);
                break;
        case 0xC0000001:
                cpuid_entry_override(entry, CPUID_C000_0001_EDX);
                break;
        case 3: /* Processor serial number */
        case 5: /* MONITOR/MWAIT */
        case 0xC0000002:
        case 0xC0000003:
        case 0xC0000004:
        default:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        }

        r = 0;

out:
        put_cpu();

        return r;
}

static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
                         unsigned int type)
{
        if (type == KVM_GET_EMULATED_CPUID)
                return __do_cpuid_func_emulated(array, func);

        return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
                          unsigned int type)
{
        u32 limit;
        int r;

        if (func == CENTAUR_CPUID_SIGNATURE &&
            boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
                return 0;

        r = do_cpuid_func(array, func, type);
        if (r)
                return r;

        limit = array->entries[array->nent - 1].eax;
        for (func = func + 1; func <= limit; ++func) {
                r = do_cpuid_func(array, func, type);
                if (r)
                        break;
        }

        return r;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
                                 __u32 num_entries, unsigned int ioctl_type)
{
        int i;
        __u32 pad[3];

        if (ioctl_type != KVM_GET_EMULATED_CPUID)
                return false;

        /*
         * We want to make sure that ->padding is being passed clean from
         * userspace in case we want to use it for something in the future.
         *
         * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
         * have to content ourselves with checking only the emulated side. /me
         * sheds a tear.
         */
        for (i = 0; i < num_entries; i++) {
                if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
                        return true;

                if (pad[0] || pad[1] || pad[2])
                        return true;
        }
        return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type)
{
        static const u32 funcs[] = {
                0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
        };

        struct kvm_cpuid_array array = {
                .nent = 0,
        };
        int r, i;

        if (cpuid->nent < 1)
                return -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;

        if (sanity_check_entries(entries, cpuid->nent, type))
                return -EINVAL;

        array.entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
                                           cpuid->nent));
        if (!array.entries)
                return -ENOMEM;

        array.maxnent = cpuid->nent;

        for (i = 0; i < ARRAY_SIZE(funcs); i++) {
                r = get_cpuid_func(&array, funcs[i], type);
                if (r)
                        goto out_free;
        }
        cpuid->nent = array.nent;

        if (copy_to_user(entries, array.entries,
                         array.nent * sizeof(struct kvm_cpuid_entry2)))
                r = -EFAULT;

out_free:
        vfree(array.entries);
        return r;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index)
{
        return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
                                 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A primary
 * class exists if a guest CPUID entry for its <base> leaf exists.  For a given
 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a range of 0x100 leaves.  E.g.
 * if QEMU is advertising support for both HyperV and KVM, the resulting
 * Hypervisor CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
        struct kvm_cpuid_entry2 *basic, *class;
        u32 function = *fn_ptr;

        basic = kvm_find_cpuid_entry(vcpu, 0, 0);
        if (!basic)
                return NULL;

        if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
            is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
                return NULL;

        if (function >= 0x40000000 && function <= 0x4fffffff)
                class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0);
        else if (function >= 0xc0000000)
                class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0);
        else
                class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);

        if (class && function <= class->eax)
                return NULL;

        /*
         * Leaf specific adjustments are also applied when redirecting to the
         * max basic entry, e.g. if the max basic leaf is 0xb but there is no
         * entry for CPUID.0xb.index (see below), then the output value for EDX
         * needs to be pulled from CPUID.0xb.1.
         */
        *fn_ptr = basic->eax;

        /*
         * The class does not exist or the requested function is out of range;
         * the effective CPUID entry is the max basic leaf.  Note, the index of
         * the original requested leaf is observed!
         */
        return kvm_find_cpuid_entry(vcpu, basic->eax, index);
}
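
/*
 * Editor's illustration (not part of the kernel source): on an Intel-flavored
 * guest whose max basic leaf (CPUID.0.EAX) is 0xd, a lookup of the undefined
 * leaf 0x15 is redirected to leaf 0xd while keeping the requested index.  On
 * an AMD- or Hygon-flavored guest the same lookup returns NULL and the caller
 * emits all zeroes, matching AMD semantics.
 */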

bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool exact_only)
{
        u32 orig_function = *eax, function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *entry;
        bool exact, used_max_basic = false;

        entry = kvm_find_cpuid_entry(vcpu, function, index);
        exact = !!entry;

        if (!entry && !exact_only) {
                entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
                used_max_basic = !!entry;
        }

        if (entry) {
                *eax = entry->eax;
                *ebx = entry->ebx;
                *ecx = entry->ecx;
                *edx = entry->edx;
                if (function == 7 && index == 0) {
                        u64 data;
                        if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
                            (data & TSX_CTRL_CPUID_CLEAR))
                                *ebx &= ~(F(RTM) | F(HLE));
                }
        } else {
                *eax = *ebx = *ecx = *edx = 0;
                /*
                 * When leaf 0BH or 1FH is defined, CL is pass-through
                 * and EDX is always the x2APIC ID, even for undefined
                 * subleaves. Index 1 will exist iff the leaf is
                 * implemented, so we pass through CL iff leaf 1
                 * exists. EDX can be copied from any existing index.
                 */
                if (function == 0xb || function == 0x1f) {
                        entry = kvm_find_cpuid_entry(vcpu, function, 1);
                        if (entry) {
                                *ecx = index & 0xff;
                                *edx = entry->edx;
                        }
                }
        }
        trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
                        used_max_basic);
        return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        u32 eax, ebx, ecx, edx;

        if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
                return 1;

        eax = kvm_rax_read(vcpu);
        ecx = kvm_rcx_read(vcpu);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
        kvm_rax_write(vcpu, eax);
        kvm_rbx_write(vcpu, ebx);
        kvm_rcx_write(vcpu, ecx);
        kvm_rdx_write(vcpu, edx);
        return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
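
/*
 * Editor's illustration (not part of the kernel source): the path above runs
 * whenever a guest executes CPUID, e.g. this guest-side probe of the KVM
 * signature leaf:
 *
 *     unsigned int eax = 0x40000000, ebx, ecx, edx;
 *
 *     asm volatile("cpuid"
 *                  : "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
 *
 *     // ebx/ecx/edx spell "KVMKVMKVM\0\0\0" when running on KVM
 *
 * The instruction exits to KVM, which calls kvm_emulate_cpuid() and in turn
 * kvm_cpuid() to fill the registers from the cached entries.
 */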