// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/fpu/xstate.h>
#include <asm/cpuid.h>

/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);
u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;

			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			/* ECX[1]: 64B alignment in compacted form */
			if (compacted)
				offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
			else
				offset = ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}
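/*
 * Worked example (illustrative; the offset and size come from host CPUID and
 * vary by CPU): for xstate_bv == XFEATURE_MASK_YMM in the non-compacted
 * format, CPUID.0xD.2 typically reports EBX (offset) = 576 and EAX (size) =
 * 256, so the function returns max(576, 576 + 256) = 832 bytes.
 */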
/* Scattered Flag - For features that are scattered by cpufeatures.h. */
#define SF(name)						\
({								\
	BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES);	\
	(boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0);	\
})
/*
 * Magic value used by KVM when querying userspace-provided CPUID entries and
 * doesn't care about the CPUID index because the index of the function in
 * question is not significant.  Note, this magic value must have at least one
 * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
 * to avoid false positives when processing guest CPUID input.
 */
#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
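/*
 * Illustrative note (not in the original source): because cpuid_entry2_find()
 * takes @index as a u64, the magic value can never collide with a legitimate
 * 32-bit guest index, e.g.:
 *
 *	(u64)0xffffffffu == KVM_CPUID_INDEX_NOT_SIGNIFICANT	-> false
 */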
static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	for (i = 0; i < nent; i++) {
		e = &entries[i];

		if (e->function != function)
			continue;

		/*
		 * If the index isn't significant, use the first entry with a
		 * matching function.  It's userspace's responsibility to not
		 * provide "duplicate" entries in all cases.
		 */
		if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
			return e;

		/*
		 * Similarly, use the first matching entry if KVM is doing a
		 * lookup (as opposed to emulating CPUID) for a function that's
		 * architecturally defined as not having a significant index.
		 */
		if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
			/*
			 * Direct lookups from KVM should not diverge from what
			 * KVM defines internally (the architectural behavior).
			 */
			WARN_ON_ONCE(cpuid_function_is_indexed(function));
			return e;
		}
	}

	return NULL;
}
static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
			   struct kvm_cpuid_entry2 *entries,
			   int nent)
{
	struct kvm_cpuid_entry2 *best;
	u64 xfeatures;

	/*
	 * The existing code assumes virtual address is 48-bit or 57-bit in the
	 * canonical address checks; exit if it is ever changed.
	 */
	best = cpuid_entry2_find(entries, nent, 0x80000008,
				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	/*
	 * Exposing dynamic xfeatures to the guest requires additional
	 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
	 */
	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	xfeatures = best->eax | ((u64)best->edx << 32);
	xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
	if (!xfeatures)
		return 0;

	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
}
/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
				 int nent)
{
	struct kvm_cpuid_entry2 *orig;
	int i;

	if (nent != vcpu->arch.cpuid_nent)
		return -EINVAL;

	for (i = 0; i < nent; i++) {
		orig = &vcpu->arch.cpuid_entries[i];
		if (e2[i].function != orig->function ||
		    e2[i].index != orig->index ||
		    e2[i].flags != orig->flags ||
		    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
		    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
			return -EINVAL;
	}

	return 0;
}
static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
							    const char *sig)
{
	struct kvm_hypervisor_cpuid cpuid = {};
	struct kvm_cpuid_entry2 *entry;
	u32 base;

	for_each_possible_hypervisor_cpuid_base(base) {
		entry = kvm_find_cpuid_entry(vcpu, base);

		if (entry) {
			u32 signature[3];

			signature[0] = entry->ebx;
			signature[1] = entry->ecx;
			signature[2] = entry->edx;

			if (!memcmp(signature, sig, sizeof(signature))) {
				cpuid.base = base;
				cpuid.limit = entry->eax;
				break;
			}
		}
	}

	return cpuid;
}
static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
							      struct kvm_cpuid_entry2 *entries,
							      int nent)
{
	u32 base = vcpu->arch.kvm_cpuid.base;

	if (!base)
		return NULL;

	return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}

static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
	return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
					     vcpu->arch.cpuid_nent);
}
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);

	/*
	 * Save the feature bitmap to avoid a CPUID lookup for every PV
	 * operation.
	 */
	if (best)
		vcpu->arch.pv_cpuid.features = best->eax;
}
/*
 * Calculate guest's supported XCR0 taking into account guest CPUID data and
 * KVM's supported XCR0 (composed of the host's XCR0 and KVM_SUPPORTED_XCR0).
 */
static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
{
	struct kvm_cpuid_entry2 *best;

	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
}
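/*
 * Illustrative example (host dependent, not from the original source): if
 * guest CPUID.0xD.0 reports EAX = 0x2e7 and EDX = 0 while
 * kvm_caps.supported_xcr0 is 0xe7, the result is 0xe7, i.e. XCR0 bits that
 * userspace requested but KVM/host can't support are silently dropped.
 */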
static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
				       int nent)
{
	struct kvm_cpuid_entry2 *best;

	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = cpuid_entry2_find(entries, nent, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));

	best = cpuid_entry2_find(entries, nent, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = cpuid_entry2_find(entries, nent, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
}

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
{
	struct kvm_cpuid_entry2 *entry;

	entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
				  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
}
static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	vcpu->arch.guest_supported_xcr0 =
		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);

	/*
	 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
	 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
	 * supported by the host.
	 */
	vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
						       XFEATURE_MASK_FPSSE;

	kvm_update_pv_runtime(vcpu);

	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	kvm_pmu_refresh(vcpu);
	vcpu->arch.cr4_guest_rsvd_bits =
		__cr4_reserved_bits(guest_cpuid_has, vcpu);

	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
						    vcpu->arch.cpuid_nent));

	/* Invoke the vendor callback only after the above state is updated. */
	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);

	/*
	 * Update the MMU last, as it needs to consume any vendor-specific
	 * adjustments to the reserved GPA bits made by the callback above.
	 */
	kvm_mmu_after_set_cpuid(vcpu);
}
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
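/*
 * Decoding example (illustrative): CPUID.0x80000008:EAX = 0x3028 encodes a
 * 40-bit physical address width in EAX[7:0] (0x28) and a 48-bit virtual
 * address width in EAX[15:8] (0x30).
 */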
374 * This "raw" version returns the reserved GPA bits without any adjustments for
375 * encryption technologies that usurp bits. The raw mask should be used if and
376 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
378 u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
380 return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
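/*
 * For example (illustrative): with a 40-bit guest MAXPHYADDR,
 * rsvd_bits(40, 63) is 0xffffff0000000000, i.e. GPA bits 63:40 are reserved.
 */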
static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
			 int nent)
{
	int r;

	__kvm_update_cpuid_runtime(vcpu, e2, nent);

	/*
	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
	 * faults due to reusing SPs/SPTEs.  In practice no sane VMM mucks with
	 * the core vCPU model on the fly.  It would've been better to forbid
	 * any KVM_SET_CPUID{,2} calls after KVM_RUN altogether, but
	 * unfortunately some VMMs (e.g. QEMU) reuse vCPU fds for CPU
	 * hotplug/unplug and do KVM_SET_CPUID{,2} again.  To support this
	 * legacy behavior, check whether the supplied CPUID data is equal to
	 * what's already set.
	 */
	if (kvm_vcpu_has_run(vcpu)) {
		r = kvm_cpuid_check_equal(vcpu, e2, nent);
		if (r)
			return r;

		kvfree(e2);
		return 0;
	}

	if (kvm_cpuid_has_hyperv(e2, nent)) {
		r = kvm_hv_vcpu_init(vcpu);
		if (r)
			return r;
	}

	r = kvm_check_cpuid(vcpu, e2, nent);
	if (r)
		return r;

	kvfree(vcpu->arch.cpuid_entries);
	vcpu->arch.cpuid_entries = e2;
	vcpu->arch.cpuid_nent = nent;

	vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
	vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
	kvm_vcpu_after_set_cpuid(vcpu);

	return 0;
}
/* Legacy path for old userspace passing struct kvm_cpuid_entry to a new kernel. */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *e = NULL;
	struct kvm_cpuid_entry2 *e2 = NULL;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
		if (IS_ERR(e))
			return PTR_ERR(e);

		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
		if (!e2) {
			r = -ENOMEM;
			goto out_free_cpuid;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		e2[i].function = e[i].function;
		e2[i].eax = e[i].eax;
		e2[i].ebx = e[i].ebx;
		e2[i].ecx = e[i].ecx;
		e2[i].edx = e[i].edx;
		e2[i].index = 0;
		e2[i].flags = 0;
		e2[i].padding[0] = 0;
		e2[i].padding[1] = 0;
		e2[i].padding[2] = 0;
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

out_free_cpuid:
	kvfree(e);

	return r;
}
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *e2 = NULL;
	int r;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
		if (IS_ERR(e2))
			return PTR_ERR(e2);
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

	return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		return -E2BIG;

	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	cpuid->nent = vcpu->arch.cpuid_nent;
	return 0;
}
/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

static __always_inline
void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
	BUILD_BUG_ON(leaf < NCAPINTS);

	kvm_cpu_caps[leaf] = mask;

	__kvm_cpu_cap_mask(leaf);
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
	BUILD_BUG_ON(leaf >= NCAPINTS);

	kvm_cpu_caps[leaf] &= mask;

	__kvm_cpu_cap_mask(leaf);
}
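/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	kvm_cpu_cap_mask(CPUID_1_ECX, F(XSAVE) | F(AVX));
 *
 * restricts a leaf that has a cpufeatures word to the given features ANDed
 * with raw host CPUID, whereas kvm_cpu_cap_init_kvm_defined() seeds a
 * KVM-only leaf (one with no cpufeatures word) from scratch before applying
 * the same raw host CPUID mask.
 */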
void kvm_set_cpu_caps(void)
{
#ifdef CONFIG_X86_64
	unsigned int f_gbpages = F(GBPAGES);
	unsigned int f_lm = F(LM);
	unsigned int f_xfd = F(XFD);
#else
	unsigned int f_gbpages = 0;
	unsigned int f_lm = 0;
	unsigned int f_xfd = 0;
#endif
	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
		     sizeof(boot_cpu_data.x86_capability));

	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
	kvm_cpu_cap_mask(CPUID_1_ECX,
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND)
	);
	/* KVM emulates x2apic in software irrespective of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);

	kvm_cpu_cap_mask(CPUID_1_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */
	);
	kvm_cpu_cap_mask(CPUID_7_0_EBX,
		F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
		F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
		F(RTM) | F(ZERO_FCS_FDS) | 0 /* MPX */ | F(AVX512F) |
		F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
		F(CLFLUSHOPT) | F(CLWB) | 0 /* INTEL_PT */ | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
		F(AVX512VL));
	kvm_cpu_cap_mask(CPUID_7_ECX,
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /* WAITPKG */ |
		F(SGX_LC) | F(BUS_LOCK_DETECT)
	);
	/* Set LA57 based on hardware capability. */
	if (cpuid_ecx(7) & F(LA57))
		kvm_cpu_cap_set(X86_FEATURE_LA57);

	/*
	 * PKU not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host.  Clear it if that is not the case.
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);
	kvm_cpu_cap_mask(CPUID_7_EDX,
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
		F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
		F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)
	);

	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
	kvm_cpu_cap_mask(CPUID_7_1_EAX,
		F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
		F(FZRM) | F(FSRS) | F(FSRC) |
		F(AMX_FP16) | F(AVX_IFMA)
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
		F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
		SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
	);
	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | 0 /* PERFCTR_CORE */
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
	);

	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);
	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
		SF(CONSTANT_TSC)
	);

	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
		F(AMD_PSFD)
	);
	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);

	/*
	 * The preference is to use SPEC_CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
	/*
	 * Hide all SVM features by default; SVM will set the cap bits for
	 * features it emulates and/or exposes for L1.
	 */
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
		0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
		F(SME_COHERENT));

	kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
		F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
		F(PERFMON_V2)
	);
737 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
738 * KVM's supported CPUID if the feature is reported as supported by the
739 * kernel. LFENCE_RDTSC was a Linux-defined synthetic feature long
740 * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
741 * CPUs that support SSE2. On CPUs that don't support AMD's leaf,
742 * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
743 * the mask with the raw host CPUID, and reporting support in AMD's
744 * leaf can make it easier for userspace to detect the feature.
746 if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
747 kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
748 if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
749 kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
750 kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);
	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN)
	);
	/*
	 * Hide RDTSCP and RDPID if either feature is reported as supported but
	 * probing MSR_TSC_AUX failed.  This is purely a sanity check and
	 * should never happen, but the guest will likely crash if RDTSCP or
	 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
	 * the past.  For example, the sanity check may fire if this instance of
	 * KVM is running as L1 on top of an older, broken KVM.
	 */
	if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
		     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
		     !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};

static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
{
	if (array->nent >= array->maxnent)
		return NULL;

	return &array->entries[array->nent++];
}
static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);

	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(*entry));
	entry->function = function;
	entry->index = index;
	switch (function & 0xC0000000) {
	case KVM_CPUID_SIGNATURE & 0xC0000000:
		/* Hypervisor leaves are always synthesized by __do_cpuid_func. */
		return entry;

	case 0x80000000:
		/*
		 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which
		 * would result in out-of-bounds calls to do_host_cpuid.
		 */
		{
			static int max_cpuid_80000000;

			if (!READ_ONCE(max_cpuid_80000000))
				WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
			if (function > READ_ONCE(max_cpuid_80000000))
				return entry;
		}
		break;

	default:
		break;
	}

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	if (cpuid_function_is_indexed(function))
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;

	return entry;
}
static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return -E2BIG;

	entry = &array->entries[array->nent];
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++array->nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++array->nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
			entry->ecx = F(RDPID);
		++array->nent;
		break;
	default:
		break;
	}

	return 0;
}
static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
		 * return 01H. Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7:
		entry->eax = min(entry->eax, 1u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
		if (entry->eax == 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			cpuid_entry_override(entry, CPUID_7_1_EDX);
			entry->ebx = 0;
			entry->ecx = 0;
		}
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		eax.split.version_id = kvm_pmu_cap.version;
		eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
		eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
		eax.split.mask_length = kvm_pmu_cap.events_mask_len;
		edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
		edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;

		if (kvm_pmu_cap.version)
			edx.split.anythread_deprecated = 1;
		edx.split.reserved1 = 0;
		edx.split.reserved2 = 0;

		entry->eax = eax.full;
		entry->ebx = kvm_pmu_cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
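	/*
	 * Example encoding for leaf 0xa (illustrative): PMU version 2 with
	 * 8 GP counters of width 48 and an event mask length of 8 yields
	 * EAX = (8 << 24) | (48 << 16) | (8 << 8) | 2 = 0x08300802.
	 */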
	case 0x1f:
	case 0xb:
		/*
		 * No topology; a valid topology is indicated by the presence
		 * of subleaf 1.
		 */
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0xd: {
		u64 permitted_xcr0 = kvm_get_filtered_xcr0();
		u64 permitted_xss = kvm_caps.supported_xss;

		entry->eax &= permitted_xcr0;
		entry->ebx = xstate_required_size(permitted_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= permitted_xcr0 >> 32;
		if (!permitted_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES) | F(XSAVEC))) {
			entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
							  true);
		} else {
			WARN_ON_ONCE(permitted_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= permitted_xss;
		entry->edx &= permitted_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;

			if (permitted_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (permitted_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs.  Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size.  Furthermore, check whether the
			 * processor agrees with permitted_xcr0/permitted_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}

			if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
				entry->ecx &= ~BIT_ULL(2);
			entry->edx = 0;
		}
		break;
	}
	case 0x12:
		/* Intel SGX */
		if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		/*
		 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
		 * and max enclave sizes.  The SGX sub-features and MISCSELECT
		 * are restricted by kernel and KVM capabilities (like most
		 * feature flags), while enclave size is unrestricted.
		 */
		cpuid_entry_override(entry, CPUID_12_EAX);
		entry->ebx &= SGX_MISC_EXINFO;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		/*
		 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
		 * feature flags.  Advertise all supported flags, including
		 * privileged attributes that require explicit opt-in from
		 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
		 * expected to derive it from supported XCR0.
		 */
		entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
		break;
	case 0x14:
		/* Intel PT */
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case 0x1d:
		/* Intel AMX TILE */
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case 0x1e: /* TMUL information */
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		break;
	case KVM_CPUID_SIGNATURE: {
		const u32 *sigptr = (const u32 *)KVM_SIGNATURE;

		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x80000022);
		/*
		 * Serializing LFENCE is reported in a multitude of ways, and
		 * NullSegClearsBase is not reported in CPUID on Zen2; help
		 * userspace by providing the CPUID leaf ourselves.
		 *
		 * However, only do it if the host has CPUID leaf 0x8000001d.
		 * QEMU thinks that it can query the host blindly for that
		 * CPUID leaf if KVM reports that it supports 0x8000001d or
		 * above.  The processor merrily returns values from the
		 * highest Intel leaf which QEMU tries to use as the guest's
		 * 0x8000001d.  Even worse, this can result in an infinite
		 * loop if said highest leaf has no subleaves indexed by ECX.
		 */
		if (entry->eax >= 0x8000001d &&
		    (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) ||
		     !static_cpu_has_bug(X86_BUG_NULL_SEG)))
			entry->eax = max(entry->eax, 0x80000021);
		break;
1150 entry->ebx &= ~GENMASK(27, 16);
1151 cpuid_entry_override(entry, CPUID_8000_0001_EDX);
1152 cpuid_entry_override(entry, CPUID_8000_0001_ECX);
	case 0x80000006:
		/* Drop reserved bits, pass host L2 cache and TLB info. */
		entry->edx &= ~GENMASK(17, 16);
		break;
	case 0x80000007: /* Advanced power management */
		cpuid_entry_override(entry, CPUID_8000_0007_EDX);

		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned int g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned int phys_as = entry->eax & 0xff;

		/*
		 * If TDP (NPT) is disabled, use the adjusted host MAXPHYADDR as
		 * the guest operates in the same PA space as the host, i.e.
		 * reductions in MAXPHYADDR for memory encryption affect shadow
		 * paging, too.
		 *
		 * If TDP is enabled but an explicit guest MAXPHYADDR is not
		 * provided, use the raw bare metal MAXPHYADDR as reductions to
		 * the HPAs do not affect GPAs.
		 */
		if (!tdp_enabled)
			g_phys_as = boot_cpu_data.x86_phys_bits;
		else if (!g_phys_as)
			g_phys_as = phys_as;

		entry->eax = g_phys_as | (virt_as << 8);
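		/*
		 * Worked example (illustrative): a host with 46 physical bits
		 * and TDP disabled yields g_phys_as = 46 and virt_as = 48,
		 * i.e. EAX = 46 | (48 << 8) = 0x302e.
		 */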
1186 entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
1188 cpuid_entry_override(entry, CPUID_8000_0008_EBX);
	case 0x8000000a:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
1206 entry->eax &= GENMASK(2, 0);
1207 entry->ebx = entry->ecx = entry->edx = 0;
	case 0x8000001e:
		/* Do not return host topology information. */
		entry->eax = entry->ebx = entry->ecx = 0;
		entry->edx = 0; /* reserved */
		break;
	case 0x8000001F:
		if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		} else {
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);
			/* Clear NumVMPL since KVM does not support VMPL. */
			entry->ebx &= ~GENMASK(31, 12);
			/*
			 * Enumerate '0' for "PA bits reduction", the adjusted
			 * MAXPHYADDR is enumerated directly (see 0x80000008).
			 */
			entry->ebx &= ~GENMASK(11, 6);
		}
		break;
	case 0x80000020:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x80000021:
		entry->ebx = entry->ecx = entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0021_EAX);
		break;
	/* AMD Extended Performance Monitoring and Debug */
	case 0x80000022: {
		union cpuid_0x80000022_ebx ebx = { };

		entry->ecx = entry->edx = 0;
		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
			entry->eax = entry->ebx = 0;
			break;
		}

		cpuid_entry_override(entry, CPUID_8000_0022_EAX);

		if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
			ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
		else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;
		else
			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;

		entry->ebx = ebx.full;
		break;
	}
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 for now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}
static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}
#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we have
	 * to content ourselves with checking only the emulated side.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	kvfree(array.entries);
	return r;
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * Intel CPUID semantics treat any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * return all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A primary
 * class exists if a guest CPUID entry for its <base> leaf exists.  For a given
 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range.  E.g. if
 * QEMU is advertising support for both HyperV and KVM, the resulting
 * Hypervisor CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf.  Note, the index of
	 * the originally requested leaf is observed!
	 */
	return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
}
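/*
 * Illustrative example (not from the original source): an Intel-flavored
 * guest whose max basic leaf is 0xd and which executes CPUID with
 * EAX = 0x16 (and no 0x16 entry present) is redirected to the CPUID.0xd
 * entry matching the original index, mirroring Intel's out-of-range
 * behavior.
 */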
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	entry = kvm_find_cpuid_entry_index(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;

			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		} else if (function == 0x80000007) {
			if (kvm_hv_invtsc_suppressed(vcpu))
				*edx &= ~SF(CONSTANT_TSC);
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves.  Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists.  EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);