// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/highmem.h>
18 #include <linux/hrtimer.h>
19 #include <linux/kernel.h>
20 #include <linux/kvm_host.h>
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/mod_devicetable.h>
25 #include <linux/objtool.h>
26 #include <linux/sched.h>
27 #include <linux/sched/smt.h>
28 #include <linux/slab.h>
29 #include <linux/tboot.h>
30 #include <linux/trace_events.h>
31 #include <linux/entry-kvm.h>
36 #include <asm/cpu_device_id.h>
37 #include <asm/debugreg.h>
39 #include <asm/fpu/api.h>
40 #include <asm/fpu/xstate.h>
41 #include <asm/idtentry.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/kexec.h>
45 #include <asm/perf_event.h>
46 #include <asm/mmu_context.h>
47 #include <asm/mshyperv.h>
48 #include <asm/mwait.h>
49 #include <asm/spec-ctrl.h>
50 #include <asm/virtext.h>
53 #include "capabilities.h"
56 #include "kvm_onhyperv.h"
58 #include "kvm_cache_regs.h"
71 MODULE_AUTHOR("Qumranet");
72 MODULE_LICENSE("GPL");
static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
82 bool __read_mostly enable_vpid = 1;
83 module_param_named(vpid, enable_vpid, bool, 0444);
85 static bool __read_mostly enable_vnmi = 1;
86 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
88 bool __read_mostly flexpriority_enabled = 1;
89 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
91 bool __read_mostly enable_ept = 1;
92 module_param_named(ept, enable_ept, bool, S_IRUGO);
94 bool __read_mostly enable_unrestricted_guest = 1;
95 module_param_named(unrestricted_guest,
96 enable_unrestricted_guest, bool, S_IRUGO);
98 bool __read_mostly enable_ept_ad_bits = 1;
99 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
101 static bool __read_mostly emulate_invalid_guest_state = true;
102 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
104 static bool __read_mostly fasteoi = 1;
105 module_param(fasteoi, bool, S_IRUGO);
107 module_param(enable_apicv, bool, S_IRUGO);
109 bool __read_mostly enable_ipiv = true;
110 module_param(enable_ipiv, bool, 0444);
/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be a hypervisor for their own guests. If nested=0, guests may not
 * use VMX instructions.
 */
static bool __read_mostly nested = 1;
module_param(nested, bool, S_IRUGO);
120 bool __read_mostly enable_pml = 1;
121 module_param_named(pml, enable_pml, bool, S_IRUGO);
123 static bool __read_mostly error_on_inconsistent_vmcs_config = true;
124 module_param(error_on_inconsistent_vmcs_config, bool, 0444);
126 static bool __read_mostly dump_invalid_vmcs = 0;
127 module_param(dump_invalid_vmcs, bool, 0644);
129 #define MSR_BITMAP_MODE_X2APIC 1
130 #define MSR_BITMAP_MODE_X2APIC_APICV 2
132 #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
134 /* Guest_tsc -> host_tsc conversion requires 64-bit division. */
135 static int __read_mostly cpu_preemption_timer_multi;
136 static bool __read_mostly enable_preemption_timer = 1;
138 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
141 extern bool __read_mostly allow_smaller_maxphyaddr;
142 module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
144 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
145 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
146 #define KVM_VM_CR0_ALWAYS_ON \
147 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
149 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
150 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
151 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
153 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
155 #define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
156 RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
157 RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
158 RTIT_STATUS_BYTECNT))
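/*
 * MSR_IA32_RTIT_STATUS_MASK is the complement of the architecturally defined
 * IA32_RTIT_STATUS bits named above, i.e. the bits that are treated as
 * reserved when the guest writes IA32_RTIT_STATUS.
 */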
/*
 * List of MSRs that can be directly passed to the guest.
 * In addition to these, x2APIC and PT MSRs are handled specially.
 */
164 static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
175 MSR_IA32_SYSENTER_CS,
176 MSR_IA32_SYSENTER_ESP,
177 MSR_IA32_SYSENTER_EIP,
179 MSR_CORE_C3_RESIDENCY,
180 MSR_CORE_C6_RESIDENCY,
	MSR_CORE_C7_RESIDENCY,
};
/*
 * These 2 parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. According to testing, this time is usually smaller
 *             than 128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held for
 *             less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
 */
195 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
196 module_param(ple_gap, uint, 0444);
198 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
199 module_param(ple_window, uint, 0444);
201 /* Default doubles per-vcpu window every exit. */
202 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
203 module_param(ple_window_grow, uint, 0444);
205 /* Default resets per-vcpu window every exit to ple_window. */
206 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
207 module_param(ple_window_shrink, uint, 0444);
209 /* Default is to compute the maximum so we can never overflow. */
210 static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
211 module_param(ple_window_max, uint, 0444);
213 /* Default is SYSTEM mode, 1 for host-guest mode */
214 int __read_mostly pt_mode = PT_MODE_SYSTEM;
215 module_param(pt_mode, int, S_IRUGO);
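/*
 * vmx_l1d_should_flush is enabled whenever the L1D flush mitigation is active
 * at all; vmx_l1d_flush_cond is additionally enabled when the flush is done
 * conditionally rather than on every VM-entry.  The mutex serializes runtime
 * changes made via the vmentry_l1d_flush module parameter.
 */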
217 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
218 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
219 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
221 /* Storage for pre module init parameter parsing */
222 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
static const struct {
	const char *option;
	bool for_parse;
} vmentry_l1d_param[] = {
	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};
236 #define L1D_CACHE_ORDER 4
237 static void *vmx_l1d_flush_pages;
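/*
 * vmx_l1d_flush_pages is a 2^L1D_CACHE_ORDER page (64KB with 4KB pages)
 * buffer used for the software L1D flush; it is only allocated when a flush
 * may be needed and the CPU lacks X86_FEATURE_FLUSH_L1D, see
 * vmx_setup_l1d_flush().
 */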
239 /* Control for disabling CPU Fill buffer clear */
240 static bool __read_mostly vmx_fb_clear_ctrl_available;
242 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
247 if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
248 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
253 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
257 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
260 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
261 if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
262 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
267 /* If set to auto use the default l1tf mitigation method */
268 if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
269 switch (l1tf_mitigation) {
270 case L1TF_MITIGATION_OFF:
271 l1tf = VMENTER_L1D_FLUSH_NEVER;
273 case L1TF_MITIGATION_FLUSH_NOWARN:
274 case L1TF_MITIGATION_FLUSH:
275 case L1TF_MITIGATION_FLUSH_NOSMT:
276 l1tf = VMENTER_L1D_FLUSH_COND;
278 case L1TF_MITIGATION_FULL:
279 case L1TF_MITIGATION_FULL_FORCE:
280 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
283 } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
284 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
287 if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
288 !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		/*
		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
		 * lifetime and so should not be charged to a memcg.
		 */
293 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
296 vmx_l1d_flush_pages = page_address(page);
		/*
		 * Initialize each page with a different pattern in
		 * order to protect against KSM in the nested
		 * virtualization case.
		 */
		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
			       PAGE_SIZE);
		}
	}
	l1tf_vmx_mitigation = l1tf;

	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
		static_branch_enable(&vmx_l1d_should_flush);
	else
		static_branch_disable(&vmx_l1d_should_flush);

	if (l1tf == VMENTER_L1D_FLUSH_COND)
		static_branch_enable(&vmx_l1d_flush_cond);
	else
		static_branch_disable(&vmx_l1d_flush_cond);
	return 0;
}
323 static int vmentry_l1d_flush_parse(const char *s)
328 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
329 if (vmentry_l1d_param[i].for_parse &&
330 sysfs_streq(s, vmentry_l1d_param[i].option))
337 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
341 l1tf = vmentry_l1d_flush_parse(s);
345 if (!boot_cpu_has(X86_BUG_L1TF))
	/*
	 * Has vmx_init() run already? If not then this is the pre init
	 * parameter parsing. In that case just store the value and let
	 * vmx_init() do the proper setup after enable_ept has been
	 * established.
	 */
354 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
355 vmentry_l1d_flush_param = l1tf;
359 mutex_lock(&vmx_l1d_flush_mutex);
360 ret = vmx_setup_l1d_flush(l1tf);
361 mutex_unlock(&vmx_l1d_flush_mutex);
365 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
367 if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
368 return sprintf(s, "???\n");
370 return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
373 static void vmx_setup_fb_clear_ctrl(void)
377 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) &&
378 !boot_cpu_has_bug(X86_BUG_MDS) &&
379 !boot_cpu_has_bug(X86_BUG_TAA)) {
380 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
381 if (msr & ARCH_CAP_FB_CLEAR_CTRL)
382 vmx_fb_clear_ctrl_available = true;
386 static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
390 if (!vmx->disable_fb_clear)
393 msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
395 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
396 /* Cache the MSR value to avoid reading it later */
397 vmx->msr_ia32_mcu_opt_ctrl = msr;
400 static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
402 if (!vmx->disable_fb_clear)
405 vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
406 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
409 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
411 vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
414 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
415 * at VMEntry. Skip the MSR read/write when a guest has no use case to
418 if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
419 ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
420 (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
421 (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
422 (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
423 (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
424 vmx->disable_fb_clear = false;
static const struct kernel_param_ops vmentry_l1d_flush_ops = {
	.set = vmentry_l1d_flush_set,
	.get = vmentry_l1d_flush_get,
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
433 static u32 vmx_segment_access_rights(struct kvm_segment *var);
435 void vmx_vmexit(void);
437 #define vmx_insn_failed(fmt...) \
440 pr_warn_ratelimited(fmt); \
443 void vmread_error(unsigned long field, bool fault)
446 kvm_spurious_fault();
448 vmx_insn_failed("vmread failed: field=%lx\n", field);
451 noinline void vmwrite_error(unsigned long field, unsigned long value)
453 vmx_insn_failed("vmwrite failed: field=%lx val=%lx err=%u\n",
454 field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
457 noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
459 vmx_insn_failed("vmclear failed: %p/%llx err=%u\n",
460 vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
463 noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
465 vmx_insn_failed("vmptrld failed: %p/%llx err=%u\n",
466 vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
469 noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
471 vmx_insn_failed("invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
475 noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
477 vmx_insn_failed("invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
481 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
482 DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCSs loaded on that CPU. This is
 * needed when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded
 * on it.
 */
487 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
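/* Bitmap of in-use VPIDs; vmx_vpid_lock serializes allocation and free. */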
489 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
490 static DEFINE_SPINLOCK(vmx_vpid_lock);
492 struct vmcs_config vmcs_config __ro_after_init;
493 struct vmx_capability vmx_capability __ro_after_init;
495 #define VMX_SEGMENT_FIELD(seg) \
496 [VCPU_SREG_##seg] = { \
497 .selector = GUEST_##seg##_SELECTOR, \
498 .base = GUEST_##seg##_BASE, \
499 .limit = GUEST_##seg##_LIMIT, \
500 .ar_bytes = GUEST_##seg##_AR_BYTES, \
503 static const struct kvm_vmx_segment_field {
508 } kvm_vmx_segment_fields[] = {
509 VMX_SEGMENT_FIELD(CS),
510 VMX_SEGMENT_FIELD(DS),
511 VMX_SEGMENT_FIELD(ES),
512 VMX_SEGMENT_FIELD(FS),
513 VMX_SEGMENT_FIELD(GS),
514 VMX_SEGMENT_FIELD(SS),
515 VMX_SEGMENT_FIELD(TR),
516 VMX_SEGMENT_FIELD(LDTR),
static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}
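/*
 * Base address of the host IDT, captured during hardware setup and programmed
 * into the VMCS host-state area so the CPU can locate the host IDT on VM-Exit.
 */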
524 static unsigned long host_idt_base;
526 #if IS_ENABLED(CONFIG_HYPERV)
527 static struct kvm_x86_ops vmx_x86_ops __initdata;
529 static bool __read_mostly enlightened_vmcs = true;
530 module_param(enlightened_vmcs, bool, 0444);
532 static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
534 struct hv_enlightened_vmcs *evmcs;
535 struct hv_partition_assist_pg **p_hv_pa_pg =
536 &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
	/*
	 * Synthetic VM-Exit is not enabled in current code and so all
	 * eVMCS in a single VM share the same assist page.
	 */
	if (!*p_hv_pa_pg)
542 *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
547 evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
549 evmcs->partition_assist_page =
551 evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
552 evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
557 static __init void hv_init_evmcs(void)
561 if (!enlightened_vmcs)
		return;

	/*
	 * Enlightened VMCS usage should be recommended and the host needs
	 * to support eVMCS v1 or above.
	 */
568 if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
569 (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
572 /* Check that we have assist pages on all online CPUs */
573 for_each_online_cpu(cpu) {
574 if (!hv_get_vp_assist_page(cpu)) {
575 enlightened_vmcs = false;
580 if (enlightened_vmcs) {
581 pr_info("Using Hyper-V Enlightened VMCS\n");
582 static_branch_enable(&__kvm_is_using_evmcs);
585 if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
586 vmx_x86_ops.enable_l2_tlb_flush
587 = hv_enable_l2_tlb_flush;
590 enlightened_vmcs = false;
594 static void hv_reset_evmcs(void)
596 struct hv_vp_assist_page *vp_ap;
598 if (!kvm_is_using_evmcs())
		return;

	/*
	 * KVM should enable eVMCS if and only if all CPUs have a VP assist
	 * page, and should reject CPU onlining if eVMCS is enabled but the
	 * CPU doesn't have a VP assist page allocated.
	 */
606 vp_ap = hv_get_vp_assist_page(smp_processor_id());
607 if (WARN_ON_ONCE(!vp_ap))
		return;

	/*
	 * Reset everything to support using non-enlightened VMCS access later
	 * (e.g. when we reload the module with enlightened_vmcs=0)
	 */
614 vp_ap->nested_control.features.directhypercall = 0;
615 vp_ap->current_nested_vmcs = 0;
616 vp_ap->enlighten_vmentry = 0;
619 #else /* IS_ENABLED(CONFIG_HYPERV) */
620 static void hv_init_evmcs(void) {}
621 static void hv_reset_evmcs(void) {}
622 #endif /* IS_ENABLED(CONFIG_HYPERV) */
625 * Comment's format: document - errata name - stepping - processor name.
627 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
629 static u32 vmx_preemption_cpu_tfms[] = {
630 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
632 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */
633 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
634 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
636 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
638 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
639 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
641 * 320767.pdf - AAP86 - B1 -
642 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
645 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
647 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
649 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
651 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
652 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
653 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
655 /* Xeon E3-1220 V2 */
static inline bool cpu_has_broken_vmx_preemption_timer(void)
{
	u32 eax = cpuid_eax(0x00000001), i;

	/* Clear the reserved bits */
	eax &= ~(0x3U << 14 | 0xfU << 28);
	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
		if (eax == vmx_preemption_cpu_tfms[i])
			return true;

	return false;
}
672 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
674 return flexpriority_enabled && lapic_in_kernel(vcpu);
677 static int possible_passthrough_msr_slot(u32 msr)
681 for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
682 if (vmx_possible_passthrough_msrs[i] == msr)
688 static bool is_valid_passthrough_msr(u32 msr)
693 case 0x800 ... 0x8ff:
694 /* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
696 case MSR_IA32_RTIT_STATUS:
697 case MSR_IA32_RTIT_OUTPUT_BASE:
698 case MSR_IA32_RTIT_OUTPUT_MASK:
699 case MSR_IA32_RTIT_CR3_MATCH:
700 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
701 /* PT MSRs. These are handled in pt_update_intercept_for_msr() */
704 case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
705 case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
706 case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
707 case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
708 case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
709 /* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
713 r = possible_passthrough_msr_slot(msr) != -ENOENT;
715 WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
720 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
724 i = kvm_find_user_return_msr(msr);
726 return &vmx->guest_uret_msrs[i];
730 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
731 struct vmx_uret_msr *msr, u64 data)
733 unsigned int slot = msr - vmx->guest_uret_msrs;
736 if (msr->load_into_hardware) {
738 ret = kvm_set_user_return_msr(slot, data, msr->mask);
746 #ifdef CONFIG_KEXEC_CORE
747 static void crash_vmclear_local_loaded_vmcss(void)
749 int cpu = raw_smp_processor_id();
750 struct loaded_vmcs *v;
752 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
753 loaded_vmcss_on_cpu_link)
756 #endif /* CONFIG_KEXEC_CORE */
758 static void __loaded_vmcs_clear(void *arg)
760 struct loaded_vmcs *loaded_vmcs = arg;
761 int cpu = raw_smp_processor_id();
763 if (loaded_vmcs->cpu != cpu)
764 return; /* vcpu migration can race with cpu offline */
765 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
766 per_cpu(current_vmcs, cpu) = NULL;
768 vmcs_clear(loaded_vmcs->vmcs);
769 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
770 vmcs_clear(loaded_vmcs->shadow_vmcs);
772 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
	/*
	 * Ensure all writes to loaded_vmcs, including deleting it from its
	 * current percpu list, complete before setting loaded_vmcs->cpu to
	 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
	 * and add loaded_vmcs to its percpu list before it's deleted from this
	 * cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
	 */
	smp_wmb();

	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}
787 void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
789 int cpu = loaded_vmcs->cpu;
792 smp_call_function_single(cpu,
793 __loaded_vmcs_clear, loaded_vmcs, 1);
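/*
 * Returns true if the given (segment, field) pair is already present in the
 * segment cache, and marks it present either way so that the caller can
 * populate the cached value after a miss.
 */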
796 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
800 u32 mask = 1 << (seg * SEG_FIELD_NR + field);
802 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
803 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
804 vmx->segment_cache.bitmask = 0;
806 ret = vmx->segment_cache.bitmask & mask;
807 vmx->segment_cache.bitmask |= mask;
811 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
813 u16 *p = &vmx->segment_cache.seg[seg].selector;
815 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
816 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
820 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
822 ulong *p = &vmx->segment_cache.seg[seg].base;
824 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
825 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
829 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
831 u32 *p = &vmx->segment_cache.seg[seg].limit;
833 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
834 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
838 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
840 u32 *p = &vmx->segment_cache.seg[seg].ar;
842 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
843 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
847 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
851 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
852 (1u << DB_VECTOR) | (1u << AC_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does.
	 */
859 if (enable_vmware_backdoor)
860 eb |= (1u << GP_VECTOR);
861 if ((vcpu->guest_debug &
862 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
863 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
864 eb |= 1u << BP_VECTOR;
865 if (to_vmx(vcpu)->rmode.vm86_active)
867 if (!vmx_need_pf_intercept(vcpu))
868 eb &= ~(1u << PF_VECTOR);
870 /* When we are running a nested L2 guest and L1 specified for it a
871 * certain exception bitmap, we must trap the same exceptions and pass
872 * them to L1. When running L2, we will only handle the exceptions
	 * specified above if L1 did not want them.
	 */
875 if (is_guest_mode(vcpu))
876 eb |= get_vmcs12(vcpu)->exception_bitmap;
878 int mask = 0, match = 0;
880 if (enable_ept && (eb & (1u << PF_VECTOR))) {
			/*
			 * If EPT is enabled, #PF is currently only intercepted
			 * if MAXPHYADDR is smaller on the guest than on the
			 * host. In that case we only care about present,
			 * non-reserved faults. For vmcs02, however, PFEC_MASK
			 * and PFEC_MATCH are set in prepare_vmcs02_rare.
			 */
888 mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
889 match = PFERR_PRESENT_MASK;
891 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
892 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
	/*
	 * Disabling xfd interception indicates that dynamic xfeatures
	 * might be used in the guest. Always trap #NM in this case
	 * to save guest xfd_err timely.
	 */
900 if (vcpu->arch.xfd_no_write_intercept)
901 eb |= (1u << NM_VECTOR);
903 vmcs_write32(EXCEPTION_BITMAP, eb);
}

/*
 * Check if MSR is intercepted for currently loaded MSR bitmap.
 */
909 static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
911 if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
914 return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
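/*
 * Compute the VMX_RUN_* flags consumed by the low-level VM-entry code:
 * whether to VMRESUME (vs. VMLAUNCH), and whether SPEC_CTRL must be read back
 * after VM-Exit because the guest can write it without being intercepted.
 */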
917 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
919 unsigned int flags = 0;
921 if (vmx->loaded_vmcs->launched)
922 flags |= VMX_RUN_VMRESUME;
	/*
	 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
	 * to change it directly without causing a vmexit. In that case read
	 * it after vmexit and store it in vmx->spec_ctrl.
	 */
	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
		flags |= VMX_RUN_SAVE_SPEC_CTRL;

	return flags;
}
935 static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
936 unsigned long entry, unsigned long exit)
938 vm_entry_controls_clearbit(vmx, entry);
939 vm_exit_controls_clearbit(vmx, exit);
942 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
946 for (i = 0; i < m->nr; ++i) {
947 if (m->val[i].index == msr)
953 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
956 struct msr_autoload *m = &vmx->msr_autoload;
960 if (cpu_has_load_ia32_efer()) {
961 clear_atomic_switch_msr_special(vmx,
962 VM_ENTRY_LOAD_IA32_EFER,
963 VM_EXIT_LOAD_IA32_EFER);
967 case MSR_CORE_PERF_GLOBAL_CTRL:
968 if (cpu_has_load_perf_global_ctrl()) {
969 clear_atomic_switch_msr_special(vmx,
970 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
971 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
976 i = vmx_find_loadstore_msr_slot(&m->guest, msr);
980 m->guest.val[i] = m->guest.val[m->guest.nr];
981 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
984 i = vmx_find_loadstore_msr_slot(&m->host, msr);
989 m->host.val[i] = m->host.val[m->host.nr];
990 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
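/*
 * EFER and PERF_GLOBAL_CTRL can be switched across VM-Entry/VM-Exit via
 * dedicated VMCS controls and fields; all other MSRs go through the
 * VM-Entry/VM-Exit MSR load/store areas, which hold at most
 * MAX_NR_LOADSTORE_MSRS entries.
 */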
993 static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
994 unsigned long entry, unsigned long exit,
995 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
996 u64 guest_val, u64 host_val)
998 vmcs_write64(guest_val_vmcs, guest_val);
999 if (host_val_vmcs != HOST_IA32_EFER)
1000 vmcs_write64(host_val_vmcs, host_val);
1001 vm_entry_controls_setbit(vmx, entry);
1002 vm_exit_controls_setbit(vmx, exit);
1005 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1006 u64 guest_val, u64 host_val, bool entry_only)
1009 struct msr_autoload *m = &vmx->msr_autoload;
1013 if (cpu_has_load_ia32_efer()) {
1014 add_atomic_switch_msr_special(vmx,
1015 VM_ENTRY_LOAD_IA32_EFER,
1016 VM_EXIT_LOAD_IA32_EFER,
1019 guest_val, host_val);
1023 case MSR_CORE_PERF_GLOBAL_CTRL:
1024 if (cpu_has_load_perf_global_ctrl()) {
1025 add_atomic_switch_msr_special(vmx,
1026 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1027 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1028 GUEST_IA32_PERF_GLOBAL_CTRL,
1029 HOST_IA32_PERF_GLOBAL_CTRL,
1030 guest_val, host_val);
1034 case MSR_IA32_PEBS_ENABLE:
		/* PEBS needs a quiescent period after being disabled (to write
		 * a record). Disabling PEBS through VMX MSR swapping doesn't
		 * provide that period, so a CPU could write host's record into
		 * guest memory.
		 */
1040 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1043 i = vmx_find_loadstore_msr_slot(&m->guest, msr);
1045 j = vmx_find_loadstore_msr_slot(&m->host, msr);
1047 if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
1048 (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
1049 printk_once(KERN_WARNING "Not enough msr switch entries. "
1050 "Can't add msr %x\n", msr);
1055 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
1057 m->guest.val[i].index = msr;
1058 m->guest.val[i].value = guest_val;
1065 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
1067 m->host.val[j].index = msr;
1068 m->host.val[j].value = host_val;
1071 static bool update_transition_efer(struct vcpu_vmx *vmx)
1073 u64 guest_efer = vmx->vcpu.arch.efer;
1074 u64 ignore_bits = 0;
1077 /* Shadow paging assumes NX to be available. */
1079 guest_efer |= EFER_NX;
1082 * LMA and LME handled by hardware; SCE meaningless outside long mode.
1084 ignore_bits |= EFER_SCE;
1085 #ifdef CONFIG_X86_64
1086 ignore_bits |= EFER_LMA | EFER_LME;
1087 /* SCE is meaningful only in long mode on Intel */
1088 if (guest_efer & EFER_LMA)
1089 ignore_bits &= ~(u64)EFER_SCE;
#endif

	/*
	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
	 * On CPUs that support "load IA32_EFER", always switch EFER
	 * atomically, since it's faster than switching it manually.
	 */
1097 if (cpu_has_load_ia32_efer() ||
1098 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
1099 if (!(guest_efer & EFER_LMA))
1100 guest_efer &= ~EFER_LME;
1101 if (guest_efer != host_efer)
1102 add_atomic_switch_msr(vmx, MSR_EFER,
1103 guest_efer, host_efer, false);
1105 clear_atomic_switch_msr(vmx, MSR_EFER);
1109 i = kvm_find_user_return_msr(MSR_EFER);
1113 clear_atomic_switch_msr(vmx, MSR_EFER);
1115 guest_efer &= ~ignore_bits;
1116 guest_efer |= host_efer & ignore_bits;
1118 vmx->guest_uret_msrs[i].data = guest_efer;
1119 vmx->guest_uret_msrs[i].mask = ~ignore_bits;
1124 #ifdef CONFIG_X86_32
/*
 * On 32-bit kernels, VM exits still load the FS and GS bases from the
 * VMCS rather than the segment table. KVM uses this helper to figure
 * out the current bases to poke them into the VMCS before entry.
 */
1130 static unsigned long segment_base(u16 selector)
1132 struct desc_struct *table;
1135 if (!(selector & ~SEGMENT_RPL_MASK))
1138 table = get_current_gdt_ro();
1140 if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1141 u16 ldt_selector = kvm_read_ldt();
1143 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
1146 table = (struct desc_struct *)segment_base(ldt_selector);
1148 v = get_desc_base(&table[selector >> 3]);
1153 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
1155 return vmx_pt_mode_is_host_guest() &&
1156 !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
1159 static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
1161 /* The base must be 128-byte aligned and a legal physical address. */
1162 return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
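/*
 * Bulk load/save of the non-CTL Intel PT MSRs (STATUS, OUTPUT_BASE,
 * OUTPUT_MASK, CR3_MATCH and the ADDRx_A/ADDRx_B pair for each supported
 * address range).  RTIT_CTL itself is handled separately by the callers.
 */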
1165 static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
1169 wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1170 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1171 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1172 wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1173 for (i = 0; i < addr_range; i++) {
1174 wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1175 wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1179 static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
1183 rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1184 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1185 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1186 rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1187 for (i = 0; i < addr_range; i++) {
1188 rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1189 rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1193 static void pt_guest_enter(struct vcpu_vmx *vmx)
1195 if (vmx_pt_mode_is_system())
		return;

	/*
	 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
	 * Save host state before VM entry.
	 */
1202 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1203 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1204 wrmsrl(MSR_IA32_RTIT_CTL, 0);
1205 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1206 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1210 static void pt_guest_exit(struct vcpu_vmx *vmx)
1212 if (vmx_pt_mode_is_system())
1215 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1216 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1217 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
	}

	/*
	 * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest,
	 * i.e. RTIT_CTL is always cleared on VM-Exit. Restore it if necessary.
	 */
1224 if (vmx->pt_desc.host.ctl)
1225 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1228 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
1229 unsigned long fs_base, unsigned long gs_base)
1231 if (unlikely(fs_sel != host->fs_sel)) {
1233 vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1235 vmcs_write16(HOST_FS_SELECTOR, 0);
1236 host->fs_sel = fs_sel;
1238 if (unlikely(gs_sel != host->gs_sel)) {
1240 vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1242 vmcs_write16(HOST_GS_SELECTOR, 0);
1243 host->gs_sel = gs_sel;
1245 if (unlikely(fs_base != host->fs_base)) {
1246 vmcs_writel(HOST_FS_BASE, fs_base);
1247 host->fs_base = fs_base;
1249 if (unlikely(gs_base != host->gs_base)) {
1250 vmcs_writel(HOST_GS_BASE, gs_base);
1251 host->gs_base = gs_base;
1255 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1257 struct vcpu_vmx *vmx = to_vmx(vcpu);
1258 struct vmcs_host_state *host_state;
1259 #ifdef CONFIG_X86_64
1260 int cpu = raw_smp_processor_id();
1262 unsigned long fs_base, gs_base;
1266 vmx->req_immediate_exit = false;
	/*
	 * Note that guest MSRs to be saved/restored can also be changed
	 * when guest state is loaded. This happens when guest transitions
	 * to/from long-mode by setting MSR_EFER.LMA.
	 */
1273 if (!vmx->guest_uret_msrs_loaded) {
1274 vmx->guest_uret_msrs_loaded = true;
1275 for (i = 0; i < kvm_nr_uret_msrs; ++i) {
1276 if (!vmx->guest_uret_msrs[i].load_into_hardware)
1279 kvm_set_user_return_msr(i,
1280 vmx->guest_uret_msrs[i].data,
1281 vmx->guest_uret_msrs[i].mask);
1285 if (vmx->nested.need_vmcs12_to_shadow_sync)
1286 nested_sync_vmcs12_to_shadow(vcpu);
1288 if (vmx->guest_state_loaded)
1291 host_state = &vmx->loaded_vmcs->host_state;
1294 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
1295 * allow segment selectors with cpl > 0 or ti == 1.
1297 host_state->ldt_sel = kvm_read_ldt();
1299 #ifdef CONFIG_X86_64
1300 savesegment(ds, host_state->ds_sel);
1301 savesegment(es, host_state->es_sel);
1303 gs_base = cpu_kernelmode_gs_base(cpu);
1304 if (likely(is_64bit_mm(current->mm))) {
1305 current_save_fsgs();
1306 fs_sel = current->thread.fsindex;
1307 gs_sel = current->thread.gsindex;
1308 fs_base = current->thread.fsbase;
1309 vmx->msr_host_kernel_gs_base = current->thread.gsbase;
1311 savesegment(fs, fs_sel);
1312 savesegment(gs, gs_sel);
1313 fs_base = read_msr(MSR_FS_BASE);
1314 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1317 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1319 savesegment(fs, fs_sel);
1320 savesegment(gs, gs_sel);
1321 fs_base = segment_base(fs_sel);
1322 gs_base = segment_base(gs_sel);
1325 vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
1326 vmx->guest_state_loaded = true;
1329 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
1331 struct vmcs_host_state *host_state;
1333 if (!vmx->guest_state_loaded)
1336 host_state = &vmx->loaded_vmcs->host_state;
1338 ++vmx->vcpu.stat.host_state_reload;
1340 #ifdef CONFIG_X86_64
1341 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1343 if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
1344 kvm_load_ldt(host_state->ldt_sel);
1345 #ifdef CONFIG_X86_64
1346 load_gs_index(host_state->gs_sel);
1348 loadsegment(gs, host_state->gs_sel);
1351 if (host_state->fs_sel & 7)
1352 loadsegment(fs, host_state->fs_sel);
1353 #ifdef CONFIG_X86_64
1354 if (unlikely(host_state->ds_sel | host_state->es_sel)) {
1355 loadsegment(ds, host_state->ds_sel);
1356 loadsegment(es, host_state->es_sel);
1359 invalidate_tss_limit();
1360 #ifdef CONFIG_X86_64
1361 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1363 load_fixmap_gdt(raw_smp_processor_id());
1364 vmx->guest_state_loaded = false;
1365 vmx->guest_uret_msrs_loaded = false;
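/*
 * MSR_KERNEL_GS_BASE is not context switched by the VMCS, so KVM tracks the
 * guest's value itself and only touches the hardware MSR while guest state
 * is loaded on this CPU.
 */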
1368 #ifdef CONFIG_X86_64
1369 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1372 if (vmx->guest_state_loaded)
1373 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1375 return vmx->msr_guest_kernel_gs_base;
1378 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1381 if (vmx->guest_state_loaded)
1382 wrmsrl(MSR_KERNEL_GS_BASE, data);
1384 vmx->msr_guest_kernel_gs_base = data;
1388 void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
1389 struct loaded_vmcs *buddy)
1391 struct vcpu_vmx *vmx = to_vmx(vcpu);
1392 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1395 if (!already_loaded) {
1396 loaded_vmcs_clear(vmx->loaded_vmcs);
1397 local_irq_disable();
		/*
		 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
		 * this cpu's percpu list, otherwise it may not yet be deleted
		 * from its previous cpu's percpu list. Pairs with the
		 * smp_wmb() in __loaded_vmcs_clear().
		 */
		smp_rmb();

1407 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1408 &per_cpu(loaded_vmcss_on_cpu, cpu));
1412 prev = per_cpu(current_vmcs, cpu);
1413 if (prev != vmx->loaded_vmcs->vmcs) {
1414 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1415 vmcs_load(vmx->loaded_vmcs->vmcs);
		/*
		 * No indirect branch prediction barrier needed when switching
		 * the active VMCS within a vCPU, unless IBRS is advertised to
		 * the vCPU. To minimize the number of IBPBs executed, KVM
		 * performs IBPB on nested VM-Exit (a single nested transition
		 * may switch the active VMCS multiple times).
		 */
1424 if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
1425 indirect_branch_prediction_barrier();
1428 if (!already_loaded) {
1429 void *gdt = get_current_gdt_ro();
1432 * Flush all EPTP/VPID contexts, the new pCPU may have stale
1433 * TLB entries from its previous association with the vCPU.
1435 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1438 * Linux uses per-cpu TSS and GDT, so set these when switching
1439 * processors. See 22.2.4.
1441 vmcs_writel(HOST_TR_BASE,
1442 (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
1443 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
1445 if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
1447 vmcs_writel(HOST_IA32_SYSENTER_ESP,
1448 (unsigned long)(cpu_entry_stack(cpu) + 1));
1451 vmx->loaded_vmcs->cpu = cpu;
1456 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
1457 * vcpu mutex is already taken.
1459 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1461 struct vcpu_vmx *vmx = to_vmx(vcpu);
1463 vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
1465 vmx_vcpu_pi_load(vcpu, cpu);
1467 vmx->host_debugctlmsr = get_debugctlmsr();
1470 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1472 vmx_vcpu_pi_put(vcpu);
1474 vmx_prepare_switch_to_host(to_vmx(vcpu));
1477 bool vmx_emulation_required(struct kvm_vcpu *vcpu)
1479 return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
1482 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1484 struct vcpu_vmx *vmx = to_vmx(vcpu);
1485 unsigned long rflags, save_rflags;
1487 if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
1488 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1489 rflags = vmcs_readl(GUEST_RFLAGS);
1490 if (vmx->rmode.vm86_active) {
1491 rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1492 save_rflags = vmx->rmode.save_rflags;
1493 rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1495 vmx->rflags = rflags;
1500 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1502 struct vcpu_vmx *vmx = to_vmx(vcpu);
1503 unsigned long old_rflags;
1505 if (is_unrestricted_guest(vcpu)) {
1506 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1507 vmx->rflags = rflags;
1508 vmcs_writel(GUEST_RFLAGS, rflags);
1512 old_rflags = vmx_get_rflags(vcpu);
1513 vmx->rflags = rflags;
1514 if (vmx->rmode.vm86_active) {
1515 vmx->rmode.save_rflags = rflags;
1516 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1518 vmcs_writel(GUEST_RFLAGS, rflags);
1520 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1521 vmx->emulation_required = vmx_emulation_required(vcpu);
1524 static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
1526 return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
1529 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
1531 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1534 if (interruptibility & GUEST_INTR_STATE_STI)
1535 ret |= KVM_X86_SHADOW_INT_STI;
1536 if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1537 ret |= KVM_X86_SHADOW_INT_MOV_SS;
1542 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1544 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1545 u32 interruptibility = interruptibility_old;
1547 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1549 if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1550 interruptibility |= GUEST_INTR_STATE_MOV_SS;
1551 else if (mask & KVM_X86_SHADOW_INT_STI)
1552 interruptibility |= GUEST_INTR_STATE_STI;
1554 if ((interruptibility != interruptibility_old))
1555 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1558 static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
1560 struct vcpu_vmx *vmx = to_vmx(vcpu);
1561 unsigned long value;
	/*
	 * Any MSR write that attempts to change bits marked reserved will
	 * cause a #GP fault.
	 */
	if (data & vmx->pt_desc.ctl_bitmask)
		return 1;

	/*
	 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
	 * result in a #GP unless the same write also clears TraceEn.
	 */
	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
	    ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
		return 1;
	/*
	 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this bit
	 * and FabricEn would cause #GP, if
	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
	 */
	if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
	    !(data & RTIT_CTL_FABRIC_EN) &&
	    !intel_pt_validate_cap(vmx->pt_desc.caps,
				   PT_CAP_single_range_output))
		return 1;
	/*
	 * MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that
	 * utilize encodings marked reserved will cause a #GP fault.
	 */
1593 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1594 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1595 !test_bit((data & RTIT_CTL_MTC_RANGE) >>
1596 RTIT_CTL_MTC_RANGE_OFFSET, &value))
1598 value = intel_pt_validate_cap(vmx->pt_desc.caps,
1599 PT_CAP_cycle_thresholds);
1600 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1601 !test_bit((data & RTIT_CTL_CYC_THRESH) >>
1602 RTIT_CTL_CYC_THRESH_OFFSET, &value))
1604 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1605 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1606 !test_bit((data & RTIT_CTL_PSB_FREQ) >>
1607 RTIT_CTL_PSB_FREQ_OFFSET, &value))
		return 1;

	/*
	 * If ADDRx_CFG is reserved or the encoding is >2, the MSR write will
	 * cause a #GP fault.
	 */
1614 value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
1615 if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2))
1617 value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
1618 if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2))
1620 value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
1621 if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2))
1623 value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
1624 if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2))
1630 static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
1631 void *insn, int insn_len)
{
	/*
	 * Emulation of instructions in SGX enclaves is impossible as RIP does
	 * not point at the failing instruction, and even if it did, the code
	 * stream is inaccessible. Inject #UD instead of exiting to userspace
	 * so that guest userspace can't DoS the guest simply by triggering
	 * emulation (enclaves are CPL3 only).
	 */
1640 if (to_vmx(vcpu)->exit_reason.enclave_mode) {
1641 kvm_queue_exception(vcpu, UD_VECTOR);
1647 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
1649 union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
1650 unsigned long rip, orig_rip;
	u32 instr_len;

	/*
	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
	 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
	 * set when EPT misconfig occurs. In practice, real hardware updates
	 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
	 * (namely Hyper-V) don't set it due to it being undefined behavior,
	 * i.e. we end up advancing IP with some random value.
	 */
1661 if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
1662 exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
1663 instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1666 * Emulating an enclave's instructions isn't supported as KVM
1667 * cannot access the enclave's memory or its true RIP, e.g. the
1668 * vmcs.GUEST_RIP points at the exit point of the enclave, not
1669 * the RIP that actually triggered the VM-Exit. But, because
1670 * most instructions that cause VM-Exit will #UD in an enclave,
1671 * most instruction-based VM-Exits simply do not occur.
1673 * There are a few exceptions, notably the debug instructions
1674 * INT1ICEBRK and INT3, as they are allowed in debug enclaves
1675 * and generate #DB/#BP as expected, which KVM might intercept.
1676 * But again, the CPU does the dirty work and saves an instr
1677 * length of zero so VMMs don't shoot themselves in the foot.
1678 * WARN if KVM tries to skip a non-zero length instruction on
1679 * a VM-Exit from an enclave.
1684 WARN_ONCE(exit_reason.enclave_mode,
1685 "skipping instruction after SGX enclave VM-Exit");
1687 orig_rip = kvm_rip_read(vcpu);
1688 rip = orig_rip + instr_len;
1689 #ifdef CONFIG_X86_64
1691 * We need to mask out the high 32 bits of RIP if not in 64-bit
1692 * mode, but just finding out that we are in 64-bit mode is
1693 * quite expensive. Only do it if there was a carry.
1695 if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
1698 kvm_rip_write(vcpu, rip);
1700 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
1705 /* skipping an emulated instruction also counts */
1706 vmx_set_interrupt_shadow(vcpu, 0);
/*
 * Recognizes a pending MTF VM-exit and records the nested state for later
 * delivery.
 */
1715 static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
1717 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1718 struct vcpu_vmx *vmx = to_vmx(vcpu);
1720 if (!is_guest_mode(vcpu))
	/*
	 * Per the SDM, MTF takes priority over debug-trap exceptions besides
1725 * TSS T-bit traps and ICEBP (INT1). KVM doesn't emulate T-bit traps
1726 * or ICEBP (in the emulator proper), and skipping of ICEBP after an
1727 * intercepted #DB deliberately avoids single-step #DB and MTF updates
1728 * as ICEBP is higher priority than both. As instruction emulation is
1729 * completed at this point (i.e. KVM is at the instruction boundary),
1730 * any #DB exception pending delivery must be a debug-trap of lower
1731 * priority than MTF. Record the pending MTF state to be delivered in
	 * vmx_check_nested_events().
	 */
1734 if (nested_cpu_has_mtf(vmcs12) &&
1735 (!vcpu->arch.exception.pending ||
1736 vcpu->arch.exception.vector == DB_VECTOR) &&
1737 (!vcpu->arch.exception_vmexit.pending ||
1738 vcpu->arch.exception_vmexit.vector == DB_VECTOR)) {
1739 vmx->nested.mtf_pending = true;
1740 kvm_make_request(KVM_REQ_EVENT, vcpu);
1742 vmx->nested.mtf_pending = false;
1746 static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
1748 vmx_update_emulated_instruction(vcpu);
1749 return skip_emulated_instruction(vcpu);
1752 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure that we clear the HLT state in the VMCS. We don't need to
	 * explicitly skip the instruction because if the HLT state is set,
	 * then the instruction is already executing and RIP has already been
	 * advanced.
	 */
1760 if (kvm_hlt_in_guest(vcpu->kvm) &&
1761 vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
1762 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
1765 static void vmx_inject_exception(struct kvm_vcpu *vcpu)
1767 struct kvm_queued_exception *ex = &vcpu->arch.exception;
1768 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
1769 struct vcpu_vmx *vmx = to_vmx(vcpu);
1771 kvm_deliver_exception_payload(vcpu, ex);
1773 if (ex->has_error_code) {
		/*
		 * Despite the error code being architecturally defined as 32
		 * bits, and the VMCS field being 32 bits, Intel CPUs and thus
		 * VMX don't actually support setting bits 31:16. Hardware
		 * will (should) never provide a bogus error code, but AMD CPUs
		 * do generate error codes with bits 31:16 set, and so KVM's
		 * ABI lets userspace shove in arbitrary 32-bit values. Drop
		 * the upper bits to avoid VM-Fail, losing information that
		 * doesn't really exist is preferable to killing the VM.
		 */
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)ex->error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}
1788 if (vmx->rmode.vm86_active) {
1790 if (kvm_exception_is_soft(ex->vector))
1791 inc_eip = vcpu->arch.event_exit_inst_len;
1792 kvm_inject_realmode_interrupt(vcpu, ex->vector, inc_eip);
1796 WARN_ON_ONCE(vmx->emulation_required);
1798 if (kvm_exception_is_soft(ex->vector)) {
1799 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1800 vmx->vcpu.arch.event_exit_inst_len);
1801 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1803 intr_info |= INTR_TYPE_HARD_EXCEPTION;
1805 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
1807 vmx_clear_hlt(vcpu);
1810 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
1811 bool load_into_hardware)
1813 struct vmx_uret_msr *uret_msr;
1815 uret_msr = vmx_find_uret_msr(vmx, msr);
1819 uret_msr->load_into_hardware = load_into_hardware;
}

/*
 * Configuring user return MSRs to automatically save, load, and restore MSRs
 * that need to be shoved into hardware when running the guest. Note, omitting
 * an MSR here does _NOT_ mean it's not emulated, only that it will not be
 * loaded into hardware when running the guest.
 */
1828 static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
1830 #ifdef CONFIG_X86_64
1831 bool load_syscall_msrs;
1834 * The SYSCALL MSRs are only needed on long mode guests, and only
1835 * when EFER.SCE is set.
1837 load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
1838 (vmx->vcpu.arch.efer & EFER_SCE);
1840 vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
1841 vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
1842 vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
1844 vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
1846 vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
1847 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
1848 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));
	/*
	 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
	 * kernel and old userspace. If those guests run on a tsx=off host, do
	 * allow guests to use TSX_CTRL, but don't change the value in hardware
	 * so that TSX remains always disabled.
	 */
1856 vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
	/*
	 * The set of MSRs to load may have changed, reload MSRs before the
	 * next VM-Enter.
	 */
1862 vmx->guest_uret_msrs_loaded = false;
1865 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
1867 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1869 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
1870 return vmcs12->tsc_offset;
1875 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
1877 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1879 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
1880 nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
1881 return vmcs12->tsc_multiplier;
1883 return kvm_caps.default_tsc_scaling_ratio;
1886 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1888 vmcs_write64(TSC_OFFSET, offset);
1891 static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
1893 vmcs_write64(TSC_MULTIPLIER, multiplier);
}

/*
 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
 * all guests if the "nested" module option is off, and can also be disabled
 * for a single guest by disabling its VMX cpuid bit.
 */
1902 bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
1904 return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
}

/*
 * Userspace is allowed to set any supported IA32_FEATURE_CONTROL regardless of
 * guest CPUID. Note, KVM allows userspace to set "VMX in SMX" to maintain
 * backwards compatibility even though KVM doesn't support emulating SMX. And
 * because userspace set "VMX in SMX", the guest must also be allowed to set
 * it, e.g. if the MSR is left unlocked and the guest does a RMW operation.
 */
1914 #define KVM_SUPPORTED_FEATURE_CONTROL (FEAT_CTL_LOCKED | \
1915 FEAT_CTL_VMX_ENABLED_INSIDE_SMX | \
1916 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | \
1917 FEAT_CTL_SGX_LC_ENABLED | \
1918 FEAT_CTL_SGX_ENABLED | \
1919 FEAT_CTL_LMCE_ENABLED)
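
/*
 * Writes from the guest are rejected once IA32_FEATURE_CONTROL is locked;
 * host-initiated writes may set any KVM-supported bit, while guest writes
 * are limited to the bits currently exposed to this vCPU.
 */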
1921 static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
1922 struct msr_data *msr)
1924 uint64_t valid_bits;
1927 * Ensure KVM_SUPPORTED_FEATURE_CONTROL is updated when new bits are
1928 * exposed to the guest.
1930 WARN_ON_ONCE(vmx->msr_ia32_feature_control_valid_bits &
1931 ~KVM_SUPPORTED_FEATURE_CONTROL);
1933 if (!msr->host_initiated &&
1934 (vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED))
1937 if (msr->host_initiated)
1938 valid_bits = KVM_SUPPORTED_FEATURE_CONTROL;
1940 valid_bits = vmx->msr_ia32_feature_control_valid_bits;
1942 return !(msr->data & ~valid_bits);
1945 static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
1947 switch (msr->index) {
1948 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
1951 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
1953 return KVM_MSR_RET_INVALID;
/*
 * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
1962 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1964 struct vcpu_vmx *vmx = to_vmx(vcpu);
1965 struct vmx_uret_msr *msr;
1968 switch (msr_info->index) {
1969 #ifdef CONFIG_X86_64
1971 msr_info->data = vmcs_readl(GUEST_FS_BASE);
1974 msr_info->data = vmcs_readl(GUEST_GS_BASE);
1976 case MSR_KERNEL_GS_BASE:
1977 msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
1981 return kvm_get_msr_common(vcpu, msr_info);
1982 case MSR_IA32_TSX_CTRL:
1983 if (!msr_info->host_initiated &&
1984 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
1987 case MSR_IA32_UMWAIT_CONTROL:
1988 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
1991 msr_info->data = vmx->msr_ia32_umwait_control;
1993 case MSR_IA32_SPEC_CTRL:
1994 if (!msr_info->host_initiated &&
1995 !guest_has_spec_ctrl_msr(vcpu))
1998 msr_info->data = to_vmx(vcpu)->spec_ctrl;
2000 case MSR_IA32_SYSENTER_CS:
2001 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
2003 case MSR_IA32_SYSENTER_EIP:
2004 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
2006 case MSR_IA32_SYSENTER_ESP:
2007 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
2009 case MSR_IA32_BNDCFGS:
2010 if (!kvm_mpx_supported() ||
2011 (!msr_info->host_initiated &&
2012 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
2014 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
2016 case MSR_IA32_MCG_EXT_CTL:
2017 if (!msr_info->host_initiated &&
2018 !(vmx->msr_ia32_feature_control &
2019 FEAT_CTL_LMCE_ENABLED))
2021 msr_info->data = vcpu->arch.mcg_ext_ctl;
2023 case MSR_IA32_FEAT_CTL:
2024 msr_info->data = vmx->msr_ia32_feature_control;
2026 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2027 if (!msr_info->host_initiated &&
2028 !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
2030 msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
2031 [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
2033 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
2034 if (!nested_vmx_allowed(vcpu))
2036 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
		/*
		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
		 * instead of just ignoring the features, different Hyper-V
		 * versions are either trying to use them and fail or do some
		 * sanity checking and refuse to boot. Filter all unsupported
		 * features out.
		 */
2046 if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
2047 nested_evmcs_filter_control_msr(vcpu, msr_info->index,
2050 case MSR_IA32_RTIT_CTL:
2051 if (!vmx_pt_mode_is_host_guest())
2053 msr_info->data = vmx->pt_desc.guest.ctl;
2055 case MSR_IA32_RTIT_STATUS:
2056 if (!vmx_pt_mode_is_host_guest())
2058 msr_info->data = vmx->pt_desc.guest.status;
2060 case MSR_IA32_RTIT_CR3_MATCH:
2061 if (!vmx_pt_mode_is_host_guest() ||
2062 !intel_pt_validate_cap(vmx->pt_desc.caps,
2063 PT_CAP_cr3_filtering))
2065 msr_info->data = vmx->pt_desc.guest.cr3_match;
2067 case MSR_IA32_RTIT_OUTPUT_BASE:
2068 if (!vmx_pt_mode_is_host_guest() ||
2069 (!intel_pt_validate_cap(vmx->pt_desc.caps,
2070 PT_CAP_topa_output) &&
2071 !intel_pt_validate_cap(vmx->pt_desc.caps,
2072 PT_CAP_single_range_output)))
2074 msr_info->data = vmx->pt_desc.guest.output_base;
2076 case MSR_IA32_RTIT_OUTPUT_MASK:
2077 if (!vmx_pt_mode_is_host_guest() ||
2078 (!intel_pt_validate_cap(vmx->pt_desc.caps,
2079 PT_CAP_topa_output) &&
2080 !intel_pt_validate_cap(vmx->pt_desc.caps,
2081 PT_CAP_single_range_output)))
2083 msr_info->data = vmx->pt_desc.guest.output_mask;
2085 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2086 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2087 if (!vmx_pt_mode_is_host_guest() ||
2088 (index >= 2 * vmx->pt_desc.num_address_ranges))
2091 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2093 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2095 case MSR_IA32_DEBUGCTLMSR:
2096 msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
2100 msr = vmx_find_uret_msr(vmx, msr_info->index);
2102 msr_info->data = msr->data;
2105 return kvm_get_msr_common(vcpu, msr_info);
2111 static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
2114 #ifdef CONFIG_X86_64
2115 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
2118 return (unsigned long)data;
2121 static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
2125 if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
2126 (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
2127 debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
2129 if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
2130 (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
2131 debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
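/*
 * Illustrative sketch (not kernel code): how a supported-bits mask like the
 * one built above is typically used to validate a guest DEBUGCTL write
 * (e.g. LBR in bit 0, BTF in bit 1).  The helper name and standalone types
 * are assumptions made only for this example.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

/* Reject a write that sets any bit outside the supported mask. */
static bool example_debugctl_write_ok(uint64_t data, uint64_t supported)
{
	return (data & ~supported) == 0;
}
#endif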
2136 static int vmx_set_msr_ia32_cmd(struct kvm_vcpu *vcpu,
2137 struct msr_data *msr_info,
2138 bool guest_has_feat, u64 cmd,
2139 int x86_feature_bit)
2141 if (!msr_info->host_initiated && !guest_has_feat)
2144 if (!(msr_info->data & ~cmd))
2146 if (!boot_cpu_has(x86_feature_bit))
2148 if (!msr_info->data)
2151 wrmsrl(msr_info->index, cmd);
2155 * When it's written (to non-zero) for the first time, pass it through.
2159 * The handling of the MSR bitmap for L2 guests is done in
2160 * nested_vmx_prepare_msr_bitmap. We should not touch the
2161 * vmcs02.msr_bitmap here since it gets completely overwritten in the merging.
2164 vmx_disable_intercept_for_msr(vcpu, msr_info->index, MSR_TYPE_W);
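/*
 * Illustrative sketch (not kernel code): the "emulate the first valid,
 * non-zero write, then stop intercepting" pattern used above for the
 * command-style MSRs (and again below for MSR_IA32_XFD).  The structure and
 * helper are assumptions invented for the example.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

struct example_msr_state {
	bool passthrough;	/* write interception already disabled? */
};

static void example_handle_cmd_write(struct example_msr_state *s,
				     uint64_t data, bool data_valid)
{
	if (!data_valid)
		return;			/* reject reserved bits, etc. */
	if (!data)
		return;			/* a zero write is a nop */
	/* ...perform the write on behalf of the guest here... */
	if (!s->passthrough)
		s->passthrough = true;	/* future writes go straight to hardware */
}
#endif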
2170 * Writes msr value into the appropriate "register".
2171 * Returns 0 on success, non-0 otherwise.
2172 * Assumes vcpu_load() was already called.
2174 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2176 struct vcpu_vmx *vmx = to_vmx(vcpu);
2177 struct vmx_uret_msr *msr;
2179 u32 msr_index = msr_info->index;
2180 u64 data = msr_info->data;
2183 switch (msr_index) {
2185 ret = kvm_set_msr_common(vcpu, msr_info);
2187 #ifdef CONFIG_X86_64
2189 vmx_segment_cache_clear(vmx);
2190 vmcs_writel(GUEST_FS_BASE, data);
2193 vmx_segment_cache_clear(vmx);
2194 vmcs_writel(GUEST_GS_BASE, data);
2196 case MSR_KERNEL_GS_BASE:
2197 vmx_write_guest_kernel_gs_base(vmx, data);
2200 ret = kvm_set_msr_common(vcpu, msr_info);
2202 * Always intercepting WRMSR could incur non-negligible
2203 * overhead given xfd might be changed frequently in
2204 * guest context switch. Disable write interception
2205 * upon the first write with a non-zero value (indicating
2206 * potential usage on dynamic xfeatures). Also update
2207 * exception bitmap to trap #NM for proper virtualization of guest xfd_err.
2211 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD,
2213 vcpu->arch.xfd_no_write_intercept = true;
2214 vmx_update_exception_bitmap(vcpu);
2218 case MSR_IA32_SYSENTER_CS:
2219 if (is_guest_mode(vcpu))
2220 get_vmcs12(vcpu)->guest_sysenter_cs = data;
2221 vmcs_write32(GUEST_SYSENTER_CS, data);
2223 case MSR_IA32_SYSENTER_EIP:
2224 if (is_guest_mode(vcpu)) {
2225 data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2226 get_vmcs12(vcpu)->guest_sysenter_eip = data;
2228 vmcs_writel(GUEST_SYSENTER_EIP, data);
2230 case MSR_IA32_SYSENTER_ESP:
2231 if (is_guest_mode(vcpu)) {
2232 data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2233 get_vmcs12(vcpu)->guest_sysenter_esp = data;
2235 vmcs_writel(GUEST_SYSENTER_ESP, data);
2237 case MSR_IA32_DEBUGCTLMSR: {
2240 invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
2241 if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
2242 kvm_pr_unimpl_wrmsr(vcpu, msr_index, data);
2243 data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2244 invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2250 if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
2251 VM_EXIT_SAVE_DEBUG_CONTROLS)
2252 get_vmcs12(vcpu)->guest_ia32_debugctl = data;
2254 vmcs_write64(GUEST_IA32_DEBUGCTL, data);
2255 if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
2256 (data & DEBUGCTLMSR_LBR))
2257 intel_pmu_create_guest_lbr_event(vcpu);
2260 case MSR_IA32_BNDCFGS:
2261 if (!kvm_mpx_supported() ||
2262 (!msr_info->host_initiated &&
2263 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
2265 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
2266 (data & MSR_IA32_BNDCFGS_RSVD))
2269 if (is_guest_mode(vcpu) &&
2270 ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) ||
2271 (vmx->nested.msrs.exit_ctls_high & VM_EXIT_CLEAR_BNDCFGS)))
2272 get_vmcs12(vcpu)->guest_bndcfgs = data;
2274 vmcs_write64(GUEST_BNDCFGS, data);
2276 case MSR_IA32_UMWAIT_CONTROL:
2277 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2280 /* Reserved bit 1 and the upper 32 bits [63:32] must be zero. */
2281 if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
2284 vmx->msr_ia32_umwait_control = data;
2286 case MSR_IA32_SPEC_CTRL:
2287 if (!msr_info->host_initiated &&
2288 !guest_has_spec_ctrl_msr(vcpu))
2291 if (kvm_spec_ctrl_test_value(data))
2294 vmx->spec_ctrl = data;
2300 * When it's written (to non-zero) for the first time, pass
2304 * The handling of the MSR bitmap for L2 guests is done in
2305 * nested_vmx_prepare_msr_bitmap. We should not touch the
2306 * vmcs02.msr_bitmap here since it gets completely overwritten
2307 * in the merging. We update the vmcs01 here for L1 as well
2308 * since it will end up touching the MSR anyway now.
2310 vmx_disable_intercept_for_msr(vcpu,
2314 case MSR_IA32_TSX_CTRL:
2315 if (!msr_info->host_initiated &&
2316 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2318 if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
2321 case MSR_IA32_PRED_CMD:
2322 ret = vmx_set_msr_ia32_cmd(vcpu, msr_info,
2323 guest_has_pred_cmd_msr(vcpu),
2327 case MSR_IA32_FLUSH_CMD:
2328 ret = vmx_set_msr_ia32_cmd(vcpu, msr_info,
2329 guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D),
2331 X86_FEATURE_FLUSH_L1D);
2333 case MSR_IA32_CR_PAT:
2334 if (!kvm_pat_valid(data))
2337 if (is_guest_mode(vcpu) &&
2338 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
2339 get_vmcs12(vcpu)->guest_ia32_pat = data;
2341 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2342 vmcs_write64(GUEST_IA32_PAT, data);
2343 vcpu->arch.pat = data;
2346 ret = kvm_set_msr_common(vcpu, msr_info);
2348 case MSR_IA32_MCG_EXT_CTL:
2349 if ((!msr_info->host_initiated &&
2350 !(to_vmx(vcpu)->msr_ia32_feature_control &
2351 FEAT_CTL_LMCE_ENABLED)) ||
2352 (data & ~MCG_EXT_CTL_LMCE_EN))
2354 vcpu->arch.mcg_ext_ctl = data;
2356 case MSR_IA32_FEAT_CTL:
2357 if (!is_vmx_feature_control_msr_valid(vmx, msr_info))
2360 vmx->msr_ia32_feature_control = data;
2361 if (msr_info->host_initiated && data == 0)
2362 vmx_leave_nested(vcpu);
2364 /* SGX may be enabled/disabled by guest's firmware */
2365 vmx_write_encls_bitmap(vcpu, NULL);
2367 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2369 * On real hardware, the LE hash MSRs are writable before
2370 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX),
2371 * at which point SGX related bits in IA32_FEATURE_CONTROL become writable.
2374 * KVM does not emulate SGX activation for simplicity, so
2375 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL
2376 * is unlocked. This is technically not architectural
2377 * behavior, but it's close enough.
2379 if (!msr_info->host_initiated &&
2380 (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) ||
2381 ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
2382 !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
2384 vmx->msr_ia32_sgxlepubkeyhash
2385 [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;
2387 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
2388 if (!msr_info->host_initiated)
2389 return 1; /* they are read-only */
2390 if (!nested_vmx_allowed(vcpu))
2392 return vmx_set_vmx_msr(vcpu, msr_index, data);
2393 case MSR_IA32_RTIT_CTL:
2394 if (!vmx_pt_mode_is_host_guest() ||
2395 vmx_rtit_ctl_check(vcpu, data) ||
2398 vmcs_write64(GUEST_IA32_RTIT_CTL, data);
2399 vmx->pt_desc.guest.ctl = data;
2400 pt_update_intercept_for_msr(vcpu);
2402 case MSR_IA32_RTIT_STATUS:
2403 if (!pt_can_write_msr(vmx))
2405 if (data & MSR_IA32_RTIT_STATUS_MASK)
2407 vmx->pt_desc.guest.status = data;
2409 case MSR_IA32_RTIT_CR3_MATCH:
2410 if (!pt_can_write_msr(vmx))
2412 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2413 PT_CAP_cr3_filtering))
2415 vmx->pt_desc.guest.cr3_match = data;
2417 case MSR_IA32_RTIT_OUTPUT_BASE:
2418 if (!pt_can_write_msr(vmx))
2420 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2421 PT_CAP_topa_output) &&
2422 !intel_pt_validate_cap(vmx->pt_desc.caps,
2423 PT_CAP_single_range_output))
2425 if (!pt_output_base_valid(vcpu, data))
2427 vmx->pt_desc.guest.output_base = data;
2429 case MSR_IA32_RTIT_OUTPUT_MASK:
2430 if (!pt_can_write_msr(vmx))
2432 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2433 PT_CAP_topa_output) &&
2434 !intel_pt_validate_cap(vmx->pt_desc.caps,
2435 PT_CAP_single_range_output))
2437 vmx->pt_desc.guest.output_mask = data;
2439 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2440 if (!pt_can_write_msr(vmx))
2442 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2443 if (index >= 2 * vmx->pt_desc.num_address_ranges)
2445 if (is_noncanonical_address(data, vcpu))
2448 vmx->pt_desc.guest.addr_b[index / 2] = data;
2450 vmx->pt_desc.guest.addr_a[index / 2] = data;
2452 case MSR_IA32_PERF_CAPABILITIES:
2453 if (data && !vcpu_to_pmu(vcpu)->version)
2455 if (data & PMU_CAP_LBR_FMT) {
2456 if ((data & PMU_CAP_LBR_FMT) !=
2457 (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
2459 if (!cpuid_model_is_consistent(vcpu))
2462 if (data & PERF_CAP_PEBS_FORMAT) {
2463 if ((data & PERF_CAP_PEBS_MASK) !=
2464 (kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK))
2466 if (!guest_cpuid_has(vcpu, X86_FEATURE_DS))
2468 if (!guest_cpuid_has(vcpu, X86_FEATURE_DTES64))
2470 if (!cpuid_model_is_consistent(vcpu))
2473 ret = kvm_set_msr_common(vcpu, msr_info);
2478 msr = vmx_find_uret_msr(vmx, msr_index);
2480 ret = vmx_set_guest_uret_msr(vmx, msr, data);
2482 ret = kvm_set_msr_common(vcpu, msr_info);
2485 /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
2486 if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
2487 vmx_update_fb_clear_dis(vcpu, vmx);
2492 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2494 unsigned long guest_owned_bits;
2496 kvm_register_mark_available(vcpu, reg);
2500 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2503 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2505 case VCPU_EXREG_PDPTR:
2507 ept_save_pdptrs(vcpu);
2509 case VCPU_EXREG_CR0:
2510 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2512 vcpu->arch.cr0 &= ~guest_owned_bits;
2513 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2515 case VCPU_EXREG_CR3:
2517 * When intercepting CR3 loads, e.g. for shadowing paging, KVM's
2518 * CR3 is loaded into hardware, not the guest's CR3.
2520 if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING))
2521 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2523 case VCPU_EXREG_CR4:
2524 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2526 vcpu->arch.cr4 &= ~guest_owned_bits;
2527 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
2530 KVM_BUG_ON(1, vcpu->kvm);
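/*
 * Illustrative sketch (not kernel code): how the guest-owned bit masks used
 * above combine KVM's cached CR0/CR4 value with the live value in the VMCS.
 * Bits the guest owns are read back from hardware, everything else comes
 * from the software cache.  The helper name and types are assumptions for
 * the example.
 */
#if 0
#include <stdint.h>

static uint64_t example_merge_cr(uint64_t cached, uint64_t hw_value,
				 uint64_t guest_owned_bits)
{
	return (cached & ~guest_owned_bits) | (hw_value & guest_owned_bits);
}
#endif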
2536 * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID
2537 * directly instead of going through cpu_has(), to ensure KVM is trapping
2538 * ENCLS whenever it's supported in hardware. It does not matter whether
2539 * the host OS supports or has enabled SGX.
2541 static bool cpu_has_sgx(void)
2543 return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
2547 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
2548 * can't be used due to errata where VM Exit may incorrectly clear
2549 * IA32_PERF_GLOBAL_CTRL[34:32]. Work around the errata by using the
2550 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2552 static bool cpu_has_perf_global_ctrl_bug(void)
2554 if (boot_cpu_data.x86 == 0x6) {
2555 switch (boot_cpu_data.x86_model) {
2556 case INTEL_FAM6_NEHALEM_EP: /* AAK155 */
2557 case INTEL_FAM6_NEHALEM: /* AAP115 */
2558 case INTEL_FAM6_WESTMERE: /* AAT100 */
2559 case INTEL_FAM6_WESTMERE_EP: /* BC86,AAY89,BD102 */
2560 case INTEL_FAM6_NEHALEM_EX: /* BA97 */
2570 static int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result)
2572 u32 vmx_msr_low, vmx_msr_high;
2573 u32 ctl = ctl_min | ctl_opt;
2575 rdmsr(msr, vmx_msr_low, vmx_msr_high);
2577 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2578 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
2580 /* Ensure minimum (required) set of control bits are supported. */
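/*
 * Illustrative sketch (not kernel code): the allowed-0/allowed-1 convention
 * of the VMX capability MSRs consumed above.  The low 32 bits report
 * controls that must be 1, the high 32 bits report controls that may be 1.
 * The helper is an assumption written for the example; it returns false when
 * a required control cannot be satisfied.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool example_adjust_controls(uint32_t min, uint32_t opt,
				    uint64_t cap_msr_value, uint32_t *result)
{
	uint32_t allowed1 = (uint32_t)(cap_msr_value >> 32);	/* may be 1 */
	uint32_t allowed0 = (uint32_t)cap_msr_value;		/* must be 1 */
	uint32_t ctl = (min | opt) & allowed1;

	ctl |= allowed0;
	if (min & ~ctl)
		return false;	/* a required control is not supported */
	*result = ctl;
	return true;
}
#endif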
2588 static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
2592 rdmsrl(msr, allowed);
2594 return ctl_opt & allowed;
2597 static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2598 struct vmx_capability *vmx_cap)
2600 u32 vmx_msr_low, vmx_msr_high;
2601 u32 _pin_based_exec_control = 0;
2602 u32 _cpu_based_exec_control = 0;
2603 u32 _cpu_based_2nd_exec_control = 0;
2604 u64 _cpu_based_3rd_exec_control = 0;
2605 u32 _vmexit_control = 0;
2606 u32 _vmentry_control = 0;
2611 * LOAD/SAVE_DEBUG_CONTROLS are absent because both are mandatory.
2612 * SAVE_IA32_PAT and SAVE_IA32_EFER are absent because KVM always
2613 * intercepts writes to PAT and EFER, i.e. never enables those controls.
2618 } const vmcs_entry_exit_pairs[] = {
2619 { VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL },
2620 { VM_ENTRY_LOAD_IA32_PAT, VM_EXIT_LOAD_IA32_PAT },
2621 { VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER },
2622 { VM_ENTRY_LOAD_BNDCFGS, VM_EXIT_CLEAR_BNDCFGS },
2623 { VM_ENTRY_LOAD_IA32_RTIT_CTL, VM_EXIT_CLEAR_IA32_RTIT_CTL },
2626 memset(vmcs_conf, 0, sizeof(*vmcs_conf));
2628 if (adjust_vmx_controls(KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL,
2629 KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL,
2630 MSR_IA32_VMX_PROCBASED_CTLS,
2631 &_cpu_based_exec_control))
2633 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2634 if (adjust_vmx_controls(KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL,
2635 KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL,
2636 MSR_IA32_VMX_PROCBASED_CTLS2,
2637 &_cpu_based_2nd_exec_control))
2640 #ifndef CONFIG_X86_64
2641 if (!(_cpu_based_2nd_exec_control &
2642 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2643 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2646 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2647 _cpu_based_2nd_exec_control &= ~(
2648 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2649 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2650 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2652 rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
2653 &vmx_cap->ept, &vmx_cap->vpid);
2655 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
2657 pr_warn_once("EPT capabilities should not exist when the enable EPT "
2658 "VM-execution control cannot be set\n");
2660 if (error_on_inconsistent_vmcs_config)
2665 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
2667 pr_warn_once("VPID capabilities should not exist when the enable VPID "
2668 "VM-execution control cannot be set\n");
2670 if (error_on_inconsistent_vmcs_config)
2677 _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_ENCLS_EXITING;
2679 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
2680 _cpu_based_3rd_exec_control =
2681 adjust_vmx_controls64(KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL,
2682 MSR_IA32_VMX_PROCBASED_CTLS3);
2684 if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_EXIT_CONTROLS,
2685 KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS,
2686 MSR_IA32_VMX_EXIT_CTLS,
2690 if (adjust_vmx_controls(KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL,
2691 KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL,
2692 MSR_IA32_VMX_PINBASED_CTLS,
2693 &_pin_based_exec_control))
2696 if (cpu_has_broken_vmx_preemption_timer())
2697 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2698 if (!(_cpu_based_2nd_exec_control &
2699 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
2700 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2702 if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS,
2703 KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS,
2704 MSR_IA32_VMX_ENTRY_CTLS,
2708 for (i = 0; i < ARRAY_SIZE(vmcs_entry_exit_pairs); i++) {
2709 u32 n_ctrl = vmcs_entry_exit_pairs[i].entry_control;
2710 u32 x_ctrl = vmcs_entry_exit_pairs[i].exit_control;
2712 if (!(_vmentry_control & n_ctrl) == !(_vmexit_control & x_ctrl))
2715 pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, entry = %x, exit = %x\n",
2716 _vmentry_control & n_ctrl, _vmexit_control & x_ctrl);
2718 if (error_on_inconsistent_vmcs_config)
2721 _vmentry_control &= ~n_ctrl;
2722 _vmexit_control &= ~x_ctrl;
2725 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2727 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2728 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2731 #ifdef CONFIG_X86_64
2732 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2733 if (vmx_msr_high & (1u<<16))
2737 /* Require Write-Back (WB) memory type for VMCS accesses. */
2738 if (((vmx_msr_high >> 18) & 15) != 6)
2741 rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
2743 vmcs_conf->size = vmx_msr_high & 0x1fff;
2744 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
2746 vmcs_conf->revision_id = vmx_msr_low;
2748 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2749 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2750 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2751 vmcs_conf->cpu_based_3rd_exec_ctrl = _cpu_based_3rd_exec_control;
2752 vmcs_conf->vmexit_ctrl = _vmexit_control;
2753 vmcs_conf->vmentry_ctrl = _vmentry_control;
2754 vmcs_conf->misc = misc_msr;
2756 #if IS_ENABLED(CONFIG_HYPERV)
2757 if (enlightened_vmcs)
2758 evmcs_sanitize_exec_ctrls(vmcs_conf);
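/*
 * Illustrative sketch (not kernel code): the MSR_IA32_VMX_BASIC fields that
 * setup_vmcs_config() consumes above.  Bit positions mirror the shifts and
 * masks used in that function; the struct and helper are assumptions for the
 * example.
 */
#if 0
#include <stdint.h>

struct example_vmx_basic {
	uint32_t revision_id;	/* low 32 bits */
	uint32_t vmcs_size;	/* bits 44:32, never greater than 4 KiB */
	uint32_t mem_type;	/* bits 53:50, 6 == write-back */
};

static struct example_vmx_basic example_decode_vmx_basic(uint64_t msr)
{
	struct example_vmx_basic b;

	b.revision_id = (uint32_t)msr;
	b.vmcs_size   = (uint32_t)((msr >> 32) & 0x1fff);
	b.mem_type    = (uint32_t)((msr >> 50) & 0xf);
	return b;
}
#endif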
2764 static bool kvm_is_vmx_supported(void)
2766 int cpu = raw_smp_processor_id();
2768 if (!cpu_has_vmx()) {
2769 pr_err("VMX not supported by CPU %d\n", cpu);
2773 if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2774 !this_cpu_has(X86_FEATURE_VMX)) {
2775 pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
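/*
 * Illustrative sketch (not kernel code): the MSR_IA32_FEAT_CTL bits behind
 * the feature check above.  Bit 0 locks the MSR and bit 2 enables VMX
 * outside of SMX operation; firmware must set both for VMXON to succeed in
 * the normal (non-SMX) case.  The helper is an assumption for the example.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool example_feat_ctl_allows_vmx(uint64_t feat_ctl)
{
	const uint64_t locked          = 1ULL << 0;
	const uint64_t vmx_outside_smx = 1ULL << 2;

	return (feat_ctl & locked) && (feat_ctl & vmx_outside_smx);
}
#endif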
2782 static int vmx_check_processor_compat(void)
2784 int cpu = raw_smp_processor_id();
2785 struct vmcs_config vmcs_conf;
2786 struct vmx_capability vmx_cap;
2788 if (!kvm_is_vmx_supported())
2791 if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) {
2792 pr_err("Failed to setup VMCS config on CPU %d\n", cpu);
2796 nested_vmx_setup_ctls_msrs(&vmcs_conf, vmx_cap.ept);
2797 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config))) {
2798 pr_err("Inconsistent VMCS config on CPU %d\n", cpu);
2804 static int kvm_cpu_vmxon(u64 vmxon_pointer)
2808 cr4_set_bits(X86_CR4_VMXE);
2810 asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
2811 _ASM_EXTABLE(1b, %l[fault])
2812 : : [vmxon_pointer] "m"(vmxon_pointer)
2817 WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
2818 rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
2819 cr4_clear_bits(X86_CR4_VMXE);
2824 static int vmx_hardware_enable(void)
2826 int cpu = raw_smp_processor_id();
2827 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2830 if (cr4_read_shadow() & X86_CR4_VMXE)
2834 * This can happen if we hot-added a CPU but failed to allocate
2835 * VP assist page for it.
2837 if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
2840 intel_pt_handle_vmx(1);
2842 r = kvm_cpu_vmxon(phys_addr);
2844 intel_pt_handle_vmx(0);
2854 static void vmclear_local_loaded_vmcss(void)
2856 int cpu = raw_smp_processor_id();
2857 struct loaded_vmcs *v, *n;
2859 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2860 loaded_vmcss_on_cpu_link)
2861 __loaded_vmcs_clear(v);
2864 static void vmx_hardware_disable(void)
2866 vmclear_local_loaded_vmcss();
2869 kvm_spurious_fault();
2873 intel_pt_handle_vmx(0);
2876 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
2878 int node = cpu_to_node(cpu);
2882 pages = __alloc_pages_node(node, flags, 0);
2885 vmcs = page_address(pages);
2886 memset(vmcs, 0, vmcs_config.size);
2888 /* KVM supports Enlightened VMCS v1 only */
2889 if (kvm_is_using_evmcs())
2890 vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
2892 vmcs->hdr.revision_id = vmcs_config.revision_id;
2895 vmcs->hdr.shadow_vmcs = 1;
2899 void free_vmcs(struct vmcs *vmcs)
2901 free_page((unsigned long)vmcs);
2905 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
2907 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2909 if (!loaded_vmcs->vmcs)
2911 loaded_vmcs_clear(loaded_vmcs);
2912 free_vmcs(loaded_vmcs->vmcs);
2913 loaded_vmcs->vmcs = NULL;
2914 if (loaded_vmcs->msr_bitmap)
2915 free_page((unsigned long)loaded_vmcs->msr_bitmap);
2916 WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
2919 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2921 loaded_vmcs->vmcs = alloc_vmcs(false);
2922 if (!loaded_vmcs->vmcs)
2925 vmcs_clear(loaded_vmcs->vmcs);
2927 loaded_vmcs->shadow_vmcs = NULL;
2928 loaded_vmcs->hv_timer_soft_disabled = false;
2929 loaded_vmcs->cpu = -1;
2930 loaded_vmcs->launched = 0;
2932 if (cpu_has_vmx_msr_bitmap()) {
2933 loaded_vmcs->msr_bitmap = (unsigned long *)
2934 __get_free_page(GFP_KERNEL_ACCOUNT);
2935 if (!loaded_vmcs->msr_bitmap)
2937 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
2940 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
2941 memset(&loaded_vmcs->controls_shadow, 0,
2942 sizeof(struct vmcs_controls_shadow));
2947 free_loaded_vmcs(loaded_vmcs);
2951 static void free_kvm_area(void)
2955 for_each_possible_cpu(cpu) {
2956 free_vmcs(per_cpu(vmxarea, cpu));
2957 per_cpu(vmxarea, cpu) = NULL;
2961 static __init int alloc_kvm_area(void)
2965 for_each_possible_cpu(cpu) {
2968 vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
2975 * When eVMCS is enabled, alloc_vmcs_cpu() sets
2976 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
2977 * revision_id reported by MSR_IA32_VMX_BASIC.
2979 * However, even though not explicitly documented by
2980 * TLFS, VMXArea passed as VMXON argument should
2981 * still be marked with the revision_id reported by the physical CPU.
2984 if (kvm_is_using_evmcs())
2985 vmcs->hdr.revision_id = vmcs_config.revision_id;
2987 per_cpu(vmxarea, cpu) = vmcs;
2992 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
2993 struct kvm_segment *save)
2995 if (!emulate_invalid_guest_state) {
2997 * CS and SS RPL should be equal during guest entry according
2998 * to VMX spec, but in reality it is not always so. Since vcpu
2999 * is in the middle of the transition from real mode to
3000 * protected mode it is safe to assume that RPL 0 is a good default value.
3003 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
3004 save->selector &= ~SEGMENT_RPL_MASK;
3005 save->dpl = save->selector & SEGMENT_RPL_MASK;
3008 __vmx_set_segment(vcpu, save, seg);
3011 static void enter_pmode(struct kvm_vcpu *vcpu)
3013 unsigned long flags;
3014 struct vcpu_vmx *vmx = to_vmx(vcpu);
3017 * Update the real mode segment cache. It may not be up-to-date if a segment
3018 * register was written while the vcpu was in guest mode.
3020 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3021 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3022 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3023 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3024 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3025 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3027 vmx->rmode.vm86_active = 0;
3029 __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3031 flags = vmcs_readl(GUEST_RFLAGS);
3032 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3033 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3034 vmcs_writel(GUEST_RFLAGS, flags);
3036 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
3037 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3039 vmx_update_exception_bitmap(vcpu);
3041 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3042 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3043 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3044 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3045 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3046 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3049 static void fix_rmode_seg(int seg, struct kvm_segment *save)
3051 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3052 struct kvm_segment var = *save;
3055 if (seg == VCPU_SREG_CS)
3058 if (!emulate_invalid_guest_state) {
3059 var.selector = var.base >> 4;
3060 var.base = var.base & 0xffff0;
3070 if (save->base & 0xf)
3071 pr_warn_once("segment base is not paragraph aligned "
3072 "when entering protected mode (seg=%d)", seg);
3075 vmcs_write16(sf->selector, var.selector);
3076 vmcs_writel(sf->base, var.base);
3077 vmcs_write32(sf->limit, var.limit);
3078 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
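/*
 * Illustrative sketch (not kernel code): the real-mode segmentation rule the
 * fixup above relies on.  A 16-bit selector addresses the paragraph (16-byte
 * unit) at selector * 16, so selector and base are interchangeable when the
 * base is paragraph aligned.  The helpers are assumptions for the example.
 */
#if 0
#include <stdint.h>

static uint32_t example_rmode_linear(uint16_t selector, uint16_t offset)
{
	return ((uint32_t)selector << 4) + offset;
}

static uint16_t example_rmode_selector_for_base(uint32_t base)
{
	return (uint16_t)(base >> 4);	/* exact only if (base & 0xf) == 0 */
}
#endif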
3081 static void enter_rmode(struct kvm_vcpu *vcpu)
3083 unsigned long flags;
3084 struct vcpu_vmx *vmx = to_vmx(vcpu);
3085 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
3087 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3088 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3089 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3090 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3091 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3092 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3093 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3095 vmx->rmode.vm86_active = 1;
3098 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
3099 * vcpu. Warn the user that an update is overdue.
3101 if (!kvm_vmx->tss_addr)
3102 pr_warn_once("KVM_SET_TSS_ADDR needs to be called before running vCPU\n");
3104 vmx_segment_cache_clear(vmx);
3106 vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
3107 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
3108 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3110 flags = vmcs_readl(GUEST_RFLAGS);
3111 vmx->rmode.save_rflags = flags;
3113 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3115 vmcs_writel(GUEST_RFLAGS, flags);
3116 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
3117 vmx_update_exception_bitmap(vcpu);
3119 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3120 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3121 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3122 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3123 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3124 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3127 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3129 struct vcpu_vmx *vmx = to_vmx(vcpu);
3131 /* Nothing to do if hardware doesn't support EFER. */
3132 if (!vmx_find_uret_msr(vmx, MSR_EFER))
3135 vcpu->arch.efer = efer;
3136 #ifdef CONFIG_X86_64
3137 if (efer & EFER_LMA)
3138 vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
3140 vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
3142 if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
3146 vmx_setup_uret_msrs(vmx);
3150 #ifdef CONFIG_X86_64
3152 static void enter_lmode(struct kvm_vcpu *vcpu)
3156 vmx_segment_cache_clear(to_vmx(vcpu));
3158 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3159 if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
3160 pr_debug_ratelimited("%s: tss fixup for long mode. \n",
3162 vmcs_write32(GUEST_TR_AR_BYTES,
3163 (guest_tr_ar & ~VMX_AR_TYPE_MASK)
3164 | VMX_AR_TYPE_BUSY_64_TSS);
3166 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3169 static void exit_lmode(struct kvm_vcpu *vcpu)
3171 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3176 static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
3178 struct vcpu_vmx *vmx = to_vmx(vcpu);
3181 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
3182 * the CPU is not required to invalidate guest-physical mappings on
3183 * VM-Entry, even if VPID is disabled. Guest-physical mappings are
3184 * associated with the root EPT structure and not any particular VPID
3185 * (INVVPID also isn't required to invalidate guest-physical mappings).
3189 } else if (enable_vpid) {
3190 if (cpu_has_vmx_invvpid_global()) {
3191 vpid_sync_vcpu_global();
3193 vpid_sync_vcpu_single(vmx->vpid);
3194 vpid_sync_vcpu_single(vmx->nested.vpid02);
3199 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
3201 if (is_guest_mode(vcpu))
3202 return nested_get_vpid02(vcpu);
3203 return to_vmx(vcpu)->vpid;
3206 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
3208 struct kvm_mmu *mmu = vcpu->arch.mmu;
3209 u64 root_hpa = mmu->root.hpa;
3211 /* No flush required if the current context is invalid. */
3212 if (!VALID_PAGE(root_hpa))
3216 ept_sync_context(construct_eptp(vcpu, root_hpa,
3217 mmu->root_role.level));
3219 vpid_sync_context(vmx_get_current_vpid(vcpu));
3222 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
3225 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
3226 * vmx_flush_tlb_guest() for an explanation of why this is ok.
3228 vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
3231 static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
3234 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
3235 * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are
3236 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
3237 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
3238 * i.e. no explicit INVVPID is necessary.
3240 vpid_sync_context(vmx_get_current_vpid(vcpu));
3243 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
3245 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3247 if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
3250 if (is_pae_paging(vcpu)) {
3251 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3252 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3253 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3254 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3258 void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3260 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3262 if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
3265 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3266 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3267 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3268 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3270 kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
3273 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
3274 CPU_BASED_CR3_STORE_EXITING)
3276 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3278 struct vcpu_vmx *vmx = to_vmx(vcpu);
3279 unsigned long hw_cr0, old_cr0_pg;
3282 old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
3284 hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
3285 if (is_unrestricted_guest(vcpu))
3286 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3288 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3290 hw_cr0 |= X86_CR0_WP;
3292 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3295 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3299 vmcs_writel(CR0_READ_SHADOW, cr0);
3300 vmcs_writel(GUEST_CR0, hw_cr0);
3301 vcpu->arch.cr0 = cr0;
3302 kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
3304 #ifdef CONFIG_X86_64
3305 if (vcpu->arch.efer & EFER_LME) {
3306 if (!old_cr0_pg && (cr0 & X86_CR0_PG))
3308 else if (old_cr0_pg && !(cr0 & X86_CR0_PG))
3313 if (enable_ept && !is_unrestricted_guest(vcpu)) {
3315 * Ensure KVM has an up-to-date snapshot of the guest's CR3. If
3316 * the below code _enables_ CR3 exiting, vmx_cache_reg() will
3317 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks
3318 * KVM's CR3 is installed.
3320 if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
3321 vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
3324 * When running with EPT but not unrestricted guest, KVM must
3325 * intercept CR3 accesses when paging is _disabled_. This is
3326 * necessary because restricted guests can't actually run with
3327 * paging disabled, and so KVM stuffs its own CR3 in order to
3328 * run the guest with identity mapped page tables.
3330 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the
3331 * update, it may be stale with respect to CR3 interception,
3332 * e.g. after nested VM-Enter.
3334 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
3335 * stores to forward them to L1, even if KVM does not need to
3336 * intercept them to preserve its identity mapped page tables.
3338 if (!(cr0 & X86_CR0_PG)) {
3339 exec_controls_setbit(vmx, CR3_EXITING_BITS);
3340 } else if (!is_guest_mode(vcpu)) {
3341 exec_controls_clearbit(vmx, CR3_EXITING_BITS);
3343 tmp = exec_controls_get(vmx);
3344 tmp &= ~CR3_EXITING_BITS;
3345 tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
3346 exec_controls_set(vmx, tmp);
3349 /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
3350 if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
3351 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3354 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
3355 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
3357 if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG))
3358 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
3361 /* depends on vcpu->arch.cr0 to be set to a new value */
3362 vmx->emulation_required = vmx_emulation_required(vcpu);
3365 static int vmx_get_max_tdp_level(void)
3367 if (cpu_has_vmx_ept_5levels())
3372 u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
3374 u64 eptp = VMX_EPTP_MT_WB;
3376 eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
3378 if (enable_ept_ad_bits &&
3379 (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
3380 eptp |= VMX_EPTP_AD_ENABLE_BIT;
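/*
 * Illustrative sketch (not kernel code): the EPTP bit layout built by
 * construct_eptp() above -- memory type in bits 2:0 (6 = write-back),
 * (page-walk length - 1) in bits 5:3, accessed/dirty enable in bit 6, and
 * the 4 KiB-aligned root table address in the upper bits.  The helper is an
 * assumption for the example.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static uint64_t example_make_eptp(uint64_t root_pa, int levels, bool ad_bits)
{
	uint64_t eptp = 6;			/* write-back memory type */

	eptp |= (uint64_t)(levels - 1) << 3;	/* 4- or 5-level walk */
	if (ad_bits)
		eptp |= 1ULL << 6;		/* enable A/D bits */
	return eptp | (root_pa & ~0xfffULL);
}
#endif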
3386 static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
3389 struct kvm *kvm = vcpu->kvm;
3390 bool update_guest_cr3 = true;
3391 unsigned long guest_cr3;
3395 eptp = construct_eptp(vcpu, root_hpa, root_level);
3396 vmcs_write64(EPT_POINTER, eptp);
3398 hv_track_root_tdp(vcpu, root_hpa);
3400 if (!enable_unrestricted_guest && !is_paging(vcpu))
3401 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
3402 else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3))
3403 guest_cr3 = vcpu->arch.cr3;
3404 else /* vmcs.GUEST_CR3 is already up-to-date. */
3405 update_guest_cr3 = false;
3406 vmx_ept_load_pdptrs(vcpu);
3408 guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu);
3411 if (update_guest_cr3)
3412 vmcs_writel(GUEST_CR3, guest_cr3);
3416 static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3419 * We operate under the default treatment of SMM, so VMX cannot be
3420 * enabled under SMM. Note, whether or not VMXE is allowed at all,
3421 * i.e. is a reserved bit, is handled by common x86 code.
3423 if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
3426 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
3432 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3434 unsigned long old_cr4 = vcpu->arch.cr4;
3435 struct vcpu_vmx *vmx = to_vmx(vcpu);
3437 * Pass through host's Machine Check Enable value to hw_cr4, which
3438 * is in force while we are in guest mode. Do not let guests control
3439 * this bit, even if host CR4.MCE == 0.
3441 unsigned long hw_cr4;
3443 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
3444 if (is_unrestricted_guest(vcpu))
3445 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
3446 else if (vmx->rmode.vm86_active)
3447 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
3449 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
3451 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
3452 if (cr4 & X86_CR4_UMIP) {
3453 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3454 hw_cr4 &= ~X86_CR4_UMIP;
3455 } else if (!is_guest_mode(vcpu) ||
3456 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
3457 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3461 vcpu->arch.cr4 = cr4;
3462 kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
3464 if (!is_unrestricted_guest(vcpu)) {
3466 if (!is_paging(vcpu)) {
3467 hw_cr4 &= ~X86_CR4_PAE;
3468 hw_cr4 |= X86_CR4_PSE;
3469 } else if (!(cr4 & X86_CR4_PAE)) {
3470 hw_cr4 &= ~X86_CR4_PAE;
3475 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in
3476 * hardware. To emulate this behavior, SMEP/SMAP/PKU needs
3477 * to be manually disabled when guest switches to non-paging mode.
3480 * If !enable_unrestricted_guest, the CPU is always running
3481 * with CR0.PG=1 and CR4 needs to be modified.
3482 * If enable_unrestricted_guest, the CPU automatically
3483 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
3485 if (!is_paging(vcpu))
3486 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
3489 vmcs_writel(CR4_READ_SHADOW, cr4);
3490 vmcs_writel(GUEST_CR4, hw_cr4);
3492 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
3493 kvm_update_cpuid_runtime(vcpu);
3496 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3498 struct vcpu_vmx *vmx = to_vmx(vcpu);
3501 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3502 *var = vmx->rmode.segs[seg];
3503 if (seg == VCPU_SREG_TR
3504 || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3506 var->base = vmx_read_guest_seg_base(vmx, seg);
3507 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3510 var->base = vmx_read_guest_seg_base(vmx, seg);
3511 var->limit = vmx_read_guest_seg_limit(vmx, seg);
3512 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3513 ar = vmx_read_guest_seg_ar(vmx, seg);
3514 var->unusable = (ar >> 16) & 1;
3515 var->type = ar & 15;
3516 var->s = (ar >> 4) & 1;
3517 var->dpl = (ar >> 5) & 3;
3519 * Some userspaces do not preserve the unusable property. Since a usable
3520 * segment has to be present according to the VMX spec, we can use the present
3521 * property to work around the userspace bug by making an unusable segment
3522 * always non-present. vmx_segment_access_rights() already marks a non-present
3523 * segment as unusable.
3525 var->present = !var->unusable;
3526 var->avl = (ar >> 12) & 1;
3527 var->l = (ar >> 13) & 1;
3528 var->db = (ar >> 14) & 1;
3529 var->g = (ar >> 15) & 1;
3532 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3534 struct kvm_segment s;
3536 if (to_vmx(vcpu)->rmode.vm86_active) {
3537 vmx_get_segment(vcpu, &s, seg);
3540 return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3543 int vmx_get_cpl(struct kvm_vcpu *vcpu)
3545 struct vcpu_vmx *vmx = to_vmx(vcpu);
3547 if (unlikely(vmx->rmode.vm86_active))
3550 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3551 return VMX_AR_DPL(ar);
3555 static u32 vmx_segment_access_rights(struct kvm_segment *var)
3559 ar = var->type & 15;
3560 ar |= (var->s & 1) << 4;
3561 ar |= (var->dpl & 3) << 5;
3562 ar |= (var->present & 1) << 7;
3563 ar |= (var->avl & 1) << 12;
3564 ar |= (var->l & 1) << 13;
3565 ar |= (var->db & 1) << 14;
3566 ar |= (var->g & 1) << 15;
3567 ar |= (var->unusable || !var->present) << 16;
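/*
 * Illustrative sketch (not kernel code): the VMX access-rights encoding that
 * vmx_get_segment() and vmx_segment_access_rights() convert between.  Field
 * positions follow the shifts used in those two functions; the struct and
 * helpers are assumptions for the example.
 */
#if 0
#include <stdint.h>

struct example_seg_ar {
	unsigned type:4, s:1, dpl:2, present:1;
	unsigned avl:1, l:1, db:1, g:1, unusable:1;
};

static uint32_t example_pack_ar(const struct example_seg_ar *s)
{
	return  (s->type     << 0) | (s->s  << 4)  | (s->dpl << 5) |
		(s->present  << 7) | (s->avl << 12) | (s->l  << 13) |
		(s->db      << 14) | (s->g  << 15) | (s->unusable << 16);
}

static struct example_seg_ar example_unpack_ar(uint32_t ar)
{
	struct example_seg_ar s = {
		.type = ar & 15,        .s = (ar >> 4) & 1,
		.dpl = (ar >> 5) & 3,   .present = (ar >> 7) & 1,
		.avl = (ar >> 12) & 1,  .l = (ar >> 13) & 1,
		.db = (ar >> 14) & 1,   .g = (ar >> 15) & 1,
		.unusable = (ar >> 16) & 1,
	};
	return s;
}
#endif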
3572 void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3574 struct vcpu_vmx *vmx = to_vmx(vcpu);
3575 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3577 vmx_segment_cache_clear(vmx);
3579 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3580 vmx->rmode.segs[seg] = *var;
3581 if (seg == VCPU_SREG_TR)
3582 vmcs_write16(sf->selector, var->selector);
3584 fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3588 vmcs_writel(sf->base, var->base);
3589 vmcs_write32(sf->limit, var->limit);
3590 vmcs_write16(sf->selector, var->selector);
3593 * Fix the "Accessed" bit in the AR field of segment registers for older
3595 * qemu binaries: the IA32 architecture specifies that at the time of processor
3596 * reset the "Accessed" bit in the AR field of segment registers is 1, but qemu
3597 * sets it to 0 in its userland code. This causes an invalid guest
3598 * state vmexit when "unrestricted guest" mode is turned on.
3599 * The fix for this setup issue in cpu_reset is being pushed in the qemu
3600 * tree. Newer qemu binaries with that qemu fix will not need this kvm hack.
3603 if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR))
3604 var->type |= 0x1; /* Accessed */
3606 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3609 static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3611 __vmx_set_segment(vcpu, var, seg);
3613 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
3616 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3618 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3620 *db = (ar >> 14) & 1;
3621 *l = (ar >> 13) & 1;
3624 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3626 dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3627 dt->address = vmcs_readl(GUEST_IDTR_BASE);
3630 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3632 vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3633 vmcs_writel(GUEST_IDTR_BASE, dt->address);
3636 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3638 dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3639 dt->address = vmcs_readl(GUEST_GDTR_BASE);
3642 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3644 vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3645 vmcs_writel(GUEST_GDTR_BASE, dt->address);
3648 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3650 struct kvm_segment var;
3653 vmx_get_segment(vcpu, &var, seg);
3655 if (seg == VCPU_SREG_CS)
3657 ar = vmx_segment_access_rights(&var);
3659 if (var.base != (var.selector << 4))
3661 if (var.limit != 0xffff)
3669 static bool code_segment_valid(struct kvm_vcpu *vcpu)
3671 struct kvm_segment cs;
3672 unsigned int cs_rpl;
3674 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3675 cs_rpl = cs.selector & SEGMENT_RPL_MASK;
3679 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
3683 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
3684 if (cs.dpl > cs_rpl)
3687 if (cs.dpl != cs_rpl)
3693 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3697 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3699 struct kvm_segment ss;
3700 unsigned int ss_rpl;
3702 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3703 ss_rpl = ss.selector & SEGMENT_RPL_MASK;
3707 if (ss.type != 3 && ss.type != 7)
3711 if (ss.dpl != ss_rpl) /* DPL != RPL */
3719 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3721 struct kvm_segment var;
3724 vmx_get_segment(vcpu, &var, seg);
3725 rpl = var.selector & SEGMENT_RPL_MASK;
3733 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
3734 if (var.dpl < rpl) /* DPL < RPL */
3738 /* TODO: Add other members to kvm_segment_field to allow checking for other access rights flags. */
3744 static bool tr_valid(struct kvm_vcpu *vcpu)
3746 struct kvm_segment tr;
3748 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3752 if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
3754 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
3762 static bool ldtr_valid(struct kvm_vcpu *vcpu)
3764 struct kvm_segment ldtr;
3766 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3770 if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
3780 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3782 struct kvm_segment cs, ss;
3784 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3785 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3787 return ((cs.selector & SEGMENT_RPL_MASK) ==
3788 (ss.selector & SEGMENT_RPL_MASK));
3792 * Check if guest state is valid. Returns true if valid, false if not.
3794 * We assume that registers are always usable.
3796 bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
3798 /* real mode guest state checks */
3799 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3800 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3802 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3804 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3806 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3808 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3810 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3813 /* protected mode guest state checks */
3814 if (!cs_ss_rpl_check(vcpu))
3816 if (!code_segment_valid(vcpu))
3818 if (!stack_segment_valid(vcpu))
3820 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3822 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3824 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3826 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3828 if (!tr_valid(vcpu))
3830 if (!ldtr_valid(vcpu))
3834 * - Add checks on RIP
3835 * - Add checks on RFLAGS
3841 static int init_rmode_tss(struct kvm *kvm, void __user *ua)
3843 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3847 for (i = 0; i < 3; i++) {
3848 if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))
3852 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3853 if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16)))
3857 if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8)))
3863 static int init_rmode_identity_map(struct kvm *kvm)
3865 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
3870 /* Protect kvm_vmx->ept_identity_pagetable_done. */
3871 mutex_lock(&kvm->slots_lock);
3873 if (likely(kvm_vmx->ept_identity_pagetable_done))
3876 if (!kvm_vmx->ept_identity_map_addr)
3877 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
3879 uaddr = __x86_set_memory_region(kvm,
3880 IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
3881 kvm_vmx->ept_identity_map_addr,
3883 if (IS_ERR(uaddr)) {
3888 /* Set up identity-mapping pagetable for EPT in real mode */
3889 for (i = 0; i < (PAGE_SIZE / sizeof(tmp)); i++) {
3890 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3891 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3892 if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) {
3897 kvm_vmx->ept_identity_pagetable_done = true;
3900 mutex_unlock(&kvm->slots_lock);
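/*
 * Illustrative sketch (not kernel code): the identity mapping built above.
 * With CR4.PSE large pages, entry i of the single page-table page covers the
 * 4 MiB of guest physical space starting at i << 22 and maps it onto itself.
 * The helper is an assumption for the example; flag bits are passed in
 * rather than spelled out.
 */
#if 0
#include <stdint.h>

static uint32_t example_identity_pde(unsigned int i, uint32_t flags)
{
	return (i << 22) | flags;	/* GPA i*4MiB maps to HPA i*4MiB */
}
#endif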
3904 static void seg_setup(int seg)
3906 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3909 vmcs_write16(sf->selector, 0);
3910 vmcs_writel(sf->base, 0);
3911 vmcs_write32(sf->limit, 0xffff);
3913 if (seg == VCPU_SREG_CS)
3914 ar |= 0x08; /* code segment */
3916 vmcs_write32(sf->ar_bytes, ar);
3919 int allocate_vpid(void)
3925 spin_lock(&vmx_vpid_lock);
3926 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3927 if (vpid < VMX_NR_VPIDS)
3928 __set_bit(vpid, vmx_vpid_bitmap);
3931 spin_unlock(&vmx_vpid_lock);
3935 void free_vpid(int vpid)
3937 if (!enable_vpid || vpid == 0)
3939 spin_lock(&vmx_vpid_lock);
3940 __clear_bit(vpid, vmx_vpid_bitmap);
3941 spin_unlock(&vmx_vpid_lock);
3944 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
3947 * When KVM is a nested hypervisor on top of Hyper-V and uses the
3948 * 'Enlightened MSR Bitmap' feature, L0 needs to know that the MSR
3949 * bitmap has changed.
3951 if (kvm_is_using_evmcs()) {
3952 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
3954 if (evmcs->hv_enlightenments_control.msr_bitmap)
3955 evmcs->hv_clean_fields &=
3956 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
3959 vmx->nested.force_msr_bitmap_recalc = true;
3962 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
3964 struct vcpu_vmx *vmx = to_vmx(vcpu);
3965 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3967 if (!cpu_has_vmx_msr_bitmap())
3970 vmx_msr_bitmap_l01_changed(vmx);
3973 * Mark the desired intercept state in shadow bitmap, this is needed
3974 * for resync when the MSR filters change.
3976 if (is_valid_passthrough_msr(msr)) {
3977 int idx = possible_passthrough_msr_slot(msr);
3979 if (idx != -ENOENT) {
3980 if (type & MSR_TYPE_R)
3981 clear_bit(idx, vmx->shadow_msr_intercept.read);
3982 if (type & MSR_TYPE_W)
3983 clear_bit(idx, vmx->shadow_msr_intercept.write);
3987 if ((type & MSR_TYPE_R) &&
3988 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
3989 vmx_set_msr_bitmap_read(msr_bitmap, msr);
3990 type &= ~MSR_TYPE_R;
3993 if ((type & MSR_TYPE_W) &&
3994 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
3995 vmx_set_msr_bitmap_write(msr_bitmap, msr);
3996 type &= ~MSR_TYPE_W;
3999 if (type & MSR_TYPE_R)
4000 vmx_clear_msr_bitmap_read(msr_bitmap, msr);
4002 if (type & MSR_TYPE_W)
4003 vmx_clear_msr_bitmap_write(msr_bitmap, msr);
4006 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
4008 struct vcpu_vmx *vmx = to_vmx(vcpu);
4009 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
4011 if (!cpu_has_vmx_msr_bitmap())
4014 vmx_msr_bitmap_l01_changed(vmx);
4017 * Mark the desired intercept state in shadow bitmap, this is needed
4018 * for resync when the MSR filter changes.
4020 if (is_valid_passthrough_msr(msr)) {
4021 int idx = possible_passthrough_msr_slot(msr);
4023 if (idx != -ENOENT) {
4024 if (type & MSR_TYPE_R)
4025 set_bit(idx, vmx->shadow_msr_intercept.read);
4026 if (type & MSR_TYPE_W)
4027 set_bit(idx, vmx->shadow_msr_intercept.write);
4031 if (type & MSR_TYPE_R)
4032 vmx_set_msr_bitmap_read(msr_bitmap, msr);
4034 if (type & MSR_TYPE_W)
4035 vmx_set_msr_bitmap_write(msr_bitmap, msr);
4038 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
4041 * x2APIC indices for 64-bit accesses into the RDMSR and WRMSR halves
4042 * of the MSR bitmap. KVM emulates APIC registers up through 0x3f0,
4043 * i.e. MSR 0x83f, and so only needs to dynamically manipulate 64 bits.
4045 const int read_idx = APIC_BASE_MSR / BITS_PER_LONG_LONG;
4046 const int write_idx = read_idx + (0x800 / sizeof(u64));
4047 struct vcpu_vmx *vmx = to_vmx(vcpu);
4048 u64 *msr_bitmap = (u64 *)vmx->vmcs01.msr_bitmap;
4051 if (!cpu_has_vmx_msr_bitmap() || WARN_ON_ONCE(!lapic_in_kernel(vcpu)))
4054 if (cpu_has_secondary_exec_ctrls() &&
4055 (secondary_exec_controls_get(vmx) &
4056 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
4057 mode = MSR_BITMAP_MODE_X2APIC;
4058 if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
4059 mode |= MSR_BITMAP_MODE_X2APIC_APICV;
4064 if (mode == vmx->x2apic_msr_bitmap_mode)
4067 vmx->x2apic_msr_bitmap_mode = mode;
4070 * Reset the bitmap for MSRs 0x800 - 0x83f. Leave AMD's uber-extended
4071 * registers (0x840 and above) intercepted, KVM doesn't support them.
4072 * Intercept all writes by default and poke holes as needed. Pass
4073 * through reads for all valid registers by default in x2APIC+APICv
4074 * mode, only the current timer count needs on-demand emulation by KVM.
4076 if (mode & MSR_BITMAP_MODE_X2APIC_APICV)
4077 msr_bitmap[read_idx] = ~kvm_lapic_readable_reg_mask(vcpu->arch.apic);
4079 msr_bitmap[read_idx] = ~0ull;
4080 msr_bitmap[write_idx] = ~0ull;
4083 * TPR reads and writes can be virtualized even if virtual interrupt
4084 * delivery is not in use.
4086 vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW,
4087 !(mode & MSR_BITMAP_MODE_X2APIC));
4089 if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
4090 vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
4091 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
4092 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
4094 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_ICR), MSR_TYPE_RW);
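/*
 * Illustrative sketch (not kernel code): the 4 KiB MSR-bitmap geometry the
 * indexing above relies on.  Reads of MSRs 0x0-0x1fff are tracked in the
 * first 1 KiB of the page and writes of the same range at offset 0x800, one
 * bit per MSR, so the x2APIC MSRs (0x800 and up) land in a small contiguous
 * run of 64-bit words.  The helpers are assumptions for the example.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool example_msr_read_intercepted(const uint8_t bitmap[4096], uint32_t msr)
{
	return msr <= 0x1fff &&
	       ((bitmap[msr / 8] >> (msr % 8)) & 1);
}

static bool example_msr_write_intercepted(const uint8_t bitmap[4096], uint32_t msr)
{
	return msr <= 0x1fff &&
	       ((bitmap[0x800 + msr / 8] >> (msr % 8)) & 1);
}
#endif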
4098 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
4100 struct vcpu_vmx *vmx = to_vmx(vcpu);
4101 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
4104 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
4105 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
4106 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
4107 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
4108 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) {
4109 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
4110 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
4114 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
4116 struct vcpu_vmx *vmx = to_vmx(vcpu);
4121 if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
4122 !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
4123 WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
4126 rvi = vmx_get_rvi();
4128 vapic_page = vmx->nested.virtual_apic_map.hva;
4129 vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
4131 return ((rvi & 0xf0) > (vppr & 0xf0));
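/*
 * Illustrative sketch (not kernel code): the APIC priority-class comparison
 * used above.  A vector and the processor priority each split into a class
 * (upper nibble) and a sub-class; an interrupt is deliverable only when its
 * class is strictly above the current priority class.  The helper is an
 * assumption for the example.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool example_vector_deliverable(uint8_t vector, uint8_t ppr)
{
	return (vector & 0xf0) > (ppr & 0xf0);
}
#endif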
4134 static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
4136 struct vcpu_vmx *vmx = to_vmx(vcpu);
4140 * Redo intercept permissions for MSRs that KVM is passing through to
4141 * the guest. Disabling interception will check the new MSR filter and
4142 * ensure that KVM enables interception if userspace wants to filter
4143 * the MSR. MSRs that KVM is already intercepting don't need to be
4144 * refreshed since KVM is going to intercept them regardless of what the new filter allows.
4147 for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
4148 u32 msr = vmx_possible_passthrough_msrs[i];
4150 if (!test_bit(i, vmx->shadow_msr_intercept.read))
4151 vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_R);
4153 if (!test_bit(i, vmx->shadow_msr_intercept.write))
4154 vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_W);
4157 /* PT MSRs can be passed through iff PT is exposed to the guest. */
4158 if (vmx_pt_mode_is_host_guest())
4159 pt_update_intercept_for_msr(vcpu);
4162 static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
4166 if (vcpu->mode == IN_GUEST_MODE) {
4168 * The vector of the virtual interrupt has already been set in the PIR.
4169 * Send a notification event to deliver the virtual interrupt
4170 * unless the vCPU is the currently running vCPU, i.e. the
4171 * event is being sent from a fastpath VM-Exit handler, in
4172 * which case the PIR will be synced to the vIRR before
4173 * re-entering the guest.
4175 * When the target is not the running vCPU, the following
4176 * possibilities emerge:
4178 * Case 1: vCPU stays in non-root mode. Sending a notification
4179 * event posts the interrupt to the vCPU.
4181 * Case 2: vCPU exits to root mode and is still runnable. The
4182 * PIR will be synced to the vIRR before re-entering the guest.
4183 * Sending a notification event is ok as the host IRQ handler
4184 * will ignore the spurious event.
4186 * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
4187 * has already synced PIR to vIRR and never blocks the vCPU if
4188 * the vIRR is not empty. Therefore, a blocked vCPU here does
4189 * not wait for any requested interrupts in PIR, and sending a
4190 * notification event also results in a benign, spurious event.
4193 if (vcpu != kvm_get_running_vcpu())
4194 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
4199 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
4200 * otherwise do nothing as KVM will grab the highest priority pending
4201 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
4203 kvm_vcpu_wake_up(vcpu);
4206 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4209 struct vcpu_vmx *vmx = to_vmx(vcpu);
4211 if (is_guest_mode(vcpu) &&
4212 vector == vmx->nested.posted_intr_nv) {
4214 * If a posted interrupt is not recognized by hardware,
4215 * we will deliver it in the next vmentry.
4217 vmx->nested.pi_pending = true;
4218 kvm_make_request(KVM_REQ_EVENT, vcpu);
4221 * This pairs with the smp_mb_*() after setting vcpu->mode in
4222 * vcpu_enter_guest() to guarantee the vCPU sees the event
4223 * request if triggering a posted interrupt "fails" because
4224 * vcpu->mode != IN_GUEST_MODE. The extra barrier is needed as
4225 * the smb_wmb() in kvm_make_request() only ensures everything
4226 * done before making the request is visible when the request
4227 * is visible, it doesn't ensure ordering between the store to
4228 * vcpu->requests and the load from vcpu->mode.
4230 smp_mb__after_atomic();
4232 /* the PIR and ON have been set by L1. */
4233 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
4239 * Send an interrupt to the vcpu via posted interrupt.
4240 * 1. If the target vcpu is running (non-root mode), send a posted interrupt
4241 * notification and hardware will sync PIR to vIRR atomically.
4242 * 2. If the target vcpu isn't running (root mode), kick it to pick up the
4243 * interrupt from PIR on the next vmentry.
4245 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4247 struct vcpu_vmx *vmx = to_vmx(vcpu);
4250 r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
4254 /* Note, this is called iff the local APIC is in-kernel. */
4255 if (!vcpu->arch.apic->apicv_active)
4258 if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4261 /* If a previous notification has sent the IPI, nothing to do. */
4262 if (pi_test_and_set_on(&vmx->pi_desc))
4266 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
4267 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
4268 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
4269 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
4271 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
4275 static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
4276 int trig_mode, int vector)
4278 struct kvm_vcpu *vcpu = apic->vcpu;
4280 if (vmx_deliver_posted_interrupt(vcpu, vector)) {
4281 kvm_lapic_set_irr(vector, apic);
4282 kvm_make_request(KVM_REQ_EVENT, vcpu);
4283 kvm_vcpu_kick(vcpu);
4285 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
4291 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4292 * will not change in the lifetime of the guest.
4293 * Note that host-state that does change is set elsewhere. E.g., host-state
4294 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
4296 void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4300 unsigned long cr0, cr3, cr4;
4303 WARN_ON(cr0 & X86_CR0_TS);
4304 vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */
4307 * Save the most likely value for this task's CR3 in the VMCS.
4308 * We can't use __get_current_cr3_fast() because we're not atomic.
4311 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
4312 vmx->loaded_vmcs->host_state.cr3 = cr3;
4314 /* Save the most likely value for this task's CR4 in the VMCS. */
4315 cr4 = cr4_read_shadow();
4316 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
4317 vmx->loaded_vmcs->host_state.cr4 = cr4;
4319 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
4320 #ifdef CONFIG_X86_64
4322 * Load null selectors, so we can avoid reloading them in
4323 * vmx_prepare_switch_to_host(), in case userspace uses
4324 * the null selectors too (the expected case).
4326 vmcs_write16(HOST_DS_SELECTOR, 0);
4327 vmcs_write16(HOST_ES_SELECTOR, 0);
4329 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
4330 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
4332 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
4333 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
4335 vmcs_writel(HOST_IDTR_BASE, host_idt_base); /* 22.2.4 */
4337 vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
4339 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4340 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4343 * SYSENTER is used for 32-bit system calls on either 32-bit or
4344 * 64-bit kernels. It is always zero if neither is allowed, otherwise
4345 * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may
4346 * have already done so!).
4348 if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
4349 vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
4351 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4352 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
4354 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4355 rdmsr(MSR_IA32_CR_PAT, low32, high32);
4356 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4359 if (cpu_has_load_ia32_efer())
4360 vmcs_write64(HOST_IA32_EFER, host_efer);
4363 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4365 struct kvm_vcpu *vcpu = &vmx->vcpu;
4367 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4368 ~vcpu->arch.cr4_guest_rsvd_bits;
4370 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
4371 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
4373 if (is_guest_mode(&vmx->vcpu))
4374 vcpu->arch.cr4_guest_owned_bits &=
4375 ~get_vmcs12(vcpu)->cr4_guest_host_mask;
4376 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4379 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4381 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
4383 if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4384 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4387 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
4389 if (!enable_preemption_timer)
4390 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4392 return pin_based_exec_ctrl;
4395 static u32 vmx_vmentry_ctrl(void)
4397 u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
4399 if (vmx_pt_mode_is_system())
4400 vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
4401 VM_ENTRY_LOAD_IA32_RTIT_CTL);
4403 * IA32e mode, and loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically.
4405 vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
4406 VM_ENTRY_LOAD_IA32_EFER |
4407 VM_ENTRY_IA32E_MODE);
4409 if (cpu_has_perf_global_ctrl_bug())
4410 vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4412 return vmentry_ctrl;
4415 static u32 vmx_vmexit_ctrl(void)
4417 u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
4420 * Not used by KVM and never set in vmcs01 or vmcs02, but emulated for
4421 * nested virtualization and thus allowed to be set in vmcs12.
4423 vmexit_ctrl &= ~(VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER |
4424 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER);
4426 if (vmx_pt_mode_is_system())
4427 vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
4428 VM_EXIT_CLEAR_IA32_RTIT_CTL);
4430 if (cpu_has_perf_global_ctrl_bug())
4431 vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4433 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
4434 return vmexit_ctrl &
4435 ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
4438 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4440 struct vcpu_vmx *vmx = to_vmx(vcpu);
4442 if (is_guest_mode(vcpu)) {
4443 vmx->nested.update_vmcs01_apicv_status = true;
4447 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4449 if (kvm_vcpu_apicv_active(vcpu)) {
4450 secondary_exec_controls_setbit(vmx,
4451 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4452 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4454 tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4456 secondary_exec_controls_clearbit(vmx,
4457 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4458 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4460 tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4463 vmx_update_msr_bitmap_x2apic(vcpu);
4466 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4468 u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
4471 * Not used by KVM, but fully supported for nesting, i.e. are allowed in
4472 * vmcs12 and propagated to vmcs02 when set in vmcs12.
4474 exec_control &= ~(CPU_BASED_RDTSC_EXITING |
4475 CPU_BASED_USE_IO_BITMAPS |
4476 CPU_BASED_MONITOR_TRAP_FLAG |
4477 CPU_BASED_PAUSE_EXITING);
4479 /* INTR_WINDOW_EXITING and NMI_WINDOW_EXITING are toggled dynamically */
4480 exec_control &= ~(CPU_BASED_INTR_WINDOW_EXITING |
4481 CPU_BASED_NMI_WINDOW_EXITING);
4483 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4484 exec_control &= ~CPU_BASED_MOV_DR_EXITING;
4486 if (!cpu_need_tpr_shadow(&vmx->vcpu))
4487 exec_control &= ~CPU_BASED_TPR_SHADOW;
4489 #ifdef CONFIG_X86_64
4490 if (exec_control & CPU_BASED_TPR_SHADOW)
4491 exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
4492 CPU_BASED_CR8_STORE_EXITING);
4494 exec_control |= CPU_BASED_CR8_STORE_EXITING |
4495 CPU_BASED_CR8_LOAD_EXITING;
4497 /* No need to intercept CR3 access or INVLPG when using EPT. */
4499 exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
4500 CPU_BASED_CR3_STORE_EXITING |
4501 CPU_BASED_INVLPG_EXITING);
4502 if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4503 exec_control &= ~(CPU_BASED_MWAIT_EXITING |
4504 CPU_BASED_MONITOR_EXITING);
4505 if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4506 exec_control &= ~CPU_BASED_HLT_EXITING;
4507 return exec_control;
4510 static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
4512 u64 exec_control = vmcs_config.cpu_based_3rd_exec_ctrl;
4515 * IPI virtualization relies on APICv. Disable IPI virtualization if
4516 * APICv is inhibited.
4518 if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
4519 exec_control &= ~TERTIARY_EXEC_IPI_VIRT;
4521 return exec_control;
4525 * Adjust a single secondary execution control bit to intercept/allow an
4526 * instruction in the guest. This is usually done based on whether or not a
4527 * feature has been exposed to the guest in order to correctly emulate faults.
4530 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
4531 u32 control, bool enabled, bool exiting)
4534 * If the control is for an opt-in feature, clear the control if the
4535 * feature is not exposed to the guest, i.e. not enabled. If the
4536 * control is opt-out, i.e. an exiting control, clear the control if
4537 * the feature _is_ exposed to the guest, i.e. exiting/interception is
4538 * disabled for the associated instruction. Note, the caller is
4539 * responsible for presetting exec_control to set all supported bits.
4541 if (enabled == exiting)
4542 *exec_control &= ~control;
4545 * Update the nested MSR settings so that a nested VMM can/can't set
4546 * controls for features that are/aren't exposed to the guest.
4550 * All features that can be added to or removed from VMX MSRs must
4551 * be supported in the first place for nested virtualization.
4553 if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control)))
4557 vmx->nested.msrs.secondary_ctls_high |= control;
4559 vmx->nested.msrs.secondary_ctls_high &= ~control;
4564 * Wrapper macro for the common case of adjusting a secondary execution control
4565 * based on a single guest CPUID bit, with a dedicated feature bit. This also
4566 * verifies that the control is actually supported by KVM and hardware.
4568 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
4572 if (cpu_has_vmx_##name()) { \
4573 __enabled = guest_cpuid_has(&(vmx)->vcpu, \
4574 X86_FEATURE_##feat_name); \
4575 vmx_adjust_secondary_exec_control(vmx, exec_control, \
4576 SECONDARY_EXEC_##ctrl_name, __enabled, exiting); \
4580 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
4581 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4582 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4584 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4585 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
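/*
 * As an illustration (not generated code), the call
 * vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID) below
 * roughly expands to:
 *
 *	if (cpu_has_vmx_invpcid())
 *		vmx_adjust_secondary_exec_control(vmx, &exec_control,
 *				SECONDARY_EXEC_ENABLE_INVPCID,
 *				guest_cpuid_has(&vmx->vcpu, X86_FEATURE_INVPCID),
 *				false);
 */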
4587 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4589 struct kvm_vcpu *vcpu = &vmx->vcpu;
4591 u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
4593 if (vmx_pt_mode_is_system())
4594 exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
4595 if (!cpu_need_virtualize_apic_accesses(vcpu))
4596 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4598 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4600 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4601 enable_unrestricted_guest = 0;
4603 if (!enable_unrestricted_guest)
4604 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4605 if (kvm_pause_in_guest(vmx->vcpu.kvm))
4606 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4607 if (!kvm_vcpu_apicv_active(vcpu))
4608 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4609 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4610 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
4613 * KVM doesn't support VMFUNC for L1, but the control is set in KVM's
4614 * base configuration as KVM emulates VMFUNC[EPTP_SWITCHING] for L2.
4616 exec_control &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
4618 /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
4619 * in vmx_set_cr4. */
4620 exec_control &= ~SECONDARY_EXEC_DESC;
4622 /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
4624 We can NOT enable shadow_vmcs here because we don't yet have a current VMCS12.
4627 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4630 * PML is enabled/disabled when dirty logging of memslots changes, but
4631 * it needs to be set here when dirty logging is already active, e.g.
4632 * if this vCPU was created after dirty logging was enabled.
4634 if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
4635 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
4637 if (cpu_has_vmx_xsaves()) {
4638 /* Exposing XSAVES only when XSAVE is exposed */
4639 bool xsaves_enabled =
4640 boot_cpu_has(X86_FEATURE_XSAVE) &&
4641 guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
4642 guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
4644 vcpu->arch.xsaves_enabled = xsaves_enabled;
4646 vmx_adjust_secondary_exec_control(vmx, &exec_control,
4647 SECONDARY_EXEC_XSAVES,
4648 xsaves_enabled, false);
4652 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
4653 * feature is exposed to the guest. This creates a virtualization hole
4654 * if both are supported in hardware but only one is exposed to the
4655 * guest, but letting the guest execute RDTSCP or RDPID when either one
4656 * is advertised is preferable to emulating the advertised instruction
4657 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
4659 if (cpu_has_vmx_rdtscp()) {
4660 bool rdpid_or_rdtscp_enabled =
4661 guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
4662 guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
4664 vmx_adjust_secondary_exec_control(vmx, &exec_control,
4665 SECONDARY_EXEC_ENABLE_RDTSCP,
4666 rdpid_or_rdtscp_enabled, false);
4668 vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
4670 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
4671 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
4673 vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
4674 ENABLE_USR_WAIT_PAUSE, false);
4676 if (!vcpu->kvm->arch.bus_lock_detection_enabled)
4677 exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION;
4679 if (!kvm_notify_vmexit_enabled(vcpu->kvm))
4680 exec_control &= ~SECONDARY_EXEC_NOTIFY_VM_EXITING;
4682 return exec_control;
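/*
 * Page order of the PID-pointer table. As a rough example, assuming 8-byte
 * table entries, max_vcpu_ids == 4096 needs a 32 KiB table, i.e. order 3.
 */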
4685 static inline int vmx_get_pid_table_order(struct kvm *kvm)
4687 return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));
4690 static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
4693 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4695 if (!irqchip_in_kernel(kvm) || !enable_ipiv)
4698 if (kvm_vmx->pid_table)
4701 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, vmx_get_pid_table_order(kvm));
4705 kvm_vmx->pid_table = (void *)page_address(pages);
4709 static int vmx_vcpu_precreate(struct kvm *kvm)
4711 return vmx_alloc_ipiv_pid_table(kvm);
4714 #define VMX_XSS_EXIT_BITMAP 0
4716 static void init_vmcs(struct vcpu_vmx *vmx)
4718 struct kvm *kvm = vmx->vcpu.kvm;
4719 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4722 nested_vmx_set_vmcs_shadowing_bitmap();
4724 if (cpu_has_vmx_msr_bitmap())
4725 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
4727 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */
4730 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4732 exec_controls_set(vmx, vmx_exec_control(vmx));
4734 if (cpu_has_secondary_exec_ctrls())
4735 secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
4737 if (cpu_has_tertiary_exec_ctrls())
4738 tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
4740 if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
4741 vmcs_write64(EOI_EXIT_BITMAP0, 0);
4742 vmcs_write64(EOI_EXIT_BITMAP1, 0);
4743 vmcs_write64(EOI_EXIT_BITMAP2, 0);
4744 vmcs_write64(EOI_EXIT_BITMAP3, 0);
4746 vmcs_write16(GUEST_INTR_STATUS, 0);
4748 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
4749 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
4752 if (vmx_can_use_ipiv(&vmx->vcpu)) {
4753 vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table));
4754 vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1);
4757 if (!kvm_pause_in_guest(kvm)) {
4758 vmcs_write32(PLE_GAP, ple_gap);
4759 vmx->ple_window = ple_window;
4760 vmx->ple_window_dirty = true;
4763 if (kvm_notify_vmexit_enabled(kvm))
4764 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);
4766 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4767 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
4768 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
4770 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
4771 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
4772 vmx_set_constant_host_state(vmx);
4773 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
4774 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
4776 if (cpu_has_vmx_vmfunc())
4777 vmcs_write64(VM_FUNCTION_CONTROL, 0);
4779 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4780 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
4781 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
4782 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
4783 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
4785 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
4786 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4788 vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
4790 /* 22.2.1, 20.8.1 */
4791 vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
4793 vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4794 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4796 set_cr4_guest_host_mask(vmx);
4799 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4801 if (cpu_has_vmx_xsaves())
4802 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
4805 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
4806 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
4809 vmx_write_encls_bitmap(&vmx->vcpu, NULL);
4811 if (vmx_pt_mode_is_host_guest()) {
4812 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
4813 /* Bits 6:0 are forced to 1, writes are ignored. */
4814 vmx->pt_desc.guest.output_mask = 0x7F;
4815 vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
4818 vmcs_write32(GUEST_SYSENTER_CS, 0);
4819 vmcs_writel(GUEST_SYSENTER_ESP, 0);
4820 vmcs_writel(GUEST_SYSENTER_EIP, 0);
4821 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4823 if (cpu_has_vmx_tpr_shadow()) {
4824 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4825 if (cpu_need_tpr_shadow(&vmx->vcpu))
4826 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
4827 __pa(vmx->vcpu.arch.apic->regs));
4828 vmcs_write32(TPR_THRESHOLD, 0);
4831 vmx_setup_uret_msrs(vmx);
4834 static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4836 struct vcpu_vmx *vmx = to_vmx(vcpu);
4841 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
4843 vcpu_setup_sgx_lepubkeyhash(vcpu);
4845 vmx->nested.posted_intr_nv = -1;
4846 vmx->nested.vmxon_ptr = INVALID_GPA;
4847 vmx->nested.current_vmptr = INVALID_GPA;
4848 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
4850 vcpu->arch.microcode_version = 0x100000000ULL;
4851 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
4854 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
4855 * or POSTED_INTR_WAKEUP_VECTOR.
4857 vmx->pi_desc.nv = POSTED_INTR_VECTOR;
4858 vmx->pi_desc.sn = 1;
4861 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
4863 struct vcpu_vmx *vmx = to_vmx(vcpu);
4866 __vmx_vcpu_reset(vcpu);
4868 vmx->rmode.vm86_active = 0;
4871 vmx->msr_ia32_umwait_control = 0;
4873 vmx->hv_deadline_tsc = -1;
4874 kvm_set_cr8(vcpu, 0);
4876 vmx_segment_cache_clear(vmx);
4877 kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS);
4879 seg_setup(VCPU_SREG_CS);
4880 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
4881 vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
4883 seg_setup(VCPU_SREG_DS);
4884 seg_setup(VCPU_SREG_ES);
4885 seg_setup(VCPU_SREG_FS);
4886 seg_setup(VCPU_SREG_GS);
4887 seg_setup(VCPU_SREG_SS);
4889 vmcs_write16(GUEST_TR_SELECTOR, 0);
4890 vmcs_writel(GUEST_TR_BASE, 0);
4891 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
4892 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
4894 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
4895 vmcs_writel(GUEST_LDTR_BASE, 0);
4896 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
4897 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
4899 vmcs_writel(GUEST_GDTR_BASE, 0);
4900 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
4902 vmcs_writel(GUEST_IDTR_BASE, 0);
4903 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
4905 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
4906 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
4907 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
4908 if (kvm_mpx_supported())
4909 vmcs_write64(GUEST_BNDCFGS, 0);
4911 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
4913 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4915 vpid_sync_context(vmx->vpid);
4917 vmx_update_fb_clear_dis(vcpu, vmx);
4920 static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
4922 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
4925 static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
4928 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
4929 vmx_enable_irq_window(vcpu);
4933 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
4936 static void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
4938 struct vcpu_vmx *vmx = to_vmx(vcpu);
4940 int irq = vcpu->arch.interrupt.nr;
4942 trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected);
4944 ++vcpu->stat.irq_injections;
4945 if (vmx->rmode.vm86_active) {
4947 if (vcpu->arch.interrupt.soft)
4948 inc_eip = vcpu->arch.event_exit_inst_len;
4949 kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
4952 intr = irq | INTR_INFO_VALID_MASK;
4953 if (vcpu->arch.interrupt.soft) {
4954 intr |= INTR_TYPE_SOFT_INTR;
4955 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4956 vmx->vcpu.arch.event_exit_inst_len);
4958 intr |= INTR_TYPE_EXT_INTR;
4959 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
4961 vmx_clear_hlt(vcpu);
4964 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4966 struct vcpu_vmx *vmx = to_vmx(vcpu);
4970 * Tracking the NMI-blocked state in software is built upon
4971 * finding the next open IRQ window. This, in turn, depends on
4972 * well-behaving guests: They have to keep IRQs disabled at
4973 * least as long as the NMI handler runs. Otherwise we may
4974 * cause NMI nesting, maybe breaking the guest. But as this is
4975 * highly unlikely, we can live with the residual risk.
4977 vmx->loaded_vmcs->soft_vnmi_blocked = 1;
4978 vmx->loaded_vmcs->vnmi_blocked_time = 0;
4981 ++vcpu->stat.nmi_injections;
4982 vmx->loaded_vmcs->nmi_known_unmasked = false;
4984 if (vmx->rmode.vm86_active) {
4985 kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
4989 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
4990 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
4992 vmx_clear_hlt(vcpu);
4995 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
4997 struct vcpu_vmx *vmx = to_vmx(vcpu);
5001 return vmx->loaded_vmcs->soft_vnmi_blocked;
5002 if (vmx->loaded_vmcs->nmi_known_unmasked)
5004 masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
5005 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5009 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5011 struct vcpu_vmx *vmx = to_vmx(vcpu);
5014 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
5015 vmx->loaded_vmcs->soft_vnmi_blocked = masked;
5016 vmx->loaded_vmcs->vnmi_blocked_time = 0;
5019 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5021 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5022 GUEST_INTR_STATE_NMI);
5024 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
5025 GUEST_INTR_STATE_NMI);
5029 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
5031 if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5034 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
5037 return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5038 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
5039 GUEST_INTR_STATE_NMI));
5042 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5044 if (to_vmx(vcpu)->nested.nested_run_pending)
5047 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
5048 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5051 return !vmx_nmi_blocked(vcpu);
5054 bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5056 if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5059 return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
5060 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5061 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5064 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5066 if (to_vmx(vcpu)->nested.nested_run_pending)
5070 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
5071 * e.g. if the IRQ arrived asynchronously after checking nested events.
5073 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5076 return !vmx_interrupt_blocked(vcpu);
5079 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
5083 if (enable_unrestricted_guest)
5086 mutex_lock(&kvm->slots_lock);
5087 ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
5089 mutex_unlock(&kvm->slots_lock);
5092 return PTR_ERR(ret);
5094 to_kvm_vmx(kvm)->tss_addr = addr;
5096 return init_rmode_tss(kvm, ret);
5099 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
5101 to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
5105 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
5110 * Update instruction length as we may reinject the exception
5111 * from user space while in guest debugging mode.
5113 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
5114 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5115 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5119 return !(vcpu->guest_debug &
5120 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
5134 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
5135 int vec, u32 err_code)
5138 * An instruction with the address-size override prefix (opcode 0x67)
5139 * causes a #SS fault with error code 0 in VM86 mode.
5141 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
5142 if (kvm_emulate_instruction(vcpu, 0)) {
5143 if (vcpu->arch.halt_request) {
5144 vcpu->arch.halt_request = 0;
5145 return kvm_emulate_halt_noskip(vcpu);
5153 * Forward all other exceptions that are valid in real mode.
5154 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
5155 * the required debugging infrastructure rework.
5157 kvm_queue_exception(vcpu, vec);
5161 static int handle_machine_check(struct kvm_vcpu *vcpu)
5163 /* handled by vmx_vcpu_run() */
5168 * If the host has split lock detection disabled, then #AC is
5169 * unconditionally injected into the guest, which is the pre split lock
5170 * detection behaviour.
5172 * If the host has split lock detection enabled then #AC is
5173 * only injected into the guest when:
5174 * - Guest CPL == 3 (user mode)
5175 * - Guest has #AC detection enabled in CR0
5176 * - Guest EFLAGS has AC bit set
5178 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
5180 if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
5183 return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
5184 (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
5187 static int handle_exception_nmi(struct kvm_vcpu *vcpu)
5189 struct vcpu_vmx *vmx = to_vmx(vcpu);
5190 struct kvm_run *kvm_run = vcpu->run;
5191 u32 intr_info, ex_no, error_code;
5192 unsigned long cr2, dr6;
5195 vect_info = vmx->idt_vectoring_info;
5196 intr_info = vmx_get_intr_info(vcpu);
5199 * Machine checks are handled by handle_exception_irqoff(), or by
5200 * vmx_vcpu_run() if a #MC occurs on VM-Entry. NMIs are handled by
5201 * vmx_vcpu_enter_exit().
5203 if (is_machine_check(intr_info) || is_nmi(intr_info))
5207 * Queue the exception here instead of in handle_nm_fault_irqoff().
5208 * This ensures the nested_vmx check is not skipped so vmexit can
5209 * be reflected to L1 (when it intercepts #NM) before reaching this point.
5212 if (is_nm_fault(intr_info)) {
5213 kvm_queue_exception(vcpu, NM_VECTOR);
5217 if (is_invalid_opcode(intr_info))
5218 return handle_ud(vcpu);
5221 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
5222 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
5224 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
5225 WARN_ON_ONCE(!enable_vmware_backdoor);
5228 * VMware backdoor emulation on #GP interception only handles
5229 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero
5230 * error code on #GP.
5233 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
5236 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
5240 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing
5241 * MMIO, it is better to report an internal error.
5242 * See the comments in vmx_handle_exit.
5244 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
5245 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
5246 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5247 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
5248 vcpu->run->internal.ndata = 4;
5249 vcpu->run->internal.data[0] = vect_info;
5250 vcpu->run->internal.data[1] = intr_info;
5251 vcpu->run->internal.data[2] = error_code;
5252 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
5256 if (is_page_fault(intr_info)) {
5257 cr2 = vmx_get_exit_qual(vcpu);
5258 if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
5260 * EPT will cause page fault only if we need to
5261 * detect illegal GPAs.
5263 WARN_ON_ONCE(!allow_smaller_maxphyaddr);
5264 kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
5267 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
5270 ex_no = intr_info & INTR_INFO_VECTOR_MASK;
5272 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5273 return handle_rmode_exception(vcpu, ex_no, error_code);
5277 dr6 = vmx_get_exit_qual(vcpu);
5278 if (!(vcpu->guest_debug &
5279 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
5281 * If the #DB was due to ICEBP, a.k.a. INT1, skip the
5282 * instruction. ICEBP generates a trap-like #DB, but
5283 * despite its interception control being tied to #DB,
5284 * is an instruction intercept, i.e. the VM-Exit occurs
5285 * on the ICEBP itself. Use the inner "skip" helper to
5286 * avoid single-step #DB and MTF updates, as ICEBP is
5287 * higher priority. Note, skipping ICEBP still clears
5288 * STI and MOVSS blocking.
5290 * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS
5291 * if single-step is enabled in RFLAGS and STI or MOVSS
5292 * blocking is active, as the CPU doesn't set the bit
5293 * on VM-Exit due to #DB interception. VM-Entry has a
5294 * consistency check that a single-step #DB is pending
5295 * in this scenario as the previous instruction cannot
5296 * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV
5297 * don't modify RFLAGS), therefore the one instruction
5298 * delay when activating single-step breakpoints must
5299 * have already expired. Note, the CPU sets/clears BS
5300 * as appropriate for all other VM-Exits types.
5302 if (is_icebp(intr_info))
5303 WARN_ON(!skip_emulated_instruction(vcpu));
5304 else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) &&
5305 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5306 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)))
5307 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
5308 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS);
5310 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
5313 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
5314 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
5318 * Update instruction length as we may reinject #BP from
5319 * user space while in guest debugging mode. Reading it for
5320 * #DB as well causes no harm; it is not used in that case.
5322 vmx->vcpu.arch.event_exit_inst_len =
5323 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5324 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5325 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5326 kvm_run->debug.arch.exception = ex_no;
5329 if (vmx_guest_inject_ac(vcpu)) {
5330 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
5335 * Handle split lock. Depending on detection mode this will
5336 * either warn and disable split lock detection for this
5337 * task or force SIGBUS on it.
5339 if (handle_guest_split_lock(kvm_rip_read(vcpu)))
5343 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
5344 kvm_run->ex.exception = ex_no;
5345 kvm_run->ex.error_code = error_code;
5351 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
5353 ++vcpu->stat.irq_exits;
5357 static int handle_triple_fault(struct kvm_vcpu *vcpu)
5359 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5360 vcpu->mmio_needed = 0;
5364 static int handle_io(struct kvm_vcpu *vcpu)
5366 unsigned long exit_qualification;
5367 int size, in, string;
5370 exit_qualification = vmx_get_exit_qual(vcpu);
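	/*
	 * Per the SDM, the exit qualification for I/O instructions encodes:
	 * bits 2:0 = access size - 1, bit 3 = direction (1 = IN), bit 4 =
	 * string instruction, bits 31:16 = port number. E.g. "in al, 0x60"
	 * decodes to size 1, IN, port 0x60.
	 */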
5371 string = (exit_qualification & 16) != 0;
5373 ++vcpu->stat.io_exits;
5376 return kvm_emulate_instruction(vcpu, 0);
5378 port = exit_qualification >> 16;
5379 size = (exit_qualification & 7) + 1;
5380 in = (exit_qualification & 8) != 0;
5382 return kvm_fast_pio(vcpu, size, port, in);
5386 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5389 * Patch in the VMCALL instruction:
5391 hypercall[0] = 0x0f;
5392 hypercall[1] = 0x01;
5393 hypercall[2] = 0xc1;
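	/* 0f 01 c1 is the opcode of VMCALL. */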
5396 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
5397 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
5399 if (is_guest_mode(vcpu)) {
5400 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5401 unsigned long orig_val = val;
5404 * We get here when L2 changed cr0 in a way that did not change
5405 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
5406 * but did change L0 shadowed bits. So we first calculate the
5407 * effective cr0 value that L1 would like to write into the
5408 * hardware. It consists of the L2-owned bits from the new
5409 * value combined with the L1-owned bits from L1's guest_cr0.
5411 val = (val & ~vmcs12->cr0_guest_host_mask) |
5412 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
5414 if (!nested_guest_cr0_valid(vcpu, val))
5417 if (kvm_set_cr0(vcpu, val))
5419 vmcs_writel(CR0_READ_SHADOW, orig_val);
5422 if (to_vmx(vcpu)->nested.vmxon &&
5423 !nested_host_cr0_valid(vcpu, val))
5426 return kvm_set_cr0(vcpu, val);
5430 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
5432 if (is_guest_mode(vcpu)) {
5433 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5434 unsigned long orig_val = val;
5436 /* analogously to handle_set_cr0 */
5437 val = (val & ~vmcs12->cr4_guest_host_mask) |
5438 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
5439 if (kvm_set_cr4(vcpu, val))
5441 vmcs_writel(CR4_READ_SHADOW, orig_val);
5444 return kvm_set_cr4(vcpu, val);
5447 static int handle_desc(struct kvm_vcpu *vcpu)
5449 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
5450 return kvm_emulate_instruction(vcpu, 0);
5453 static int handle_cr(struct kvm_vcpu *vcpu)
5455 unsigned long exit_qualification, val;
5461 exit_qualification = vmx_get_exit_qual(vcpu);
5462 cr = exit_qualification & 15;
5463 reg = (exit_qualification >> 8) & 15;
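	/*
	 * Per the SDM, the exit qualification for CR accesses encodes:
	 * bits 3:0 = CR number, bits 5:4 = access type (0 = MOV to CR,
	 * 1 = MOV from CR, 2 = CLTS, 3 = LMSW), bits 11:8 = GPR operand,
	 * bits 31:16 = LMSW source data.
	 */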
5464 switch ((exit_qualification >> 4) & 3) {
5465 case 0: /* mov to cr */
5466 val = kvm_register_read(vcpu, reg);
5467 trace_kvm_cr_write(cr, val);
5470 err = handle_set_cr0(vcpu, val);
5471 return kvm_complete_insn_gp(vcpu, err);
5473 WARN_ON_ONCE(enable_unrestricted_guest);
5475 err = kvm_set_cr3(vcpu, val);
5476 return kvm_complete_insn_gp(vcpu, err);
5478 err = handle_set_cr4(vcpu, val);
5479 return kvm_complete_insn_gp(vcpu, err);
5481 u8 cr8_prev = kvm_get_cr8(vcpu);
5483 err = kvm_set_cr8(vcpu, cr8);
5484 ret = kvm_complete_insn_gp(vcpu, err);
5485 if (lapic_in_kernel(vcpu))
5487 if (cr8_prev <= cr8)
5490 * TODO: we might be squashing a
5491 * KVM_GUESTDBG_SINGLESTEP-triggered
5492 * KVM_EXIT_DEBUG here.
5494 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5500 KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS");
5502 case 1: /*mov from cr*/
5505 WARN_ON_ONCE(enable_unrestricted_guest);
5507 val = kvm_read_cr3(vcpu);
5508 kvm_register_write(vcpu, reg, val);
5509 trace_kvm_cr_read(cr, val);
5510 return kvm_skip_emulated_instruction(vcpu);
5512 val = kvm_get_cr8(vcpu);
5513 kvm_register_write(vcpu, reg, val);
5514 trace_kvm_cr_read(cr, val);
5515 return kvm_skip_emulated_instruction(vcpu);
5519 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5520 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
5521 kvm_lmsw(vcpu, val);
5523 return kvm_skip_emulated_instruction(vcpu);
5527 vcpu->run->exit_reason = 0;
5528 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5529 (int)(exit_qualification >> 4) & 3, cr);
5533 static int handle_dr(struct kvm_vcpu *vcpu)
5535 unsigned long exit_qualification;
5539 exit_qualification = vmx_get_exit_qual(vcpu);
5540 dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
5542 /* First, if DR does not exist, trigger UD */
5543 if (!kvm_require_dr(vcpu, dr))
5546 if (vmx_get_cpl(vcpu) > 0)
5549 dr7 = vmcs_readl(GUEST_DR7);
5552 * As the vm-exit takes precedence over the debug trap, we
5553 * need to emulate the latter, either for the host or the
5554 * guest debugging itself.
5556 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5557 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
5558 vcpu->run->debug.arch.dr7 = dr7;
5559 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5560 vcpu->run->debug.arch.exception = DB_VECTOR;
5561 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5564 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
5569 if (vcpu->guest_debug == 0) {
5570 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5573 * No more DR vmexits; force a reload of the debug registers
5574 * and reenter on this instruction. The next vmexit will
5575 * retrieve the full state of the debug registers.
5577 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5581 reg = DEBUG_REG_ACCESS_REG(exit_qualification);
5582 if (exit_qualification & TYPE_MOV_FROM_DR) {
5585 kvm_get_dr(vcpu, dr, &val);
5586 kvm_register_write(vcpu, reg, val);
5589 err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
5593 return kvm_complete_insn_gp(vcpu, err);
5596 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
5598 get_debugreg(vcpu->arch.db[0], 0);
5599 get_debugreg(vcpu->arch.db[1], 1);
5600 get_debugreg(vcpu->arch.db[2], 2);
5601 get_debugreg(vcpu->arch.db[3], 3);
5602 get_debugreg(vcpu->arch.dr6, 6);
5603 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5605 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5606 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5609 * exc_debug expects dr6 to be cleared after it runs; prevent it from seeing
5610 * a stale dr6 from the guest.
5612 set_debugreg(DR6_RESERVED, 6);
5615 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5617 vmcs_writel(GUEST_DR7, val);
5620 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5622 kvm_apic_update_ppr(vcpu);
5626 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5628 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5630 kvm_make_request(KVM_REQ_EVENT, vcpu);
5632 ++vcpu->stat.irq_window_exits;
5636 static int handle_invlpg(struct kvm_vcpu *vcpu)
5638 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5640 kvm_mmu_invlpg(vcpu, exit_qualification);
5641 return kvm_skip_emulated_instruction(vcpu);
5644 static int handle_apic_access(struct kvm_vcpu *vcpu)
5646 if (likely(fasteoi)) {
5647 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5648 int access_type, offset;
5650 access_type = exit_qualification & APIC_ACCESS_TYPE;
5651 offset = exit_qualification & APIC_ACCESS_OFFSET;
5653 * A sane guest uses MOV to write EOI, and the written value
5654 * doesn't matter. So short-circuit here to avoid
5655 * heavy instruction emulation.
5657 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5658 (offset == APIC_EOI)) {
5659 kvm_lapic_set_eoi(vcpu);
5660 return kvm_skip_emulated_instruction(vcpu);
5663 return kvm_emulate_instruction(vcpu, 0);
5666 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5668 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5669 int vector = exit_qualification & 0xff;
5671 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
5672 kvm_apic_set_eoi_accelerated(vcpu, vector);
5676 static int handle_apic_write(struct kvm_vcpu *vcpu)
5678 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5681 * APIC-write VM-Exit is trap-like, KVM doesn't need to advance RIP and
5682 * hardware has done any necessary aliasing, offset adjustments, etc...
5683 * for the access. I.e. the correct value has already been written to
5684 * the vAPIC page for the correct 16-byte chunk. KVM needs only to
5685 * retrieve the register value and emulate the access.
5687 u32 offset = exit_qualification & 0xff0;
5689 kvm_apic_write_nodecode(vcpu, offset);
5693 static int handle_task_switch(struct kvm_vcpu *vcpu)
5695 struct vcpu_vmx *vmx = to_vmx(vcpu);
5696 unsigned long exit_qualification;
5697 bool has_error_code = false;
5700 int reason, type, idt_v, idt_index;
5702 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5703 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5704 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5706 exit_qualification = vmx_get_exit_qual(vcpu);
5708 reason = (u32)exit_qualification >> 30;
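	/*
	 * Per the SDM, bits 31:30 of the exit qualification give the task
	 * switch source (0 = CALL, 1 = IRET, 2 = JMP, 3 = task gate in the
	 * IDT) and bits 15:0 give the target TSS selector.
	 */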
5709 if (reason == TASK_SWITCH_GATE && idt_v) {
5711 case INTR_TYPE_NMI_INTR:
5712 vcpu->arch.nmi_injected = false;
5713 vmx_set_nmi_mask(vcpu, true);
5715 case INTR_TYPE_EXT_INTR:
5716 case INTR_TYPE_SOFT_INTR:
5717 kvm_clear_interrupt_queue(vcpu);
5719 case INTR_TYPE_HARD_EXCEPTION:
5720 if (vmx->idt_vectoring_info &
5721 VECTORING_INFO_DELIVER_CODE_MASK) {
5722 has_error_code = true;
5724 vmcs_read32(IDT_VECTORING_ERROR_CODE);
5727 case INTR_TYPE_SOFT_EXCEPTION:
5728 kvm_clear_exception_queue(vcpu);
5734 tss_selector = exit_qualification;
5736 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5737 type != INTR_TYPE_EXT_INTR &&
5738 type != INTR_TYPE_NMI_INTR))
5739 WARN_ON(!skip_emulated_instruction(vcpu));
5742 * TODO: What about debug traps on tss switch?
5743 * Are we supposed to inject them and update dr6?
5745 return kvm_task_switch(vcpu, tss_selector,
5746 type == INTR_TYPE_SOFT_INTR ? idt_index : -1,
5747 reason, has_error_code, error_code);
5750 static int handle_ept_violation(struct kvm_vcpu *vcpu)
5752 unsigned long exit_qualification;
5756 exit_qualification = vmx_get_exit_qual(vcpu);
5759 * If the EPT violation happened while executing IRET from an NMI, the
5760 * "blocked by NMI" bit has to be set before the next VM entry.
5761 * There are errata that may cause this bit to not be set:
5764 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5766 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5767 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5769 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5770 trace_kvm_page_fault(vcpu, gpa, exit_qualification);
5772 /* Is it a read fault? */
5773 error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
5774 ? PFERR_USER_MASK : 0;
5775 /* Is it a write fault? */
5776 error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
5777 ? PFERR_WRITE_MASK : 0;
5778 /* Is it a fetch fault? */
5779 error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
5780 ? PFERR_FETCH_MASK : 0;
5781 /* Is the EPT page table entry present? */
5782 error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
5783 ? PFERR_PRESENT_MASK : 0;
5785 error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
5786 PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
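	/*
	 * As a minimal illustration of the mapping above: a guest data write
	 * to a GPA whose final EPT translation is present but read-only
	 * (ACC_WRITE and GVA_TRANSLATED set, RWX reports readable) yields
	 * PFERR_WRITE_MASK | PFERR_PRESENT_MASK | PFERR_GUEST_FINAL_MASK.
	 */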
5788 vcpu->arch.exit_qualification = exit_qualification;
5791 * Check that the GPA doesn't exceed physical memory limits, as that is
5792 * a guest page fault. We have to emulate the instruction here, because
5793 * if the illegal address is that of a paging structure, then
5794 * EPT_VIOLATION_ACC_WRITE bit is set. Alternatively, if supported we
5795 * would also use advanced VM-exit information for EPT violations to
5796 * reconstruct the page fault error code.
5798 if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
5799 return kvm_emulate_instruction(vcpu, 0);
5801 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5804 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5808 if (!vmx_can_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
5812 * A nested guest cannot optimize MMIO vmexits, because we have an
5813 * nGPA here instead of the required GPA.
5815 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5816 if (!is_guest_mode(vcpu) &&
5817 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
5818 trace_kvm_fast_mmio(gpa);
5819 return kvm_skip_emulated_instruction(vcpu);
5822 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
5825 static int handle_nmi_window(struct kvm_vcpu *vcpu)
5827 if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm))
5830 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5831 ++vcpu->stat.nmi_window_exits;
5832 kvm_make_request(KVM_REQ_EVENT, vcpu);
5837 static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
5839 struct vcpu_vmx *vmx = to_vmx(vcpu);
5841 return vmx->emulation_required && !vmx->rmode.vm86_active &&
5842 (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
5845 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5847 struct vcpu_vmx *vmx = to_vmx(vcpu);
5848 bool intr_window_requested;
5849 unsigned count = 130;
5851 intr_window_requested = exec_controls_get(vmx) &
5852 CPU_BASED_INTR_WINDOW_EXITING;
5854 while (vmx->emulation_required && count-- != 0) {
5855 if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
5856 return handle_interrupt_window(&vmx->vcpu);
5858 if (kvm_test_request(KVM_REQ_EVENT, vcpu))
5861 if (!kvm_emulate_instruction(vcpu, 0))
5864 if (vmx_emulation_required_with_pending_exception(vcpu)) {
5865 kvm_prepare_emulation_failure_exit(vcpu);
5869 if (vcpu->arch.halt_request) {
5870 vcpu->arch.halt_request = 0;
5871 return kvm_emulate_halt_noskip(vcpu);
5875 * Note, return 1 and not 0, vcpu_run() will invoke
5876 * xfer_to_guest_mode() which will create a proper return
5879 if (__xfer_to_guest_mode_work_pending())
5886 static int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
5888 if (vmx_emulation_required_with_pending_exception(vcpu)) {
5889 kvm_prepare_emulation_failure_exit(vcpu);
5896 static void grow_ple_window(struct kvm_vcpu *vcpu)
5898 struct vcpu_vmx *vmx = to_vmx(vcpu);
5899 unsigned int old = vmx->ple_window;
5901 vmx->ple_window = __grow_ple_window(old, ple_window,
5905 if (vmx->ple_window != old) {
5906 vmx->ple_window_dirty = true;
5907 trace_kvm_ple_window_update(vcpu->vcpu_id,
5908 vmx->ple_window, old);
5912 static void shrink_ple_window(struct kvm_vcpu *vcpu)
5914 struct vcpu_vmx *vmx = to_vmx(vcpu);
5915 unsigned int old = vmx->ple_window;
5917 vmx->ple_window = __shrink_ple_window(old, ple_window,
5921 if (vmx->ple_window != old) {
5922 vmx->ple_window_dirty = true;
5923 trace_kvm_ple_window_update(vcpu->vcpu_id,
5924 vmx->ple_window, old);
5929 * Indicate a vcpu busy-waiting on a spinlock. We do not enable PAUSE
5930 * exiting, so we only get here on a cpu with PAUSE-Loop-Exiting.
5932 static int handle_pause(struct kvm_vcpu *vcpu)
5934 if (!kvm_pause_in_guest(vcpu->kvm))
5935 grow_ple_window(vcpu);
5938 * The Intel SDM vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
5939 * VM-execution control is ignored if CPL > 0. OTOH, KVM
5940 * never sets PAUSE_EXITING and just sets PLE if supported,
5941 * so the vcpu must be at CPL=0 if it gets a PAUSE exit.
5943 kvm_vcpu_on_spin(vcpu, true);
5944 return kvm_skip_emulated_instruction(vcpu);
5947 static int handle_monitor_trap(struct kvm_vcpu *vcpu)
5952 static int handle_invpcid(struct kvm_vcpu *vcpu)
5954 u32 vmx_instruction_info;
5963 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
5964 kvm_queue_exception(vcpu, UD_VECTOR);
5968 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5969 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5970 type = kvm_register_read(vcpu, gpr_index);
5972 /* According to the Intel instruction reference, the memory operand
5973 * is read even if it isn't needed (e.g., for type==all)
5975 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5976 vmx_instruction_info, false,
5977 sizeof(operand), &gva))
5980 return kvm_handle_invpcid(vcpu, type, gva);
5983 static int handle_pml_full(struct kvm_vcpu *vcpu)
5985 unsigned long exit_qualification;
5987 trace_kvm_pml_full(vcpu->vcpu_id);
5989 exit_qualification = vmx_get_exit_qual(vcpu);
5992 * If the PML buffer FULL exit happened while executing IRET from an NMI,
5993 * the "blocked by NMI" bit has to be set before the next VM entry.
5995 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5997 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5998 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5999 GUEST_INTR_STATE_NMI);
6002 * The PML buffer was already flushed at the beginning of the VMEXIT.
6003 * Nothing to do here, and there's no userspace involvement needed for PML.
6008 static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
6010 struct vcpu_vmx *vmx = to_vmx(vcpu);
6012 if (!vmx->req_immediate_exit &&
6013 !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
6014 kvm_lapic_expired_hv_timer(vcpu);
6015 return EXIT_FASTPATH_REENTER_GUEST;
6018 return EXIT_FASTPATH_NONE;
6021 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
6023 handle_fastpath_preemption_timer(vcpu);
6028 * When nested=0, all VMX instruction VM Exits filter here. The handlers
6029 * are overwritten by nested_vmx_setup() when nested=1.
6031 static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
6033 kvm_queue_exception(vcpu, UD_VECTOR);
6037 #ifndef CONFIG_X86_SGX_KVM
6038 static int handle_encls(struct kvm_vcpu *vcpu)
6041 * SGX virtualization is disabled. There is no software enable bit for
6042 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent
6043 * the guest from executing ENCLS (when SGX is supported by hardware).
6045 kvm_queue_exception(vcpu, UD_VECTOR);
6048 #endif /* CONFIG_X86_SGX_KVM */
6050 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
6053 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
6054 * VM-Exits. Unconditionally set the flag here and leave the handling to
6055 * vmx_handle_exit().
6057 to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
6061 static int handle_notify(struct kvm_vcpu *vcpu)
6063 unsigned long exit_qual = vmx_get_exit_qual(vcpu);
6064 bool context_invalid = exit_qual & NOTIFY_VM_CONTEXT_INVALID;
6066 ++vcpu->stat.notify_window_exits;
6069 * If the Notify VM exit happened while executing IRET from an NMI, the
6070 * "blocked by NMI" bit has to be set before the next VM entry.
6072 if (enable_vnmi && (exit_qual & INTR_INFO_UNBLOCK_NMI))
6073 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6074 GUEST_INTR_STATE_NMI);
6076 if (vcpu->kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_USER ||
6078 vcpu->run->exit_reason = KVM_EXIT_NOTIFY;
6079 vcpu->run->notify.flags = context_invalid ?
6080 KVM_NOTIFY_CONTEXT_INVALID : 0;
6088 * The exit handlers return 1 if the exit was handled fully and guest execution
6089 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
6090 * to be done to userspace and return 0.
6092 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6093 [EXIT_REASON_EXCEPTION_NMI] = handle_exception_nmi,
6094 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
6095 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
6096 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
6097 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
6098 [EXIT_REASON_CR_ACCESS] = handle_cr,
6099 [EXIT_REASON_DR_ACCESS] = handle_dr,
6100 [EXIT_REASON_CPUID] = kvm_emulate_cpuid,
6101 [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr,
6102 [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr,
6103 [EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window,
6104 [EXIT_REASON_HLT] = kvm_emulate_halt,
6105 [EXIT_REASON_INVD] = kvm_emulate_invd,
6106 [EXIT_REASON_INVLPG] = handle_invlpg,
6107 [EXIT_REASON_RDPMC] = kvm_emulate_rdpmc,
6108 [EXIT_REASON_VMCALL] = kvm_emulate_hypercall,
6109 [EXIT_REASON_VMCLEAR] = handle_vmx_instruction,
6110 [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction,
6111 [EXIT_REASON_VMPTRLD] = handle_vmx_instruction,
6112 [EXIT_REASON_VMPTRST] = handle_vmx_instruction,
6113 [EXIT_REASON_VMREAD] = handle_vmx_instruction,
6114 [EXIT_REASON_VMRESUME] = handle_vmx_instruction,
6115 [EXIT_REASON_VMWRITE] = handle_vmx_instruction,
6116 [EXIT_REASON_VMOFF] = handle_vmx_instruction,
6117 [EXIT_REASON_VMON] = handle_vmx_instruction,
6118 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
6119 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
6120 [EXIT_REASON_APIC_WRITE] = handle_apic_write,
6121 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
6122 [EXIT_REASON_WBINVD] = kvm_emulate_wbinvd,
6123 [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv,
6124 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
6125 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
6126 [EXIT_REASON_GDTR_IDTR] = handle_desc,
6127 [EXIT_REASON_LDTR_TR] = handle_desc,
6128 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
6129 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
6130 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
6131 [EXIT_REASON_MWAIT_INSTRUCTION] = kvm_emulate_mwait,
6132 [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap,
6133 [EXIT_REASON_MONITOR_INSTRUCTION] = kvm_emulate_monitor,
6134 [EXIT_REASON_INVEPT] = handle_vmx_instruction,
6135 [EXIT_REASON_INVVPID] = handle_vmx_instruction,
6136 [EXIT_REASON_RDRAND] = kvm_handle_invalid_op,
6137 [EXIT_REASON_RDSEED] = kvm_handle_invalid_op,
6138 [EXIT_REASON_PML_FULL] = handle_pml_full,
6139 [EXIT_REASON_INVPCID] = handle_invpcid,
6140 [EXIT_REASON_VMFUNC] = handle_vmx_instruction,
6141 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
6142 [EXIT_REASON_ENCLS] = handle_encls,
6143 [EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit,
6144 [EXIT_REASON_NOTIFY] = handle_notify,
6147 static const int kvm_vmx_max_exit_handlers =
6148 ARRAY_SIZE(kvm_vmx_exit_handlers);
6150 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
6151 u64 *info1, u64 *info2,
6152 u32 *intr_info, u32 *error_code)
6154 struct vcpu_vmx *vmx = to_vmx(vcpu);
6156 *reason = vmx->exit_reason.full;
6157 *info1 = vmx_get_exit_qual(vcpu);
6158 if (!(vmx->exit_reason.failed_vmentry)) {
6159 *info2 = vmx->idt_vectoring_info;
6160 *intr_info = vmx_get_intr_info(vcpu);
6161 if (is_exception_with_error_code(*intr_info))
6162 *error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6172 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
6175 __free_page(vmx->pml_pg);
6180 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
6182 struct vcpu_vmx *vmx = to_vmx(vcpu);
6186 pml_idx = vmcs_read16(GUEST_PML_INDEX);
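	/*
	 * Hardware logs a GPA at the current PML index and then decrements
	 * the index, filling the buffer from entry 511 downward. So an index
	 * of PML_ENTITY_NUM - 1 means the buffer is still empty, and e.g. an
	 * index of 509 means entries 510 and 511 hold valid GPAs.
	 */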
6188 /* Do nothing if PML buffer is empty */
6189 if (pml_idx == (PML_ENTITY_NUM - 1))
6192 /* PML index always points to next available PML buffer entity */
6193 if (pml_idx >= PML_ENTITY_NUM)
6198 pml_buf = page_address(vmx->pml_pg);
6199 for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
6202 gpa = pml_buf[pml_idx];
6203 WARN_ON(gpa & (PAGE_SIZE - 1));
6204 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
6207 /* reset PML index */
6208 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
6211 static void vmx_dump_sel(char *name, uint32_t sel)
6213 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
6214 name, vmcs_read16(sel),
6215 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
6216 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
6217 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
6220 static void vmx_dump_dtsel(char *name, uint32_t limit)
6222 pr_err("%s limit=0x%08x, base=0x%016lx\n",
6223 name, vmcs_read32(limit),
6224 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
6227 static void vmx_dump_msrs(char *name, struct vmx_msrs *m)
6230 struct vmx_msr_entry *e;
6232 pr_err("MSR %s:\n", name);
6233 for (i = 0, e = m->val; i < m->nr; ++i, ++e)
6234 pr_err(" %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value);
6237 void dump_vmcs(struct kvm_vcpu *vcpu)
6239 struct vcpu_vmx *vmx = to_vmx(vcpu);
6240 u32 vmentry_ctl, vmexit_ctl;
6241 u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
6242 u64 tertiary_exec_control;
6246 if (!dump_invalid_vmcs) {
6247 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
6251 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
6252 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
6253 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
6254 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
6255 cr4 = vmcs_readl(GUEST_CR4);
6257 if (cpu_has_secondary_exec_ctrls())
6258 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6260 secondary_exec_control = 0;
6262 if (cpu_has_tertiary_exec_ctrls())
6263 tertiary_exec_control = vmcs_read64(TERTIARY_VM_EXEC_CONTROL);
6265 tertiary_exec_control = 0;
6267 pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
6268 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6269 pr_err("*** Guest State ***\n");
6270 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6271 vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
6272 vmcs_readl(CR0_GUEST_HOST_MASK));
6273 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6274 cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
6275 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
6276 if (cpu_has_vmx_ept()) {
6277 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
6278 vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
6279 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
6280 vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
6282 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
6283 vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
6284 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
6285 vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
6286 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6287 vmcs_readl(GUEST_SYSENTER_ESP),
6288 vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
6289 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR);
6290 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR);
6291 vmx_dump_sel("SS: ", GUEST_SS_SELECTOR);
6292 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR);
6293 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR);
6294 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR);
6295 vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
6296 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
6297 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
6298 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
6299 efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
6300 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
6301 pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
6302 else if (efer_slot >= 0)
6303 pr_err("EFER= 0x%016llx (autoload)\n",
6304 vmx->msr_autoload.guest.val[efer_slot].value);
6305 else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
6306 pr_err("EFER= 0x%016llx (effective)\n",
6307 vcpu->arch.efer | (EFER_LMA | EFER_LME));
6309 pr_err("EFER= 0x%016llx (effective)\n",
6310 vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
6311 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
6312 pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
6313 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
6314 vmcs_read64(GUEST_IA32_DEBUGCTL),
6315 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
6316 if (cpu_has_load_perf_global_ctrl() &&
6317 vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
6318 pr_err("PerfGlobCtl = 0x%016llx\n",
6319 vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
6320 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
6321 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
6322 pr_err("Interruptibility = %08x ActivityState = %08x\n",
6323 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
6324 vmcs_read32(GUEST_ACTIVITY_STATE));
6325 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
6326 pr_err("InterruptStatus = %04x\n",
6327 vmcs_read16(GUEST_INTR_STATUS));
6328 if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0)
6329 vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
6330 if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0)
6331 vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);
6333 pr_err("*** Host State ***\n");
6334 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
6335 vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
6336 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
6337 vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
6338 vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
6339 vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
6340 vmcs_read16(HOST_TR_SELECTOR));
6341 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
6342 vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
6343 vmcs_readl(HOST_TR_BASE));
6344 pr_err("GDTBase=%016lx IDTBase=%016lx\n",
6345 vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
6346 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
6347 vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
6348 vmcs_readl(HOST_CR4));
6349 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6350 vmcs_readl(HOST_IA32_SYSENTER_ESP),
6351 vmcs_read32(HOST_IA32_SYSENTER_CS),
6352 vmcs_readl(HOST_IA32_SYSENTER_EIP));
6353 if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
6354 pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
6355 if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
6356 pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
6357 if (cpu_has_load_perf_global_ctrl() &&
6358 vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
6359 pr_err("PerfGlobCtl = 0x%016llx\n",
6360 vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
6361 if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0)
6362 vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
6364 pr_err("*** Control State ***\n");
6365 pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
6366 cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
6367 pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
6368 pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
6369 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
6370 vmcs_read32(EXCEPTION_BITMAP),
6371 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
6372 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
6373 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
6374 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
6375 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
6376 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
6377 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
6378 vmcs_read32(VM_EXIT_INTR_INFO),
6379 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
6380 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
6381 pr_err(" reason=%08x qualification=%016lx\n",
6382 vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
6383 pr_err("IDTVectoring: info=%08x errcode=%08x\n",
6384 vmcs_read32(IDT_VECTORING_INFO_FIELD),
6385 vmcs_read32(IDT_VECTORING_ERROR_CODE));
6386 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
6387 if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
6388 pr_err("TSC Multiplier = 0x%016llx\n",
6389 vmcs_read64(TSC_MULTIPLIER));
6390 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
6391 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
6392 u16 status = vmcs_read16(GUEST_INTR_STATUS);
6393 pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
6395 pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
6396 if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
6397 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
6398 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
6400 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
6401 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
6402 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
6403 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
6404 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
6405 pr_err("PLE Gap=%08x Window=%08x\n",
6406 vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
6407 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
6408 pr_err("Virtual processor ID = 0x%04x\n",
6409 vmcs_read16(VIRTUAL_PROCESSOR_ID));
6413 * The guest has exited. See if we can fix it or if we need userspace assistance.
6416 static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6418 struct vcpu_vmx *vmx = to_vmx(vcpu);
6419 union vmx_exit_reason exit_reason = vmx->exit_reason;
6420 u32 vectoring_info = vmx->idt_vectoring_info;
6421 u16 exit_handler_index;
6424 * Flush the PML buffer of logged GPAs so that dirty_bitmap is kept up to
6425 * date. Another benefit: in kvm_vm_ioctl_get_dirty_log, before querying
6426 * dirty_bitmap, we only need to kick all vcpus out of guest
6427 * mode, because once a vcpu is back in root mode its PML buffer must
6428 * already have been flushed. Note, PML is never enabled in hardware while running L2.
6431 if (enable_pml && !is_guest_mode(vcpu))
6432 vmx_flush_pml_buffer(vcpu);
6435 * KVM should never reach this point with a pending nested VM-Enter.
6436 * More specifically, short-circuiting VM-Entry to emulate L2 due to
6437 * invalid guest state should never happen as that means KVM knowingly
6438 * allowed a nested VM-Enter with an invalid vmcs12. More below.
6440 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
6443 if (is_guest_mode(vcpu)) {
6445 * PML is never enabled when running L2, bail immediately if a
6446 * PML full exit occurs as something is horribly wrong.
6448 if (exit_reason.basic == EXIT_REASON_PML_FULL)
6449 goto unexpected_vmexit;
6452 * The host physical addresses of some pages of guest memory
6453 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
6454 * Page). The CPU may write to these pages via their host
6455 * physical address while L2 is running, bypassing any
6456 * address-translation-based dirty tracking (e.g. EPT write protection).
6459 * Mark them dirty on every exit from L2 to prevent them from
6460 * getting out of sync with dirty tracking.
6462 nested_mark_vmcs12_pages_dirty(vcpu);
6465 * Synthesize a triple fault if L2 state is invalid. In normal
6466 * operation, nested VM-Enter rejects any attempt to enter L2
6467 * with invalid state. However, those checks are skipped if
6468 * state is being stuffed via RSM or KVM_SET_NESTED_STATE. If
6469 * L2 state is invalid, it means either L1 modified SMRAM state
6470 * or userspace provided bad state. Synthesize TRIPLE_FAULT as
6471 * doing so is architecturally allowed in the RSM case, and is
6472 * the least awful solution for the userspace case without
6473 * risking false positives.
6475 if (vmx->emulation_required) {
6476 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
6480 if (nested_vmx_reflect_vmexit(vcpu))
6484 /* If guest state is invalid, start emulating. L2 is handled above. */
6485 if (vmx->emulation_required)
6486 return handle_invalid_guest_state(vcpu);
6488 if (exit_reason.failed_vmentry) {
6490 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6491 vcpu->run->fail_entry.hardware_entry_failure_reason
6493 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6497 if (unlikely(vmx->fail)) {
6499 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6500 vcpu->run->fail_entry.hardware_entry_failure_reason
6501 = vmcs_read32(VM_INSTRUCTION_ERROR);
6502 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6508 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a
6509 * delivery event, since that indicates the guest is accessing MMIO.
6510 * The VM-exit could be triggered again after returning to the guest,
6511 * which would cause an infinite loop.
6513 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
6514 (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
6515 exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
6516 exit_reason.basic != EXIT_REASON_PML_FULL &&
6517 exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
6518 exit_reason.basic != EXIT_REASON_TASK_SWITCH &&
6519 exit_reason.basic != EXIT_REASON_NOTIFY)) {
6522 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6523 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
6524 vcpu->run->internal.data[0] = vectoring_info;
6525 vcpu->run->internal.data[1] = exit_reason.full;
6526 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
6527 if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
6528 vcpu->run->internal.data[ndata++] =
6529 vmcs_read64(GUEST_PHYSICAL_ADDRESS);
6531 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
6532 vcpu->run->internal.ndata = ndata;
6536 if (unlikely(!enable_vnmi &&
6537 vmx->loaded_vmcs->soft_vnmi_blocked)) {
6538 if (!vmx_interrupt_blocked(vcpu)) {
6539 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6540 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
6541 vcpu->arch.nmi_pending) {
6543 * This CPU doesn't help us find the end of an
6544 * NMI-blocked window if the guest runs with IRQs
6545 * disabled, so pull the trigger after 1 s of
6546 * futile waiting, but inform the user about it.
6548 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6549 "state on VCPU %d after 1 s timeout\n",
6550 __func__, vcpu->vcpu_id);
6551 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6555 if (exit_fastpath != EXIT_FASTPATH_NONE)
6558 if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
6559 goto unexpected_vmexit;
6560 #ifdef CONFIG_RETPOLINE
6561 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
6562 return kvm_emulate_wrmsr(vcpu);
6563 else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
6564 return handle_preemption_timer(vcpu);
6565 else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
6566 return handle_interrupt_window(vcpu);
6567 else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6568 return handle_external_interrupt(vcpu);
6569 else if (exit_reason.basic == EXIT_REASON_HLT)
6570 return kvm_emulate_halt(vcpu);
6571 else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
6572 return handle_ept_misconfig(vcpu);
6575 exit_handler_index = array_index_nospec((u16)exit_reason.basic,
6576 kvm_vmx_max_exit_handlers);
6577 if (!kvm_vmx_exit_handlers[exit_handler_index])
6578 goto unexpected_vmexit;
6580 return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
6583 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6586 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6587 vcpu->run->internal.suberror =
6588 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
6589 vcpu->run->internal.ndata = 2;
6590 vcpu->run->internal.data[0] = exit_reason.full;
6591 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
6595 static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6597 int ret = __vmx_handle_exit(vcpu, exit_fastpath);
6600 * Exit to user space when a bus lock is detected, to inform userspace
6601 * that there is a bus lock in the guest.
6603 if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
6605 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
6607 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
6614 * Software based L1D cache flush which is used when microcode providing
6615 * the cache control MSR is not loaded.
6617 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
6618 * flushing it requires reading 64 KiB because the replacement algorithm
6619 * is not exactly LRU. This could be sized at runtime via topology
6620 * information, but as all relevant affected CPUs have a 32 KiB L1D cache
6621 * size there is no point in doing so.
6623 static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
6625 int size = PAGE_SIZE << L1D_CACHE_ORDER;
6628 * This code is only executed when the flush mode is 'cond' or
6631 if (static_branch_likely(&vmx_l1d_flush_cond)) {
6635 * Clear the per-vcpu flush bit; it gets set again
6636 * either from vcpu_run() or from one of the unsafe VMEXIT handlers.
6639 flush_l1d = vcpu->arch.l1tf_flush_l1d;
6640 vcpu->arch.l1tf_flush_l1d = false;
6643 * Clear the per-cpu flush bit; it gets set again from
6644 * the interrupt handlers.
6646 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
6647 kvm_clear_cpu_l1tf_flush_l1d();
6653 vcpu->stat.l1d_flush++;
6655 if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
6656 native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
6661 /* First ensure the pages are in the TLB */
6662 "xorl %%eax, %%eax\n"
6663 ".Lpopulate_tlb:\n\t"
6664 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6665 "addl $4096, %%eax\n\t"
6666 "cmpl %%eax, %[size]\n\t"
6667 "jne .Lpopulate_tlb\n\t"
6668 "xorl %%eax, %%eax\n\t"
6670 /* Now fill the cache */
6671 "xorl %%eax, %%eax\n"
6673 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6674 "addl $64, %%eax\n\t"
6675 "cmpl %%eax, %[size]\n\t"
6676 "jne .Lfill_cache\n\t"
6678 :: [flush_pages] "r" (vmx_l1d_flush_pages),
6680 : "eax", "ebx", "ecx", "edx");
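/*
 * Rough arithmetic for the fallback loops above (illustration, assuming
 * 4 KiB pages and the 64 KiB flush size discussed before the function):
 * the TLB-populate pass strides by 4096 bytes and touches
 * 64 KiB / 4 KiB = 16 pages of vmx_l1d_flush_pages, while the fill pass
 * strides by the 64-byte cache-line size and performs
 * 64 KiB / 64 B = 1024 loads, enough to displace a 32 KiB L1D even with a
 * replacement policy that is not exactly LRU.
 */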
6683 static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6685 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6688 if (is_guest_mode(vcpu) &&
6689 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
6692 tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr;
6693 if (is_guest_mode(vcpu))
6694 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
6696 vmcs_write32(TPR_THRESHOLD, tpr_threshold);
6699 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
6701 struct vcpu_vmx *vmx = to_vmx(vcpu);
6702 u32 sec_exec_control;
6704 if (!lapic_in_kernel(vcpu))
6707 if (!flexpriority_enabled &&
6708 !cpu_has_vmx_virtualize_x2apic_mode())
6711 /* Postpone execution until vmcs01 is the current VMCS. */
6712 if (is_guest_mode(vcpu)) {
6713 vmx->nested.change_vmcs01_virtual_apic_mode = true;
6717 sec_exec_control = secondary_exec_controls_get(vmx);
6718 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
6719 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
6721 switch (kvm_get_apic_mode(vcpu)) {
6722 case LAPIC_MODE_INVALID:
6723 WARN_ONCE(true, "Invalid local APIC state");
6725 case LAPIC_MODE_DISABLED:
6727 case LAPIC_MODE_XAPIC:
6728 if (flexpriority_enabled) {
6730 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6731 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6734 * Flush the TLB, reloading the APIC access page will
6735 * only do so if its physical address has changed, but
6736 * the guest may have inserted a non-APIC mapping into
6737 * the TLB while the APIC access page was disabled.
6739 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
6742 case LAPIC_MODE_X2APIC:
6743 if (cpu_has_vmx_virtualize_x2apic_mode())
6745 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6748 secondary_exec_controls_set(vmx, sec_exec_control);
6750 vmx_update_msr_bitmap_x2apic(vcpu);
6753 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
6757 /* Defer reload until vmcs01 is the current VMCS. */
6758 if (is_guest_mode(vcpu)) {
6759 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true;
6763 if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
6764 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
6767 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
6768 if (is_error_page(page))
6771 vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(page));
6772 vmx_flush_tlb_current(vcpu);
6775 * Do not pin the APIC access page in memory; the MMU notifier
6776 * will call us again if it is migrated or swapped out.
6781 static void vmx_hwapic_isr_update(int max_isr)
6789 status = vmcs_read16(GUEST_INTR_STATUS);
6791 if (max_isr != old) {
6793 status |= max_isr << 8;
6794 vmcs_write16(GUEST_INTR_STATUS, status);
6798 static void vmx_set_rvi(int vector)
6806 status = vmcs_read16(GUEST_INTR_STATUS);
6807 old = (u8)status & 0xff;
6808 if ((u8)vector != old) {
6810 status |= (u8)vector;
6811 vmcs_write16(GUEST_INTR_STATUS, status);
6815 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
6818 * When running L2, updating RVI is only relevant if vmcs12's
6819 * virtual-interrupt-delivery is enabled.
6820 * However, that can be enabled only when L1 also
6821 * intercepts external interrupts, and in that case
6822 * we should not update vmcs02's RVI but instead intercept
6823 * the interrupt. Therefore, do nothing when running L2.
6825 if (!is_guest_mode(vcpu))
6826 vmx_set_rvi(max_irr);
6829 static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
6831 struct vcpu_vmx *vmx = to_vmx(vcpu);
6833 bool got_posted_interrupt;
6835 if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
6838 if (pi_test_on(&vmx->pi_desc)) {
6839 pi_clear_on(&vmx->pi_desc);
6841 * IOMMU can write to PID.ON, so the barrier matters even on UP.
6842 * But on x86 this is just a compiler barrier anyway.
6844 smp_mb__after_atomic();
6845 got_posted_interrupt =
6846 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
6848 max_irr = kvm_lapic_find_highest_irr(vcpu);
6849 got_posted_interrupt = false;
6853 * Newly recognized interrupts are injected via either virtual interrupt
6854 * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is
6855 * disabled in two cases:
6857 * 1) If L2 is running and the vCPU has a new pending interrupt. If L1
6858 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
6859 * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected
6860 * into L2, but KVM doesn't use virtual interrupt delivery to inject
6861 * interrupts into L2, and so KVM_REQ_EVENT is again needed.
6863 * 2) If APICv is disabled for this vCPU, assigned devices may still
6864 * attempt to post interrupts. The posted interrupt vector will cause
6865 * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
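 *
 * As a quick summary of the dispatch below (illustration, no extra
 * behaviour implied):
 *
 *   !is_guest_mode() && APICv active -> program RVI; the CPU delivers the
 *                                       interrupt on its own.
 *   otherwise, if a posted interrupt
 *   was found in the PIR             -> raise KVM_REQ_EVENT and let the
 *                                       normal injection path handle it.
 */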
6867 if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
6868 vmx_set_rvi(max_irr);
6869 else if (got_posted_interrupt)
6870 kvm_make_request(KVM_REQ_EVENT, vcpu);
6875 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6877 if (!kvm_vcpu_apicv_active(vcpu))
6880 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
6881 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
6882 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
6883 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
6886 static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
6888 struct vcpu_vmx *vmx = to_vmx(vcpu);
6890 pi_clear_on(&vmx->pi_desc);
6891 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
6894 void vmx_do_interrupt_irqoff(unsigned long entry);
6895 void vmx_do_nmi_irqoff(void);
6897 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
6900 * Save xfd_err to guest_fpu before interrupts are enabled, so the
6901 * MSR value is not clobbered by host activity before the guest
6902 * has a chance to consume it.
6904 * Do not blindly read xfd_err here, since this exception might
6905 * be caused by L1 interception on a platform which doesn't
6906 * support xfd at all.
6908 * Do it conditionally upon guest_fpu::xfd. xfd_err matters
6909 * only when xfd contains a non-zero value.
6911 * Queuing exception is done in vmx_handle_exit. See comment there.
6913 if (vcpu->arch.guest_fpu.fpstate->xfd)
6914 rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
6917 static void handle_exception_irqoff(struct vcpu_vmx *vmx)
6919 u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
6921 /* if exit due to PF check for async PF */
6922 if (is_page_fault(intr_info))
6923 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
6924 /* if exit due to NM, handle before interrupts are enabled */
6925 else if (is_nm_fault(intr_info))
6926 handle_nm_fault_irqoff(&vmx->vcpu);
6927 /* Handle machine checks before interrupts are enabled */
6928 else if (is_machine_check(intr_info))
6929 kvm_machine_check();
6932 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
6934 u32 intr_info = vmx_get_intr_info(vcpu);
6935 unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
6936 gate_desc *desc = (gate_desc *)host_idt_base + vector;
6938 if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
6939 "unexpected VM-Exit interrupt info: 0x%x", intr_info))
6942 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
6943 vmx_do_interrupt_irqoff(gate_offset(desc));
6944 kvm_after_interrupt(vcpu);
6946 vcpu->arch.at_instruction_boundary = true;
6949 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
6951 struct vcpu_vmx *vmx = to_vmx(vcpu);
6953 if (vmx->emulation_required)
6956 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6957 handle_external_interrupt_irqoff(vcpu);
6958 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
6959 handle_exception_irqoff(vmx);
6963 * The kvm parameter can be NULL (module initialization, or invocation before
6964 * VM creation). Be sure to check the kvm parameter before using it.
6966 static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
6969 case MSR_IA32_SMBASE:
6970 if (!IS_ENABLED(CONFIG_KVM_SMM))
6973 * We cannot do SMM unless we can run the guest in big real mode.
6976 return enable_unrestricted_guest || emulate_invalid_guest_state;
6977 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
6979 case MSR_AMD64_VIRT_SPEC_CTRL:
6980 case MSR_AMD64_TSC_RATIO:
6981 /* This is AMD only. */
6988 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
6993 bool idtv_info_valid;
6995 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
6998 if (vmx->loaded_vmcs->nmi_known_unmasked)
7001 exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
7002 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
7003 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
7005 * SDM 3: 27.7.1.2 (September 2008)
7006 * Re-set bit "block by NMI" before VM entry if the VM exit was caused
7007 * by a guest IRET fault.
7008 * SDM 3: 23.2.2 (September 2008)
7009 * Bit 12 is undefined in any of the following cases:
7010 * If the VM exit sets the valid bit in the IDT-vectoring
7011 * information field.
7012 * If the VM exit is due to a double fault.
7014 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
7015 vector != DF_VECTOR && !idtv_info_valid)
7016 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
7017 GUEST_INTR_STATE_NMI);
7019 vmx->loaded_vmcs->nmi_known_unmasked =
7020 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
7021 & GUEST_INTR_STATE_NMI);
7022 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
7023 vmx->loaded_vmcs->vnmi_blocked_time +=
7024 ktime_to_ns(ktime_sub(ktime_get(),
7025 vmx->loaded_vmcs->entry_time));
7028 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
7029 u32 idt_vectoring_info,
7030 int instr_len_field,
7031 int error_code_field)
7035 bool idtv_info_valid;
7037 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7039 vcpu->arch.nmi_injected = false;
7040 kvm_clear_exception_queue(vcpu);
7041 kvm_clear_interrupt_queue(vcpu);
7043 if (!idtv_info_valid)
7046 kvm_make_request(KVM_REQ_EVENT, vcpu);
7048 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
7049 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
7052 case INTR_TYPE_NMI_INTR:
7053 vcpu->arch.nmi_injected = true;
7055 * SDM 3: 27.7.1.2 (September 2008)
7056 * Clear bit "block by NMI" before VM entry if an NMI delivery faulted.
7059 vmx_set_nmi_mask(vcpu, false);
7061 case INTR_TYPE_SOFT_EXCEPTION:
7062 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7064 case INTR_TYPE_HARD_EXCEPTION:
7065 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
7066 u32 err = vmcs_read32(error_code_field);
7067 kvm_requeue_exception_e(vcpu, vector, err);
7069 kvm_requeue_exception(vcpu, vector);
7071 case INTR_TYPE_SOFT_INTR:
7072 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7074 case INTR_TYPE_EXT_INTR:
7075 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
7082 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
7084 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7085 VM_EXIT_INSTRUCTION_LEN,
7086 IDT_VECTORING_ERROR_CODE);
7089 static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
7091 __vmx_complete_interrupts(vcpu,
7092 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
7093 VM_ENTRY_INSTRUCTION_LEN,
7094 VM_ENTRY_EXCEPTION_ERROR_CODE);
7096 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
7099 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
7102 struct perf_guest_switch_msr *msrs;
7103 struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
7105 pmu->host_cross_mapped_mask = 0;
7106 if (pmu->pebs_enable & pmu->global_ctrl)
7107 intel_pmu_cross_mapped_check(pmu);
7109 /* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
7110 msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
7114 for (i = 0; i < nr_msrs; i++)
7115 if (msrs[i].host == msrs[i].guest)
7116 clear_atomic_switch_msr(vmx, msrs[i].msr);
7118 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
7119 msrs[i].host, false);
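/*
 * Example of the loop's effect (illustration only): if perf reports
 * identical host and guest values for an MSR, such as the perf global
 * control MSR, no VM-entry/VM-exit autoload slot is spent on it; only MSRs
 * whose guest value differs from the host value get an atomic switch entry
 * via add_atomic_switch_msr().
 */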
7122 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
7124 struct vcpu_vmx *vmx = to_vmx(vcpu);
7128 if (vmx->req_immediate_exit) {
7129 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
7130 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7131 } else if (vmx->hv_deadline_tsc != -1) {
7133 if (vmx->hv_deadline_tsc > tscl)
7134 /* set_hv_timer ensures the delta fits in 32-bits */
7135 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
7136 cpu_preemption_timer_multi);
7140 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
7141 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7142 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
7143 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1);
7144 vmx->loaded_vmcs->hv_timer_soft_disabled = true;
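/*
 * Worked example for the programming above (illustration only; the real
 * shift is whatever cpu_preemption_timer_multi was read as during setup):
 * with a preemption-timer rate of TSC >> 5, a deadline 3,200,000 TSC cycles
 * in the future becomes 3,200,000 >> 5 = 100,000 timer ticks, while
 * req_immediate_exit writes 0 so the very next VM-entry exits immediately.
 */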
7148 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
7150 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
7151 vmx->loaded_vmcs->host_state.rsp = host_rsp;
7152 vmcs_writel(HOST_RSP, host_rsp);
7156 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
7159 u64 hostval = this_cpu_read(x86_spec_ctrl_current);
7161 if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
7164 if (flags & VMX_RUN_SAVE_SPEC_CTRL)
7165 vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
7168 * If the guest/host SPEC_CTRL values differ, restore the host value.
7170 * For legacy IBRS, the IBRS bit always needs to be written after
7171 * transitioning from a less privileged predictor mode, regardless of
7172 * whether the guest/host values differ.
7174 if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
7175 vmx->spec_ctrl != hostval)
7176 native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
7181 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
7183 switch (to_vmx(vcpu)->exit_reason.basic) {
7184 case EXIT_REASON_MSR_WRITE:
7185 return handle_fastpath_set_msr_irqoff(vcpu);
7186 case EXIT_REASON_PREEMPTION_TIMER:
7187 return handle_fastpath_preemption_timer(vcpu);
7189 return EXIT_FASTPATH_NONE;
7193 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
7196 struct vcpu_vmx *vmx = to_vmx(vcpu);
7198 guest_state_enter_irqoff();
7200 /* L1D Flush includes CPU buffer clear to mitigate MDS */
7201 if (static_branch_unlikely(&vmx_l1d_should_flush))
7202 vmx_l1d_flush(vcpu);
7203 else if (static_branch_unlikely(&mds_user_clear))
7204 mds_clear_cpu_buffers();
7205 else if (static_branch_unlikely(&mmio_stale_data_clear) &&
7206 kvm_arch_has_assigned_device(vcpu->kvm))
7207 mds_clear_cpu_buffers();
7209 vmx_disable_fb_clear(vmx);
7211 if (vcpu->arch.cr2 != native_read_cr2())
7212 native_write_cr2(vcpu->arch.cr2);
7214 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
7217 vcpu->arch.cr2 = native_read_cr2();
7219 vmx_enable_fb_clear(vmx);
7221 if (unlikely(vmx->fail))
7222 vmx->exit_reason.full = 0xdead;
7224 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
7226 if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
7227 is_nmi(vmx_get_intr_info(vcpu))) {
7228 kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
7229 vmx_do_nmi_irqoff();
7230 kvm_after_interrupt(vcpu);
7233 guest_state_exit_irqoff();
7236 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
7238 struct vcpu_vmx *vmx = to_vmx(vcpu);
7239 unsigned long cr3, cr4;
7241 /* Record the guest's net vcpu time for enforced NMI injections. */
7242 if (unlikely(!enable_vnmi &&
7243 vmx->loaded_vmcs->soft_vnmi_blocked))
7244 vmx->loaded_vmcs->entry_time = ktime_get();
7247 * Don't enter VMX if guest state is invalid, let the exit handler
7248 * start emulation until we arrive back at a valid state. Synthesize a
7249 * consistency check VM-Exit due to invalid guest state and bail.
7251 if (unlikely(vmx->emulation_required)) {
7254 vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
7255 vmx->exit_reason.failed_vmentry = 1;
7256 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
7257 vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
7258 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
7259 vmx->exit_intr_info = 0;
7260 return EXIT_FASTPATH_NONE;
7263 trace_kvm_entry(vcpu);
7265 if (vmx->ple_window_dirty) {
7266 vmx->ple_window_dirty = false;
7267 vmcs_write32(PLE_WINDOW, vmx->ple_window);
7271 * We did this in prepare_switch_to_guest, because it needs to
7272 * be within srcu_read_lock.
7274 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
7276 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
7277 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
7278 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
7279 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
7280 vcpu->arch.regs_dirty = 0;
7283 * Refresh vmcs.HOST_CR3 if necessary. This must be done immediately
7284 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
7285 * it switches back to the current->mm, which can occur in KVM context
7286 * when switching to a temporary mm to patch kernel code, e.g. if KVM
7287 * toggles a static key while handling a VM-Exit.
7289 cr3 = __get_current_cr3_fast();
7290 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
7291 vmcs_writel(HOST_CR3, cr3);
7292 vmx->loaded_vmcs->host_state.cr3 = cr3;
7295 cr4 = cr4_read_shadow();
7296 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
7297 vmcs_writel(HOST_CR4, cr4);
7298 vmx->loaded_vmcs->host_state.cr4 = cr4;
7301 /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
7302 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
7303 set_debugreg(vcpu->arch.dr6, 6);
7305 /* When single-stepping over STI and MOV SS, we must clear the
7306 * corresponding interruptibility bits in the guest state. Otherwise
7307 * vmentry fails as it then expects bit 14 (BS) in pending debug
7308 * exceptions to be set, but that's not correct for the guest debugging case.
7310 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7311 vmx_set_interrupt_shadow(vcpu, 0);
7313 kvm_load_guest_xsave_state(vcpu);
7315 pt_guest_enter(vmx);
7317 atomic_switch_perf_msrs(vmx);
7318 if (intel_pmu_lbr_is_enabled(vcpu))
7319 vmx_passthrough_lbr_msrs(vcpu);
7321 if (enable_preemption_timer)
7322 vmx_update_hv_timer(vcpu);
7324 kvm_wait_lapic_expire(vcpu);
7326 /* The actual VMENTER/EXIT is in the .noinstr.text section. */
7327 vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
7329 /* All fields are clean at this point */
7330 if (kvm_is_using_evmcs()) {
7331 current_evmcs->hv_clean_fields |=
7332 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
7334 current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
7337 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
7338 if (vmx->host_debugctlmsr)
7339 update_debugctlmsr(vmx->host_debugctlmsr);
7341 #ifndef CONFIG_X86_64
7343 * The sysexit path does not restore ds/es, so we must set them to
7344 * a reasonable value ourselves.
7346 * We can't defer this to vmx_prepare_switch_to_host() since that
7347 * function may be executed in interrupt context, which saves and
7348 * restores segments around it, nullifying its effect.
7350 loadsegment(ds, __USER_DS);
7351 loadsegment(es, __USER_DS);
7354 vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
7358 kvm_load_host_xsave_state(vcpu);
7360 if (is_guest_mode(vcpu)) {
7362 * Track VMLAUNCH/VMRESUME that have made it past guest state checking.
7365 if (vmx->nested.nested_run_pending &&
7366 !vmx->exit_reason.failed_vmentry)
7367 ++vcpu->stat.nested_run;
7369 vmx->nested.nested_run_pending = 0;
7372 vmx->idt_vectoring_info = 0;
7374 if (unlikely(vmx->fail))
7375 return EXIT_FASTPATH_NONE;
7377 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
7378 kvm_machine_check();
7380 if (likely(!vmx->exit_reason.failed_vmentry))
7381 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
7383 trace_kvm_exit(vcpu, KVM_ISA_VMX);
7385 if (unlikely(vmx->exit_reason.failed_vmentry))
7386 return EXIT_FASTPATH_NONE;
7388 vmx->loaded_vmcs->launched = 1;
7390 vmx_recover_nmi_blocking(vmx);
7391 vmx_complete_interrupts(vmx);
7393 if (is_guest_mode(vcpu))
7394 return EXIT_FASTPATH_NONE;
7396 return vmx_exit_handlers_fastpath(vcpu);
7399 static void vmx_vcpu_free(struct kvm_vcpu *vcpu)
7401 struct vcpu_vmx *vmx = to_vmx(vcpu);
7404 vmx_destroy_pml_buffer(vmx);
7405 free_vpid(vmx->vpid);
7406 nested_vmx_free_vcpu(vcpu);
7407 free_loaded_vmcs(vmx->loaded_vmcs);
7410 static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
7412 struct vmx_uret_msr *tsx_ctrl;
7413 struct vcpu_vmx *vmx;
7416 BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
7419 INIT_LIST_HEAD(&vmx->pi_wakeup_list);
7423 vmx->vpid = allocate_vpid();
7426 * If PML is turned on, failure to enable PML just results in failure
7427 * to create the vcpu, so we can simplify the PML logic (e.g. by not
7428 * having to deal with cases such as enabling PML for only some of a
7429 * guest's vcpus).
7432 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7437 for (i = 0; i < kvm_nr_uret_msrs; ++i)
7438 vmx->guest_uret_msrs[i].mask = -1ull;
7439 if (boot_cpu_has(X86_FEATURE_RTM)) {
7441 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
7442 * Keep the host value unchanged to avoid changing CPUID bits
7443 * under the host kernel's feet.
7445 tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7447 tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
7450 err = alloc_loaded_vmcs(&vmx->vmcs01);
7455 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
7456 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
7457 * feature only for vmcs01, KVM currently isn't equipped to realize any
7458 * performance benefits from enabling it for vmcs02.
7460 if (kvm_is_using_evmcs() &&
7461 (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
7462 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
7464 evmcs->hv_enlightenments_control.msr_bitmap = 1;
7467 /* The MSR bitmap starts with all ones */
7468 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7469 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7471 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
7472 #ifdef CONFIG_X86_64
7473 vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
7474 vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
7475 vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
7477 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
7478 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
7479 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
7480 if (kvm_cstate_in_guest(vcpu->kvm)) {
7481 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
7482 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
7483 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
7484 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
7487 vmx->loaded_vmcs = &vmx->vmcs01;
7489 if (cpu_need_virtualize_apic_accesses(vcpu)) {
7490 err = kvm_alloc_apic_access_page(vcpu->kvm);
7495 if (enable_ept && !enable_unrestricted_guest) {
7496 err = init_rmode_identity_map(vcpu->kvm);
7501 if (vmx_can_use_ipiv(vcpu))
7502 WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
7503 __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
7508 free_loaded_vmcs(vmx->loaded_vmcs);
7510 vmx_destroy_pml_buffer(vmx);
7512 free_vpid(vmx->vpid);
7516 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7517 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7519 static int vmx_vm_init(struct kvm *kvm)
7522 kvm->arch.pause_in_guest = true;
7524 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
7525 switch (l1tf_mitigation) {
7526 case L1TF_MITIGATION_OFF:
7527 case L1TF_MITIGATION_FLUSH_NOWARN:
7528 /* 'I explicitly don't care' is set */
7530 case L1TF_MITIGATION_FLUSH:
7531 case L1TF_MITIGATION_FLUSH_NOSMT:
7532 case L1TF_MITIGATION_FULL:
7534 * Warn upon starting the first VM in a potentially
7535 * insecure environment.
7537 if (sched_smt_active())
7538 pr_warn_once(L1TF_MSG_SMT);
7539 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
7540 pr_warn_once(L1TF_MSG_L1D);
7542 case L1TF_MITIGATION_FULL_FORCE:
7543 /* Flush is enforced */
7550 static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
7554 /* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
7555 * memory aliases with conflicting memory types and sometimes MCEs.
7556 * We have to be careful as to what is honored and when.
7558 * For MMIO, guest CD/MTRR are ignored. The EPT memory type is set to
7559 * UC. The effective memory type is UC or WC depending on guest PAT.
7560 * This was historically the source of MCEs and we want to be conservative.
7563 * When there is no need to deal with noncoherent DMA (e.g., no VT-d
7564 * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored. The
7565 * EPT memory type is set to WB. The effective memory type is forced WB.
7568 * Otherwise, we trust the guest. Guest CD/MTRR/PAT are all honored. The
7569 * EPT memory type is used to emulate guest CD/MTRR.
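 *
 * Illustrative EPTE memory-type results of the cases above (using the
 * usual MTRR type encodings, UC = 0 and WB = 6, for orientation only):
 *   MMIO                         -> UC << VMX_EPT_MT_EPTE_SHIFT
 *   no noncoherent DMA           -> (WB << VMX_EPT_MT_EPTE_SHIFT) | IPAT
 *   CR0.CD set, quirk present    -> WB | IPAT, otherwise UC | IPAT
 *   default                      -> guest MTRR type for the gfn
 */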
7573 return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
7575 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
7576 return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
7578 if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
7579 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
7580 cache = MTRR_TYPE_WRBACK;
7582 cache = MTRR_TYPE_UNCACHABLE;
7584 return (cache << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
7587 return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
7590 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
7593 * These bits in the secondary execution controls field
7594 * are dynamic; the others are mostly based on the hypervisor
7595 * architecture and the guest's CPUID. Do not touch the dynamic bits.
7599 SECONDARY_EXEC_SHADOW_VMCS |
7600 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
7601 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
7602 SECONDARY_EXEC_DESC;
7604 u32 cur_ctl = secondary_exec_controls_get(vmx);
7606 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
7610 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
7611 * (indicating "allowed-1") if they are supported in the guest's CPUID.
7613 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
7615 struct vcpu_vmx *vmx = to_vmx(vcpu);
7616 struct kvm_cpuid_entry2 *entry;
7618 vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
7619 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
7621 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \
7622 if (entry && (entry->_reg & (_cpuid_mask))) \
7623 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
7626 entry = kvm_find_cpuid_entry(vcpu, 0x1);
7627 cr4_fixed1_update(X86_CR4_VME, edx, feature_bit(VME));
7628 cr4_fixed1_update(X86_CR4_PVI, edx, feature_bit(VME));
7629 cr4_fixed1_update(X86_CR4_TSD, edx, feature_bit(TSC));
7630 cr4_fixed1_update(X86_CR4_DE, edx, feature_bit(DE));
7631 cr4_fixed1_update(X86_CR4_PSE, edx, feature_bit(PSE));
7632 cr4_fixed1_update(X86_CR4_PAE, edx, feature_bit(PAE));
7633 cr4_fixed1_update(X86_CR4_MCE, edx, feature_bit(MCE));
7634 cr4_fixed1_update(X86_CR4_PGE, edx, feature_bit(PGE));
7635 cr4_fixed1_update(X86_CR4_OSFXSR, edx, feature_bit(FXSR));
7636 cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM));
7637 cr4_fixed1_update(X86_CR4_VMXE, ecx, feature_bit(VMX));
7638 cr4_fixed1_update(X86_CR4_SMXE, ecx, feature_bit(SMX));
7639 cr4_fixed1_update(X86_CR4_PCIDE, ecx, feature_bit(PCID));
7640 cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, feature_bit(XSAVE));
7642 entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 0);
7643 cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, feature_bit(FSGSBASE));
7644 cr4_fixed1_update(X86_CR4_SMEP, ebx, feature_bit(SMEP));
7645 cr4_fixed1_update(X86_CR4_SMAP, ebx, feature_bit(SMAP));
7646 cr4_fixed1_update(X86_CR4_PKE, ecx, feature_bit(PKU));
7647 cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP));
7648 cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57));
7650 #undef cr4_fixed1_update
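/*
 * For example (illustration of the macro's effect only): a guest whose
 * CPUID enumerates UMIP in CPUID.(EAX=7,ECX=0):ECX is allowed to set
 * CR4.UMIP under nested VMX, while a guest without that feature bit keeps
 * CR4.UMIP fixed to 0 in MSR_IA32_VMX_CR4_FIXED1.
 */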
7653 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
7655 struct vcpu_vmx *vmx = to_vmx(vcpu);
7656 struct kvm_cpuid_entry2 *best = NULL;
7659 for (i = 0; i < PT_CPUID_LEAVES; i++) {
7660 best = kvm_find_cpuid_entry_index(vcpu, 0x14, i);
7663 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7664 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7665 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7666 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7669 /* Get the number of configurable Address Ranges for filtering */
7670 vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps,
7671 PT_CAP_num_address_ranges);
7673 /* Initialize and clear the no dependency bits */
7674 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
7675 RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC |
7676 RTIT_CTL_BRANCH_EN);
7679 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1, CR3Filter can be set; otherwise
7680 * setting it will inject a #GP.
7682 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7683 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
7686 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
7687 * PSBFreq can be set
7689 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7690 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
7691 RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
7694 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set
7696 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7697 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
7698 RTIT_CTL_MTC_RANGE);
7700 /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
7701 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7702 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
7705 /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
7706 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7707 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
7709 /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
7710 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7711 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
7713 /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
7714 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7715 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
7717 /* unmask address range configure area */
7718 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++)
7719 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
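/*
 * E.g. with two address ranges (illustration of the loop above): the 4-bit
 * ADDRn_CFG fields at RTIT_CTL bits 35:32 and 39:36 are cleared from
 * ctl_bitmask, i.e. ~(0xfULL << 32) and ~(0xfULL << 36), so the guest may
 * program those filter ranges without taking a #GP.
 */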
7722 static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
7724 struct vcpu_vmx *vmx = to_vmx(vcpu);
7726 /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */
7727 vcpu->arch.xsaves_enabled = false;
7729 vmx_setup_uret_msrs(vmx);
7731 if (cpu_has_secondary_exec_ctrls())
7732 vmcs_set_secondary_exec_control(vmx,
7733 vmx_secondary_exec_control(vmx));
7735 if (nested_vmx_allowed(vcpu))
7736 vmx->msr_ia32_feature_control_valid_bits |=
7737 FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7738 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
7740 vmx->msr_ia32_feature_control_valid_bits &=
7741 ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7742 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
7744 if (nested_vmx_allowed(vcpu))
7745 nested_vmx_cr_fixed1_bits_update(vcpu);
7747 if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
7748 guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
7749 update_intel_pt_cfg(vcpu);
7751 if (boot_cpu_has(X86_FEATURE_RTM)) {
7752 struct vmx_uret_msr *msr;
7753 msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7755 bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
7756 vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
7760 if (kvm_cpu_cap_has(X86_FEATURE_XFD))
7761 vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
7762 !guest_cpuid_has(vcpu, X86_FEATURE_XFD));
7765 set_cr4_guest_host_mask(vmx);
7767 vmx_write_encls_bitmap(vcpu, NULL);
7768 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX))
7769 vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
7771 vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
7773 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
7774 vmx->msr_ia32_feature_control_valid_bits |=
7775 FEAT_CTL_SGX_LC_ENABLED;
7777 vmx->msr_ia32_feature_control_valid_bits &=
7778 ~FEAT_CTL_SGX_LC_ENABLED;
7780 /* Refresh #PF interception to account for MAXPHYADDR changes. */
7781 vmx_update_exception_bitmap(vcpu);
7784 static u64 vmx_get_perf_capabilities(void)
7786 u64 perf_cap = PMU_CAP_FW_WRITES;
7787 struct x86_pmu_lbr lbr;
7788 u64 host_perf_cap = 0;
7793 if (boot_cpu_has(X86_FEATURE_PDCM))
7794 rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
7796 x86_perf_get_lbr(&lbr);
7798 perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
7800 if (vmx_pebs_supported()) {
7801 perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
7802 if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
7803 perf_cap &= ~PERF_CAP_PEBS_BASELINE;
7809 static __init void vmx_set_cpu_caps(void)
7815 kvm_cpu_cap_set(X86_FEATURE_VMX);
7818 if (kvm_mpx_supported())
7819 kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
7820 if (!cpu_has_vmx_invpcid())
7821 kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
7822 if (vmx_pt_mode_is_host_guest())
7823 kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
7824 if (vmx_pebs_supported()) {
7825 kvm_cpu_cap_check_and_set(X86_FEATURE_DS);
7826 kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
7830 kvm_cpu_cap_clear(X86_FEATURE_PDCM);
7831 kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
7834 kvm_cpu_cap_clear(X86_FEATURE_SGX);
7835 kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
7836 kvm_cpu_cap_clear(X86_FEATURE_SGX1);
7837 kvm_cpu_cap_clear(X86_FEATURE_SGX2);
7840 if (vmx_umip_emulated())
7841 kvm_cpu_cap_set(X86_FEATURE_UMIP);
7844 kvm_caps.supported_xss = 0;
7845 if (!cpu_has_vmx_xsaves())
7846 kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
7848 /* CPUID 0x80000001 and 0x7 (RDPID) */
7849 if (!cpu_has_vmx_rdtscp()) {
7850 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
7851 kvm_cpu_cap_clear(X86_FEATURE_RDPID);
7854 if (cpu_has_vmx_waitpkg())
7855 kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
7858 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
7860 to_vmx(vcpu)->req_immediate_exit = true;
7863 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
7864 struct x86_instruction_info *info)
7866 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7867 unsigned short port;
7871 if (info->intercept == x86_intercept_in ||
7872 info->intercept == x86_intercept_ins) {
7873 port = info->src_val;
7874 size = info->dst_bytes;
7876 port = info->dst_val;
7877 size = info->src_bytes;
7881 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
7882 * VM-exits depend on the 'unconditional IO exiting' VM-execution control.
7885 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
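 *
 * For instance (illustration only; the split assumed here is the usual VMX
 * layout where bitmap A covers ports 0x0000-0x7fff and bitmap B covers
 * 0x8000-0xffff): a one-byte OUT to port 0x80 while IO bitmaps are in use
 * is intercepted iff bit 0x80 of vmcs12's IO bitmap A is set, which is
 * what nested_vmx_check_io_bitmaps() evaluates below.
 */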
7887 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
7888 intercept = nested_cpu_has(vmcs12,
7889 CPU_BASED_UNCOND_IO_EXITING);
7891 intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
7893 /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
7894 return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
7897 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
7898 struct x86_instruction_info *info,
7899 enum x86_intercept_stage stage,
7900 struct x86_exception *exception)
7902 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7904 switch (info->intercept) {
7906 * RDPID causes #UD if disabled through secondary execution controls.
7907 * Because it is marked as EmulateOnUD, we need to intercept it here.
7908 * Note, RDPID is hidden behind ENABLE_RDTSCP.
7910 case x86_intercept_rdpid:
7911 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
7912 exception->vector = UD_VECTOR;
7913 exception->error_code_valid = false;
7914 return X86EMUL_PROPAGATE_FAULT;
7918 case x86_intercept_in:
7919 case x86_intercept_ins:
7920 case x86_intercept_out:
7921 case x86_intercept_outs:
7922 return vmx_check_intercept_io(vcpu, info);
7924 case x86_intercept_lgdt:
7925 case x86_intercept_lidt:
7926 case x86_intercept_lldt:
7927 case x86_intercept_ltr:
7928 case x86_intercept_sgdt:
7929 case x86_intercept_sidt:
7930 case x86_intercept_sldt:
7931 case x86_intercept_str:
7932 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
7933 return X86EMUL_CONTINUE;
7935 /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
7938 /* TODO: check more intercepts... */
7943 return X86EMUL_UNHANDLEABLE;
7946 #ifdef CONFIG_X86_64
7947 /* (a << shift) / divisor; returns 1 on overflow, otherwise 0 */
7948 static inline int u64_shl_div_u64(u64 a, unsigned int shift,
7949 u64 divisor, u64 *result)
7951 u64 low = a << shift, high = a >> (64 - shift);
7953 /* To avoid the overflow on divq */
7954 if (high >= divisor)
7957 /* Low holds the result, high holds the remainder, which is discarded */
7958 asm("divq %2\n\t" : "=a" (low), "=d" (high) :
7959 "rm" (divisor), "0" (low), "1" (high));
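/*
 * Worked example (illustration only): u64_shl_div_u64(5, 10, 3, &res) yields
 * (5 << 10) / 3 = 5120 / 3 = 1706 with the remainder discarded.  The
 * high/low split lets divq handle intermediate values wider than 64 bits,
 * and the early return above reports overflow when high >= divisor, i.e.
 * when the quotient itself would not fit in 64 bits.
 */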
7965 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
7968 struct vcpu_vmx *vmx;
7969 u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
7970 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
7974 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
7975 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
7976 lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
7977 ktimer->timer_advance_ns);
7979 if (delta_tsc > lapic_timer_advance_cycles)
7980 delta_tsc -= lapic_timer_advance_cycles;
7984 /* Convert to host delta tsc if tsc scaling is enabled */
7985 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio &&
7986 delta_tsc && u64_shl_div_u64(delta_tsc,
7987 kvm_caps.tsc_scaling_ratio_frac_bits,
7988 vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
7992 * If the delta tsc can't fit in 32 bits after the multiplier shift,
7993 * we can't use the preemption timer.
7994 * It's possible that it would fit on later vmentries, but checking
7995 * on every vmentry is costly so we just use an hrtimer.
7997 if (delta_tsc >> (cpu_preemption_timer_multi + 32))
8000 vmx->hv_deadline_tsc = tscl + delta_tsc;
8001 *expired = !delta_tsc;
8005 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
8007 to_vmx(vcpu)->hv_deadline_tsc = -1;
8011 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
8013 if (!kvm_pause_in_guest(vcpu->kvm))
8014 shrink_ple_window(vcpu);
8017 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
8019 struct vcpu_vmx *vmx = to_vmx(vcpu);
8021 if (WARN_ON_ONCE(!enable_pml))
8024 if (is_guest_mode(vcpu)) {
8025 vmx->nested.update_vmcs01_cpu_dirty_logging = true;
8030 * Note, nr_memslots_dirty_logging can be changed concurrently with this
8031 * code, but in that case another update request will be made and so
8032 * the guest will never run with a stale PML value.
8034 if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
8035 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8037 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8040 static void vmx_setup_mce(struct kvm_vcpu *vcpu)
8042 if (vcpu->arch.mcg_cap & MCG_LMCE_P)
8043 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
8044 FEAT_CTL_LMCE_ENABLED;
8046 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
8047 ~FEAT_CTL_LMCE_ENABLED;
8050 #ifdef CONFIG_KVM_SMM
8051 static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
8053 /* we need a nested vmexit to enter SMM, postpone if run is pending */
8054 if (to_vmx(vcpu)->nested.nested_run_pending)
8056 return !is_smm(vcpu);
8059 static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
8061 struct vcpu_vmx *vmx = to_vmx(vcpu);
8064 * TODO: Implement custom flows for forcing the vCPU out/in of L2 on
8065 * SMI and RSM. Using the common VM-Exit + VM-Enter routines is wrong, as
8066 * SMI and RSM only modify state that is saved and restored via SMRAM.
8067 * E.g. most MSRs are left untouched, but many are modified by VM-Exit
8068 * and VM-Enter, and thus L2's values may be corrupted on SMI+RSM.
8070 vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
8071 if (vmx->nested.smm.guest_mode)
8072 nested_vmx_vmexit(vcpu, -1, 0, 0);
8074 vmx->nested.smm.vmxon = vmx->nested.vmxon;
8075 vmx->nested.vmxon = false;
8076 vmx_clear_hlt(vcpu);
8080 static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
8082 struct vcpu_vmx *vmx = to_vmx(vcpu);
8085 if (vmx->nested.smm.vmxon) {
8086 vmx->nested.vmxon = true;
8087 vmx->nested.smm.vmxon = false;
8090 if (vmx->nested.smm.guest_mode) {
8091 ret = nested_vmx_enter_non_root_mode(vcpu, false);
8095 vmx->nested.nested_run_pending = 1;
8096 vmx->nested.smm.guest_mode = false;
8101 static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
8103 /* RSM will cause a vmexit anyway. */
8107 static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
8109 return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
8112 static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
8114 if (is_guest_mode(vcpu)) {
8115 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
8117 if (hrtimer_try_to_cancel(timer) == 1)
8118 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
8122 static void vmx_hardware_unsetup(void)
8124 kvm_set_posted_intr_wakeup_handler(NULL);
8127 nested_vmx_hardware_unsetup();
8132 #define VMX_REQUIRED_APICV_INHIBITS \
8133 ( \
8134 BIT(APICV_INHIBIT_REASON_DISABLE) | \
8135 BIT(APICV_INHIBIT_REASON_ABSENT) | \
8136 BIT(APICV_INHIBIT_REASON_HYPERV) | \
8137 BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | \
8138 BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) | \
8139 BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | \
8140 BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) \
8141 )
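/*
 * Editorial note: each bit above names a condition under which KVM must
 * inhibit (temporarily disable) APIC virtualization for the whole VM,
 * e.g. APICV_INHIBIT_REASON_BLOCKIRQ covers userspace debugging with
 * KVM_GUESTDBG_BLOCKIRQ; inhibit reasons not listed here are irrelevant
 * to VMX and are ignored.
 */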
8143 static void vmx_vm_destroy(struct kvm *kvm)
8144 {
8145 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
8147 free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
8148 }
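/*
 * Editorial note: the per-VM PID table is allocated only when IPI
 * virtualization is in use; it maps vCPU IDs to posted-interrupt
 * descriptors so hardware can deliver virtual IPIs without a VM-Exit,
 * and freeing it here mirrors that allocation.
 */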
8150 static struct kvm_x86_ops vmx_x86_ops __initdata = {
8151 .name = KBUILD_MODNAME,
8153 .check_processor_compatibility = vmx_check_processor_compat,
8155 .hardware_unsetup = vmx_hardware_unsetup,
8157 .hardware_enable = vmx_hardware_enable,
8158 .hardware_disable = vmx_hardware_disable,
8159 .has_emulated_msr = vmx_has_emulated_msr,
8161 .vm_size = sizeof(struct kvm_vmx),
8162 .vm_init = vmx_vm_init,
8163 .vm_destroy = vmx_vm_destroy,
8165 .vcpu_precreate = vmx_vcpu_precreate,
8166 .vcpu_create = vmx_vcpu_create,
8167 .vcpu_free = vmx_vcpu_free,
8168 .vcpu_reset = vmx_vcpu_reset,
8170 .prepare_switch_to_guest = vmx_prepare_switch_to_guest,
8171 .vcpu_load = vmx_vcpu_load,
8172 .vcpu_put = vmx_vcpu_put,
8174 .update_exception_bitmap = vmx_update_exception_bitmap,
8175 .get_msr_feature = vmx_get_msr_feature,
8176 .get_msr = vmx_get_msr,
8177 .set_msr = vmx_set_msr,
8178 .get_segment_base = vmx_get_segment_base,
8179 .get_segment = vmx_get_segment,
8180 .set_segment = vmx_set_segment,
8181 .get_cpl = vmx_get_cpl,
8182 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
8183 .set_cr0 = vmx_set_cr0,
8184 .is_valid_cr4 = vmx_is_valid_cr4,
8185 .set_cr4 = vmx_set_cr4,
8186 .set_efer = vmx_set_efer,
8187 .get_idt = vmx_get_idt,
8188 .set_idt = vmx_set_idt,
8189 .get_gdt = vmx_get_gdt,
8190 .set_gdt = vmx_set_gdt,
8191 .set_dr7 = vmx_set_dr7,
8192 .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
8193 .cache_reg = vmx_cache_reg,
8194 .get_rflags = vmx_get_rflags,
8195 .set_rflags = vmx_set_rflags,
8196 .get_if_flag = vmx_get_if_flag,
8198 .flush_tlb_all = vmx_flush_tlb_all,
8199 .flush_tlb_current = vmx_flush_tlb_current,
8200 .flush_tlb_gva = vmx_flush_tlb_gva,
8201 .flush_tlb_guest = vmx_flush_tlb_guest,
8203 .vcpu_pre_run = vmx_vcpu_pre_run,
8204 .vcpu_run = vmx_vcpu_run,
8205 .handle_exit = vmx_handle_exit,
8206 .skip_emulated_instruction = vmx_skip_emulated_instruction,
8207 .update_emulated_instruction = vmx_update_emulated_instruction,
8208 .set_interrupt_shadow = vmx_set_interrupt_shadow,
8209 .get_interrupt_shadow = vmx_get_interrupt_shadow,
8210 .patch_hypercall = vmx_patch_hypercall,
8211 .inject_irq = vmx_inject_irq,
8212 .inject_nmi = vmx_inject_nmi,
8213 .inject_exception = vmx_inject_exception,
8214 .cancel_injection = vmx_cancel_injection,
8215 .interrupt_allowed = vmx_interrupt_allowed,
8216 .nmi_allowed = vmx_nmi_allowed,
8217 .get_nmi_mask = vmx_get_nmi_mask,
8218 .set_nmi_mask = vmx_set_nmi_mask,
8219 .enable_nmi_window = vmx_enable_nmi_window,
8220 .enable_irq_window = vmx_enable_irq_window,
8221 .update_cr8_intercept = vmx_update_cr8_intercept,
8222 .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
8223 .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
8224 .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
8225 .load_eoi_exitmap = vmx_load_eoi_exitmap,
8226 .apicv_post_state_restore = vmx_apicv_post_state_restore,
8227 .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
8228 .hwapic_irr_update = vmx_hwapic_irr_update,
8229 .hwapic_isr_update = vmx_hwapic_isr_update,
8230 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
8231 .sync_pir_to_irr = vmx_sync_pir_to_irr,
8232 .deliver_interrupt = vmx_deliver_interrupt,
8233 .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
8235 .set_tss_addr = vmx_set_tss_addr,
8236 .set_identity_map_addr = vmx_set_identity_map_addr,
8237 .get_mt_mask = vmx_get_mt_mask,
8239 .get_exit_info = vmx_get_exit_info,
8241 .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
8243 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
8245 .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
8246 .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
8247 .write_tsc_offset = vmx_write_tsc_offset,
8248 .write_tsc_multiplier = vmx_write_tsc_multiplier,
8250 .load_mmu_pgd = vmx_load_mmu_pgd,
8252 .check_intercept = vmx_check_intercept,
8253 .handle_exit_irqoff = vmx_handle_exit_irqoff,
8255 .request_immediate_exit = vmx_request_immediate_exit,
8257 .sched_in = vmx_sched_in,
8259 .cpu_dirty_log_size = PML_ENTITY_NUM,
8260 .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
8262 .nested_ops = &vmx_nested_ops,
8264 .pi_update_irte = vmx_pi_update_irte,
8265 .pi_start_assignment = vmx_pi_start_assignment,
8267 #ifdef CONFIG_X86_64
8268 .set_hv_timer = vmx_set_hv_timer,
8269 .cancel_hv_timer = vmx_cancel_hv_timer,
8270 #endif
8272 .setup_mce = vmx_setup_mce,
8274 #ifdef CONFIG_KVM_SMM
8275 .smi_allowed = vmx_smi_allowed,
8276 .enter_smm = vmx_enter_smm,
8277 .leave_smm = vmx_leave_smm,
8278 .enable_smi_window = vmx_enable_smi_window,
8279 #endif
8281 .can_emulate_instruction = vmx_can_emulate_instruction,
8282 .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
8283 .migrate_timers = vmx_migrate_timers,
8285 .msr_filter_changed = vmx_msr_filter_changed,
8286 .complete_emulated_msr = kvm_complete_insn_gp,
8288 .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
8289 };
8291 static unsigned int vmx_handle_intel_pt_intr(void)
8292 {
8293 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
8295 /* '0' on failure so that the !PT case can use a RET0 static call. */
8296 if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
8297 return 0;
8299 kvm_make_request(KVM_REQ_PMI, vcpu);
8300 __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
8301 (unsigned long *)&vcpu->arch.pmu.global_status);
8302 return 1;
8303 }
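/*
 * Editorial note: this runs from the host PMI handler when an Intel PT
 * ToPA PMI arrives while a vCPU was running with guest-owned PT.  It
 * forwards the event as a pending KVM_REQ_PMI plus the ToPA overflow
 * bit in the guest's GLOBAL_STATUS, and returns 1 to report the
 * interrupt as handled.
 */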
8305 static __init void vmx_setup_user_return_msrs(void)
8309 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
8310 * will emulate SYSCALL in legacy mode if the vendor string in guest
8311 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
8312 * support this emulation, MSR_STAR is included in the list for i386,
8313 * but is never loaded into hardware. MSR_CSTAR is also never loaded
8314 * into hardware and is here purely for emulation purposes.
8316 const u32 vmx_uret_msrs_list[] = {
8317 #ifdef CONFIG_X86_64
8318 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
8319 #endif
8320 MSR_EFER, MSR_TSC_AUX, MSR_STAR,
8321 MSR_IA32_TSX_CTRL,
8322 };
8323 int i;
8325 BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
8327 for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
8328 kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
8329 }
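/*
 * Editorial note: "user return" MSRs are MSRs whose host values only
 * matter once the CPU returns to userspace; KVM loads the guest values
 * around VM-Entry and defers restoring the host values to the
 * return-to-userspace path, avoiding WRMSRs on every VM-Exit.
 */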
8331 static void __init vmx_setup_me_spte_mask(void)
8332 {
8333 u64 me_mask = 0;
8336 * kvm_get_shadow_phys_bits() returns shadow_phys_bits. Use
8337 * the former to avoid exposing shadow_phys_bits.
8339 * On pre-MKTME systems, boot_cpu_data.x86_phys_bits equals
8340 * shadow_phys_bits. On MKTME and/or TDX capable systems,
8341 * boot_cpu_data.x86_phys_bits holds the actual physical address
8342 * width without the KeyID bits, and shadow_phys_bits equals MAXPHYADDR
8343 * as reported by CPUID. The bits in between are the KeyID bits.
8345 if (boot_cpu_data.x86_phys_bits != kvm_get_shadow_phys_bits())
8346 me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
8347 kvm_get_shadow_phys_bits() - 1);
8349 * Unlike SME, the host kernel doesn't support setting up any
8350 * MKTME KeyID on Intel platforms. No memory encryption
8351 * bits should be included in the SPTE.
8353 kvm_mmu_set_me_spte_mask(0, me_mask);
8354 }
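/*
 * Editorial note (hypothetical numbers): on an MKTME-capable part where
 * CPUID reports MAXPHYADDR = 52 but boot_cpu_data.x86_phys_bits is 46,
 * bits 51:46 are KeyID bits, so me_mask = rsvd_bits(46, 51) and those
 * bits are excluded from the SPTE reserved-bit checks by
 * kvm_mmu_set_me_spte_mask(0, me_mask).
 */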
8356 static struct kvm_x86_init_ops vmx_init_ops __initdata;
8358 static __init int hardware_setup(void)
8359 {
8360 unsigned long host_bndcfgs;
8361 struct desc_ptr dt;
8362 int r;
8364 store_idt(&dt);
8365 host_idt_base = dt.address;
8367 vmx_setup_user_return_msrs();
8369 if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
8370 return -EIO;
8372 if (cpu_has_perf_global_ctrl_bug())
8373 pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
8374 "does not work properly. Using workaround\n");
8376 if (boot_cpu_has(X86_FEATURE_NX))
8377 kvm_enable_efer_bits(EFER_NX);
8379 if (boot_cpu_has(X86_FEATURE_MPX)) {
8380 rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
8381 WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
8382 }
8384 if (!cpu_has_vmx_mpx())
8385 kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
8386 XFEATURE_MASK_BNDCSR);
8388 if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
8389 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
8390 enable_vpid = 0;
8392 if (!cpu_has_vmx_ept() ||
8393 !cpu_has_vmx_ept_4levels() ||
8394 !cpu_has_vmx_ept_mt_wb() ||
8395 !cpu_has_vmx_invept_global())
8396 enable_ept = 0;
8398 /* NX support is required for shadow paging. */
8399 if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
8400 pr_err_ratelimited("NX (Execute Disable) not supported\n");
8401 return -EOPNOTSUPP;
8402 }
8404 if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
8405 enable_ept_ad_bits = 0;
8407 if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
8408 enable_unrestricted_guest = 0;
8410 if (!cpu_has_vmx_flexpriority())
8411 flexpriority_enabled = 0;
8413 if (!cpu_has_virtual_nmis())
8414 enable_vnmi = 0;
8416 #ifdef CONFIG_X86_SGX_KVM
8417 if (!cpu_has_vmx_encls_vmexit())
8418 enable_sgx = false;
8419 #endif
8422 * set_apic_access_page_addr() is used to reload apic access
8423 * page upon invalidation. No need to do anything if not
8424 * using the APIC_ACCESS_ADDR VMCS field.
8426 if (!flexpriority_enabled)
8427 vmx_x86_ops.set_apic_access_page_addr = NULL;
8429 if (!cpu_has_vmx_tpr_shadow())
8430 vmx_x86_ops.update_cr8_intercept = NULL;
8432 #if IS_ENABLED(CONFIG_HYPERV)
8433 if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
8434 && enable_ept) {
8435 vmx_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
8436 vmx_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
8437 }
8438 #endif
8440 if (!cpu_has_vmx_ple()) {
8441 ple_gap = 0;
8442 ple_window = 0;
8443 ple_window_grow = 0;
8444 ple_window_max = 0;
8445 ple_window_shrink = 0;
8446 }
8448 if (!cpu_has_vmx_apicv())
8449 enable_apicv = 0;
8450 if (!enable_apicv)
8451 vmx_x86_ops.sync_pir_to_irr = NULL;
8453 if (!enable_apicv || !cpu_has_vmx_ipiv())
8454 enable_ipiv = false;
8456 if (cpu_has_vmx_tsc_scaling())
8457 kvm_caps.has_tsc_control = true;
8459 kvm_caps.max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
8460 kvm_caps.tsc_scaling_ratio_frac_bits = 48;
8461 kvm_caps.has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
8462 kvm_caps.has_notify_vmexit = cpu_has_notify_vmexit();
8464 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
8466 if (enable_ept)
8467 kvm_mmu_set_ept_masks(enable_ept_ad_bits,
8468 cpu_has_vmx_ept_execute_only());
8471 * Set up shadow_me_value/shadow_me_mask so that the MKTME KeyID
8472 * bits are added to shadow_zero_check.
8474 vmx_setup_me_spte_mask();
8476 kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
8477 ept_caps_to_lpage_level(vmx_capability.ept));
8480 * Only enable PML when hardware supports the PML feature, and both EPT
8481 * and EPT A/D bit features are enabled -- PML depends on them to work.
8483 if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
8484 enable_pml = 0;
8486 if (!enable_pml)
8487 vmx_x86_ops.cpu_dirty_log_size = 0;
8489 if (!cpu_has_vmx_preemption_timer())
8490 enable_preemption_timer = false;
8492 if (enable_preemption_timer) {
8493 u64 use_timer_freq = 5000ULL * 1000 * 1000;
8495 cpu_preemption_timer_multi =
8496 vmcs_config.misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
8498 if (tsc_khz)
8499 use_timer_freq = (u64)tsc_khz * 1000;
8500 use_timer_freq >>= cpu_preemption_timer_multi;
8503 * KVM "disables" the preemption timer by setting it to its max
8504 * value. Don't use the timer if it might cause spurious exits
8505 * at a rate faster than 0.1 Hz (of uninterrupted guest time).
8507 if (use_timer_freq > 0xffffffffu / 10)
8508 enable_preemption_timer = false;
8509 }
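/*
 * Editorial note (hypothetical numbers): with tsc_khz = 3,000,000
 * (a 3 GHz TSC) and a rate shift of 5, use_timer_freq is
 * 3e9 >> 5 ~= 93.75 MHz, so the all-ones "disabled" value expires only
 * after 2^32 / 93.75e6 ~= 45.8 seconds, well above the 10 second
 * (0.1 Hz) limit, and the preemption timer stays enabled.  Only a
 * shifted rate above roughly 429 MHz (0xffffffff / 10) trips the check.
 */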
8511 if (!enable_preemption_timer) {
8512 vmx_x86_ops.set_hv_timer = NULL;
8513 vmx_x86_ops.cancel_hv_timer = NULL;
8514 vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
8515 }
8517 kvm_caps.supported_mce_cap |= MCG_LMCE_P;
8518 kvm_caps.supported_mce_cap |= MCG_CMCI_P;
8520 if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
8521 pt_mode = PT_MODE_SYSTEM;
8522 if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
8523 pt_mode = PT_MODE_SYSTEM;
8524 if (pt_mode == PT_MODE_HOST_GUEST)
8525 vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
8526 else
8527 vmx_init_ops.handle_intel_pt_intr = NULL;
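/*
 * Editorial note: PT_MODE_SYSTEM leaves Intel PT tracing under host
 * control only, while PT_MODE_HOST_GUEST context-switches the PT MSRs
 * so the guest can use PT itself; only the latter needs the PT PMI
 * callback installed above.
 */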
8529 setup_default_sgx_lepubkeyhash();
8531 if (nested) {
8532 nested_vmx_setup_ctls_msrs(&vmcs_config, vmx_capability.ept);
8534 r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
8535 if (r)
8536 return r;
8537 }
8539 vmx_set_cpu_caps();
8541 r = alloc_kvm_area();
8542 if (r && nested)
8543 nested_vmx_hardware_unsetup();
8545 kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
8547 return r;
8548 }
8550 static struct kvm_x86_init_ops vmx_init_ops __initdata = {
8551 .hardware_setup = hardware_setup,
8552 .handle_intel_pt_intr = NULL,
8554 .runtime_ops = &vmx_x86_ops,
8555 .pmu_ops = &intel_pmu_ops,
8556 };
8558 static void vmx_cleanup_l1d_flush(void)
8559 {
8560 if (vmx_l1d_flush_pages) {
8561 free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
8562 vmx_l1d_flush_pages = NULL;
8563 }
8564 /* Restore state so sysfs ignores VMX */
8565 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
8566 }
8568 static void __vmx_exit(void)
8569 {
8570 allow_smaller_maxphyaddr = false;
8572 #ifdef CONFIG_KEXEC_CORE
8573 RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
8574 synchronize_rcu();
8575 #endif
8576 vmx_cleanup_l1d_flush();
8577 }
8579 static void vmx_exit(void)
8580 {
8581 kvm_exit();
8582 kvm_x86_vendor_exit();
8584 __vmx_exit();
8585 }
8586 module_exit(vmx_exit);
8588 static int __init vmx_init(void)
8589 {
8590 int r, cpu;
8592 if (!kvm_is_vmx_supported())
8593 return -EOPNOTSUPP;
8596 * Note, hv_init_evmcs() touches only VMX knobs, i.e. there's nothing
8597 * to unwind if a later step fails.
8599 hv_init_evmcs();
8601 r = kvm_x86_vendor_init(&vmx_init_ops);
8602 if (r)
8603 return r;
8606 * Must be called after common x86 init so enable_ept is properly set
8607 * up. Hand in the mitigation parameter value that was stored by the
8608 * pre-module-init parser. If no parameter was given, it will contain
8609 * 'auto', which will be turned into the default 'cond' mitigation
8610 * mode.
8612 r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
8613 if (r)
8614 goto err_l1d_flush;
8616 vmx_setup_fb_clear_ctrl();
8618 for_each_possible_cpu(cpu) {
8619 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
8621 pi_init_cpu(cpu);
8622 }
8624 #ifdef CONFIG_KEXEC_CORE
8625 rcu_assign_pointer(crash_vmclear_loaded_vmcss,
8626 crash_vmclear_local_loaded_vmcss);
8627 #endif
8628 vmx_check_vmcs12_offsets();
8631 * Shadow paging doesn't have a (further) performance penalty
8632 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR, so enable it by
8633 * default in that case.
8635 if (!enable_ept)
8636 allow_smaller_maxphyaddr = true;
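/*
 * Editorial note: with EPT the guest's page-table walks are not
 * intercepted, so honoring GUEST_MAXPHYADDR < HOST_MAXPHYADDR would
 * require trapping #PF or relying on illegal-GPA exits; shadow paging
 * already intercepts guest page faults, hence the default is only
 * flipped when EPT is disabled.
 */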
8639 * Common KVM initialization _must_ come last; only after this is
8640 * /dev/kvm exposed to userspace!
8642 r = kvm_init(sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx),
8643 THIS_MODULE);
8644 if (r)
8645 goto err_kvm_init;
8647 return 0;
8649 err_kvm_init:
8650 __vmx_exit();
8651 err_l1d_flush:
8652 kvm_x86_vendor_exit();
8653 return r;
8654 }
8655 module_init(vmx_init);