/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/intel_pt.h>

#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
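/*
 * Example (illustrative): the xAPIC TPR register lives at offset 0x80 of the
 * APIC MMIO page, so X2APIC_MSR(APIC_TASKPRI) == 0x800 + (0x80 >> 4) == 0x808,
 * the x2APIC TPR MSR.
 */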
#define NR_AUTOLOAD_MSRS 8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];
};
struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};
enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};
/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);
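/*
 * Sketch (simplified from the delivery path in vmx.c) of how a sender posts
 * an interrupt using the helpers declared later in this file: the vector is
 * first marked pending in pir[], then ON is set.  A notification IPI is
 * needed only when ON was previously clear:
 *
 *	if (pi_test_and_set_pir(vector, pi_desc))
 *		return;
 *	if (pi_test_and_set_on(pi_desc))
 *		return;
 *	(send POSTED_INTR_VECTOR as an IPI to the target CPU)
 */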
#define RTIT_ADDR_RANGE		4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};
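/*
 * The caps[] array above flattens the output of CPUID leaf 0x14: registers
 * are stored leaf-major, so a (leaf, register) pair is read back as in this
 * hypothetical helper (for illustration only):
 *
 *	static inline u32 pt_cap_reg(struct pt_desc *pt, int leaf, int reg)
 *	{
 *		return pt->caps[leaf * PT_CPUID_REGS_NUM + reg];
 *	}
 */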
/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;
	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_sync;
	bool dirty_vmcs12;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct page *virtual_apic_page;
	struct page *pi_desc_page;
	struct pi_desc *pi_desc;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct page *hv_evmcs_page;
	struct hv_enlightened_vmcs *hv_evmcs;
};
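/*
 * Example (illustrative): nested_run_pending covers the window between
 * emulating a successful VMLAUNCH/VMRESUME and actually entering L2, so
 * event-injection paths back off rather than synthesize an exit to L1:
 *
 *	if (vmx->nested.nested_run_pending)
 *		return -EBUSY;
 */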
struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u32                   idt_vectoring_info;
	struct shared_msr_entry *guest_msrs;
	bool                  guest_msrs_dirty;
	unsigned long         host_idt_base;
#ifdef CONFIG_X86_64
	u64                   msr_host_kernel_gs_base;
	u64                   msr_guest_kernel_gs_base;
#endif

	u32 vm_entry_controls_shadow;
	u32 vm_exit_controls_shadow;
	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.  loaded_cpu_state points
	 * to the VMCS whose state is loaded into the CPU registers that only
	 * need to be switched when transitioning to/from the kernel; a NULL
	 * value indicates that host state is loaded.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;
	struct loaded_vmcs   *loaded_cpu_state;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;

	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;

	int vpid;
	bool emulation_required;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM 512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;

	struct pt_desc pt_desc;
};
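/*
 * Sketch (simplified from the WRMSR handling in vmx.c) of the validity rule
 * described above for IA32_FEATURE_CONTROL:
 *
 *	static inline bool vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
 *							 u64 val)
 *	{
 *		return !(val & ~vmx->msr_ia32_feature_control_valid_bits);
 *	}
 */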
enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};
bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
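/*
 * Example (illustrative): find_msr_entry() returns the shared_msr_entry slot
 * for an MSR, or NULL if it is not shared, e.g.:
 *
 *	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 *	if (msr)
 *		msr->data = efer;
 */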
#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}
/* RVI (Requesting Virtual Interrupt) is the low byte of GUEST_INTR_STATUS. */
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}
static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
{
	vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
}

static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_ENTRY_CONTROLS, val);
	vmx->vm_entry_controls_shadow = val;
}

static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->vm_entry_controls_shadow != val)
		vm_entry_controls_init(vmx, val);
}

static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_entry_controls_shadow;
}

static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
}

static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
}
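/*
 * Example (illustrative): EFER loading is one of the controls toggled
 * dynamically through the shadow accessors above (and deliberately masked
 * out of vmx_vmentry_ctrl() below):
 *
 *	vm_entry_controls_setbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
 *	...
 *	vm_entry_controls_clearbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
 */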
static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
{
	vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
}

static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_EXIT_CONTROLS, val);
	vmx->vm_exit_controls_shadow = val;
}

static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->vm_exit_controls_shadow != val)
		vm_exit_controls_init(vmx, val);
}

static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_exit_controls_shadow;
}

static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
}

static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
}
static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}
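/*
 * Sketch (simplified from the cache accessors in vmx.c) of how the bitmask
 * cleared above is consumed: each (segment, field) pair owns one valid bit.
 *
 *	static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx,
 *					       unsigned int seg,
 *					       unsigned int field)
 *	{
 *		u32 mask = 1 << (seg * SEG_FIELD_NR + field);
 *		bool ret = vmx->segment_cache.bitmask & mask;
 *
 *		vmx->segment_cache.bitmask |= mask;
 *		return ret;
 *	}
 */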
static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;

	if (pt_mode == PT_MODE_SYSTEM)
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;

	if (pt_mode == PT_MODE_SYSTEM)
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}
u32 vmx_exec_control(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}
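/*
 * Example (illustrative): container_of() makes to_vmx() a zero-cost
 * conversion from the generic vcpu back to its enclosing vcpu_vmx:
 *
 *	struct vcpu_vmx *vmx = to_vmx(vcpu);
 *
 *	vmx->req_immediate_exit = true;
 */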
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}
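/*
 * Example (illustrative): a loaded_vmcs is allocated and released as a pair,
 * e.g. for the nested vmcs02:
 *
 *	if (alloc_loaded_vmcs(&vmx->nested.vmcs02))
 *		return -ENOMEM;
 *	...
 *	free_loaded_vmcs(&vmx->nested.vmcs02);
 */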
static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
				   bool invalidate_gpa)
{
	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
			return;
		ept_sync_context(construct_eptp(vcpu,
						vcpu->arch.mmu->root_hpa));
	} else {
		vpid_sync_context(vpid);
	}
}

static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}
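/*
 * Example (illustrative): callers that need combined (guest-physical)
 * mappings dropped, not just VPID-tagged linear ones, pass
 * invalidate_gpa == true so the EPT path is taken when EPT is enabled:
 *
 *	vmx_flush_tlb(vcpu, true);
 */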
static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

#endif /* __KVM_X86_VMX_H */