/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */
#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>

#include <asm/svm.h>
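
/*
 * MSRs that SVM hardware does not context-switch through the VMCB: KVM
 * saves the host values when the vcpu is loaded and restores them when
 * it is put.
 */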
static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
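
/*
 * VMCB "clean" bits: each enumerator below names a group of guest state
 * guarded by one bit of the VMCB clean field. A set bit tells hardware
 * it may reuse its cached copy of that state instead of reloading it
 * from the VMCB on VMRUN.
 */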
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
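
/*
 * Illustrative pairing (not a verbatim svm.c excerpt): any write to a
 * tracked VMCB field must drop the matching clean bit so that hardware
 * reloads the new value on the next VMRUN, e.g.:
 *
 *	svm->vmcb->control.tsc_offset = offset;
 *	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 */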
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* which host CPU was used for running this vcpu */
	unsigned int last_cpu;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
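
/*
 * While L2 runs, the active VMCB holds the merged L1/L2 controls and
 * L1's own state sits in the nested host save area, so updates meant
 * for L1 must go to hsave rather than to the current VMCB.
 */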
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}
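
/*
 * The set_ and clr_ helpers below edit L1's copy of an intercept and
 * then recalculate the effective masks, which while a nested guest is
 * running are the union of L1's and L2's intercepts.
 */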
static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}
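
/*
 * Debug-register intercepts are toggled wholesale: either every DR
 * read/write is intercepted or none is, so the two helpers below take
 * no bit argument.
 */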
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline bool is_intercept(struct vcpu_svm *svm, int bit)
{
	return (svm->vmcb->control.intercept & (1ULL << bit)) != 0;
}
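
/*
 * With virtual GIF (vGIF) the global interrupt flag lives in int_ctl
 * and is flipped by hardware on STGI/CLGI; without it, KVM tracks GIF
 * in software through HF_GIF_MASK in hflags.
 */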
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* svm.c */

#define MSR_INVALID			0xffffffffU

u32 svm_msrpm_offset(u32 msr);
void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
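
/*
 * Illustrative use of the codes above (not verbatim svm.c): while L2 is
 * running, an intercepted event is checked against L1's intercept mask;
 * NESTED_EXIT_DONE reflects it to L1 as a nested vmexit,
 * NESTED_EXIT_HOST lets L0 handle it, and NESTED_EXIT_CONTINUE defers
 * the decision until further checks run.
 */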
static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return (svm->nested.intercept & (1ULL << INTERCEPT_SMI));
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return (svm->nested.intercept & (1ULL << INTERCEPT_NMI));
}

void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb, struct kvm_host_map *map);
int nested_svm_vmrun(struct vcpu_svm *svm);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct vcpu_svm *svm);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	mark_dirty(svm->vmcb, VMCB_AVIC);
}
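
/*
 * A vCPU's entry in the physical APIC ID table advertises, via the
 * IS_RUNNING bit, whether the IOMMU may post interrupts to it directly;
 * the entry is read with READ_ONCE() because it is updated from
 * avic_vcpu_load()/avic_vcpu_put().
 */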
static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

extern unsigned int max_sev_asid;

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
int __init sev_hardware_setup(void);
void sev_hardware_teardown(void);

#endif /* __SVM_SVM_H */