1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * Yaniv Kamay <yaniv@qumranet.com>
12 * Avi Kivity <avi@qumranet.com>
15 #define pr_fmt(fmt) "SVM: " fmt
17 #include <linux/kvm_types.h>
18 #include <linux/kvm_host.h>
19 #include <linux/kernel.h>
21 #include <asm/msr-index.h>
22 #include <asm/debugreg.h>
24 #include "kvm_emulate.h"
32 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
34 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
35 struct x86_exception *fault)
37 struct vcpu_svm *svm = to_svm(vcpu);
39 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
41 * TODO: track the cause of the nested page fault, and
42 * correctly fill in the high bits of exit_info_1.
44 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
45 svm->vmcb->control.exit_code_hi = 0;
46 svm->vmcb->control.exit_info_1 = (1ULL << 32);
47 svm->vmcb->control.exit_info_2 = fault->address;
50 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
51 svm->vmcb->control.exit_info_1 |= fault->error_code;
53 nested_svm_vmexit(svm);
56 static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
58 struct vcpu_svm *svm = to_svm(vcpu);
59 WARN_ON(!is_guest_mode(vcpu));
61 if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
62 !svm->nested.nested_run_pending) {
63 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
64 svm->vmcb->control.exit_code_hi = 0;
65 svm->vmcb->control.exit_info_1 = fault->error_code;
66 svm->vmcb->control.exit_info_2 = fault->address;
67 nested_svm_vmexit(svm);
69 kvm_inject_page_fault(vcpu, fault);
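/*
 * Read one PDPTE from the page that L1's nested CR3 (vmcb12's nCR3) points
 * to; this is the ->get_pdptr() callback of the nested NPT MMU, used when
 * L1 runs its NPT in PAE format.
 */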
73 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
75 struct vcpu_svm *svm = to_svm(vcpu);
76 u64 cr3 = svm->nested.ctl.nested_cr3;
80 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
81 offset_in_page(cr3) + index * 8, 8);
87 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
89 struct vcpu_svm *svm = to_svm(vcpu);
91 return svm->nested.ctl.nested_cr3;
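/*
 * Point vcpu->arch.mmu at the nested NPT MMU so that L2 guest-physical
 * addresses are translated through L1's nested page tables, and nested
 * page faults are reflected to L1 as #NPF vmexits.
 */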
94 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
96 struct vcpu_svm *svm = to_svm(vcpu);
98 WARN_ON(mmu_is_nested(vcpu));
100 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
103 * The NPT format depends on L1's CR4 and EFER, which is in vmcb01. Note,
104 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
105 * vCPU state. CR0.WP is explicitly ignored, while CR0.PG is required.
107 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
108 svm->vmcb01.ptr->save.efer,
109 svm->nested.ctl.nested_cr3);
110 vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
111 vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
112 vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
113 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
116 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
118 vcpu->arch.mmu = &vcpu->arch.root_mmu;
119 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
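/*
 * Recompute the active (vmcb02) intercepts as the union of L1's intercepts
 * (vmcb01) and L2's intercepts (the cached vmcb12 controls), with a few
 * adjustments for bits that L0 handles differently while running L2.
 */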
122 void recalc_intercepts(struct vcpu_svm *svm)
124 struct vmcb_control_area *c, *h, *g;
127 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
129 if (!is_guest_mode(&svm->vcpu))
132 c = &svm->vmcb->control;
133 h = &svm->vmcb01.ptr->control;
134 g = &svm->nested.ctl;
136 for (i = 0; i < MAX_INTERCEPT; i++)
137 c->intercepts[i] = h->intercepts[i];
139 if (g->int_ctl & V_INTR_MASKING_MASK) {
140 /* We only want the cr8 intercept bits of L1 */
141 vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
142 vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
145 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
146 * affect any interrupt we may want to inject; therefore,
147 * interrupt window vmexits are irrelevant to L0.
149 vmcb_clr_intercept(c, INTERCEPT_VINTR);
152 /* We don't want to see VMMCALLs from a nested guest */
153 vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
155 for (i = 0; i < MAX_INTERCEPT; i++)
156 c->intercepts[i] |= g->intercepts[i];
158 /* If SMI is not intercepted, ignore guest SMI intercept as well */
160 vmcb_clr_intercept(c, INTERCEPT_SMI);
163 static void copy_vmcb_control_area(struct vmcb_control_area *dst,
164 struct vmcb_control_area *from)
168 for (i = 0; i < MAX_INTERCEPT; i++)
169 dst->intercepts[i] = from->intercepts[i];
171 dst->iopm_base_pa = from->iopm_base_pa;
172 dst->msrpm_base_pa = from->msrpm_base_pa;
173 dst->tsc_offset = from->tsc_offset;
174 /* asid not copied, it is handled manually for svm->vmcb. */
175 dst->tlb_ctl = from->tlb_ctl;
176 dst->int_ctl = from->int_ctl;
177 dst->int_vector = from->int_vector;
178 dst->int_state = from->int_state;
179 dst->exit_code = from->exit_code;
180 dst->exit_code_hi = from->exit_code_hi;
181 dst->exit_info_1 = from->exit_info_1;
182 dst->exit_info_2 = from->exit_info_2;
183 dst->exit_int_info = from->exit_int_info;
184 dst->exit_int_info_err = from->exit_int_info_err;
185 dst->nested_ctl = from->nested_ctl;
186 dst->event_inj = from->event_inj;
187 dst->event_inj_err = from->event_inj_err;
188 dst->nested_cr3 = from->nested_cr3;
189 dst->virt_ext = from->virt_ext;
190 dst->pause_filter_count = from->pause_filter_count;
191 dst->pause_filter_thresh = from->pause_filter_thresh;
194 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
* This function merges the MSR permission bitmaps of KVM (L0) and the
* nested vmcb (vmcb12). It only merges the parts where KVM's bitmap may
* contain zero (allow) bits; everywhere else L0 intercepts the MSR anyway,
* so L1's bits cannot make a difference.
203 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
206 for (i = 0; i < MSRPM_OFFSETS; i++) {
210 if (msrpm_offsets[i] == 0xffffffff)
213 p = msrpm_offsets[i];
214 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
216 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
219 svm->nested.msrpm[p] = svm->msrpm[p] | value;
222 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
228 * Bits 11:0 of bitmap address are ignored by hardware
230 static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
232 u64 addr = PAGE_ALIGN(pa);
234 return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
235 kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
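/*
 * Consistency checks on the vmcb12 control area; a failure makes the
 * emulated VMRUN fail with SVM_EXIT_ERR.
 */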
238 static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
239 struct vmcb_control_area *control)
241 if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
244 if (CC(control->asid == 0))
247 if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
250 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
253 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
260 static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
261 struct vmcb_save_area *save)
* These checks are also performed by KVM_SET_SREGS,
* except that SVM does not check EFER.LMA for consistency
* against CR0.PG and EFER.LME.
268 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
269 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
270 CC(!(save->cr0 & X86_CR0_PE)) ||
271 CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
275 if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
281 /* Common checks that apply to both L1 and L2 state. */
282 static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
283 struct vmcb_save_area *save)
* FIXME: these should be done after copying the fields,
* to avoid TOCTOU (time-of-check to time-of-use) races. For these save
* area checks the possible damage is limited since kvm_set_cr0 and
* kvm_set_cr4 handle failure; EFER_SVME is an exception,
* so it is force-set later in nested_vmcb02_prepare_save.
292 if (CC(!(save->efer & EFER_SVME)))
295 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
296 CC(save->cr0 & ~0xffffffffULL))
299 if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
302 if (!nested_vmcb_check_cr3_cr4(vcpu, save))
305 if (CC(!kvm_valid_efer(vcpu, save->efer)))
311 void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
312 struct vmcb_control_area *control)
314 copy_vmcb_control_area(&svm->nested.ctl, control);
/* Copy it here because nested_vmcb_check_controls will check it. */
317 svm->nested.ctl.asid = control->asid;
318 svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
319 svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
323 * Synchronize fields that are written by the processor, so that
324 * they can be copied back into the vmcb12.
326 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
329 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
330 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
332 /* Only a few fields of int_ctl are written by the processor. */
333 mask = V_IRQ_MASK | V_TPR_MASK;
334 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
335 svm_is_intercept(svm, INTERCEPT_VINTR)) {
337 * In order to request an interrupt window, L0 is usurping
338 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
339 * even if it was clear in L1's VMCB. Restoring it would be
340 * wrong. However, in this case V_IRQ will remain true until
341 * interrupt_window_interception calls svm_clear_vintr and
342 * restores int_ctl. We can just leave it aside.
346 svm->nested.ctl.int_ctl &= ~mask;
347 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
* Transfer any event that L0 or L1 wanted to inject into L2 to
* EXIT_INT_INFO.
354 static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
357 struct kvm_vcpu *vcpu = &svm->vcpu;
358 u32 exit_int_info = 0;
361 if (vcpu->arch.exception.injected) {
362 nr = vcpu->arch.exception.nr;
363 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
365 if (vcpu->arch.exception.has_error_code) {
366 exit_int_info |= SVM_EVTINJ_VALID_ERR;
367 vmcb12->control.exit_int_info_err =
368 vcpu->arch.exception.error_code;
371 } else if (vcpu->arch.nmi_injected) {
372 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
374 } else if (vcpu->arch.interrupt.injected) {
375 nr = vcpu->arch.interrupt.nr;
376 exit_int_info = nr | SVM_EVTINJ_VALID;
378 if (vcpu->arch.interrupt.soft)
379 exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
381 exit_int_info |= SVM_EVTINJ_TYPE_INTR;
384 vmcb12->control.exit_int_info = exit_int_info;
387 static inline bool nested_npt_enabled(struct vcpu_svm *svm)
389 return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
392 static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
395 * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
396 * things to fix before this can be conditional:
398 * - Flush TLBs for both L1 and L2 remote TLB flush
399 * - Honor L1's request to flush an ASID on nested VMRUN
400 * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
401 * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
402 * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
404 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
405 * NPT guest-physical mappings on VMRUN.
407 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
408 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
412 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
413 * if we are emulating VM-Entry into a guest with NPT enabled.
415 static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
416 bool nested_npt, bool reload_pdptrs)
418 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
421 if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
422 CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
426 kvm_mmu_new_pgd(vcpu, cr3);
428 vcpu->arch.cr3 = cr3;
429 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
431 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
437 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
439 if (!svm->nested.vmcb02.ptr)
442 /* FIXME: merge g_pat from vmcb01 and vmcb12. */
443 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
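/*
 * Load the L2 guest state from vmcb12 into vmcb02 and the vCPU.  Fields
 * covered by the VMCB clean bits are skipped when the same vmcb12 is
 * re-entered and marked clean by L1.
 */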
446 static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
448 bool new_vmcb12 = false;
450 nested_vmcb02_compute_g_pat(svm);
452 /* Load the nested guest state */
453 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
455 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
458 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
459 svm->vmcb->save.es = vmcb12->save.es;
460 svm->vmcb->save.cs = vmcb12->save.cs;
461 svm->vmcb->save.ss = vmcb12->save.ss;
462 svm->vmcb->save.ds = vmcb12->save.ds;
463 svm->vmcb->save.cpl = vmcb12->save.cpl;
464 vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
467 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
468 svm->vmcb->save.gdtr = vmcb12->save.gdtr;
469 svm->vmcb->save.idtr = vmcb12->save.idtr;
470 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
473 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
476 * Force-set EFER_SVME even though it is checked earlier on the
477 * VMCB12, because the guest can flip the bit between the check
478 * and now. Clearing EFER_SVME would call svm_free_nested.
480 svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
482 svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
483 svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
485 svm->vcpu.arch.cr2 = vmcb12->save.cr2;
487 kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
488 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
489 kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
491 /* In case we don't even reach vcpu_run, the fields are not updated */
492 svm->vmcb->save.rax = vmcb12->save.rax;
493 svm->vmcb->save.rsp = vmcb12->save.rsp;
494 svm->vmcb->save.rip = vmcb12->save.rip;
/* These bits will be set properly on the first execution when new_vmcb12 is true */
497 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
498 svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
499 svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
500 vmcb_mark_dirty(svm->vmcb, VMCB_DR);
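/*
 * Build the vmcb02 control area: a few fields are inherited from vmcb01
 * (L1), most are taken from the cached vmcb12 controls in svm->nested.ctl.
 */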
504 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
506 const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
507 struct kvm_vcpu *vcpu = &svm->vcpu;
510 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
511 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
* Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
* avic_physical_id.
518 WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);
520 /* Copied from vmcb01. msrpm_base can be overwritten later. */
521 svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
522 svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
523 svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;
525 /* Done at vmrun: asid. */
527 /* Also overwritten later if necessary. */
528 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
531 if (nested_npt_enabled(svm))
532 nested_svm_init_mmu_context(vcpu);
534 svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
535 vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
537 svm->vmcb->control.int_ctl =
538 (svm->nested.ctl.int_ctl & ~mask) |
539 (svm->vmcb01.ptr->control.int_ctl & mask);
541 svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
542 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
543 svm->vmcb->control.int_state = svm->nested.ctl.int_state;
544 svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
545 svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;
547 svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
548 svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;
550 nested_svm_transition_tlb_flush(vcpu);
552 /* Enter Guest-Mode */
553 enter_guest_mode(vcpu);
556 * Merge guest and host intercepts - must be called with vcpu in
557 * guest-mode to take effect.
559 recalc_intercepts(svm);
562 static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
565 * Some VMCB state is shared between L1 and L2 and thus has to be
566 * moved at the time of nested vmrun and vmexit.
568 * VMLOAD/VMSAVE state would also belong in this category, but KVM
569 * always performs VMLOAD and VMSAVE from the VMCB01.
571 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
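/*
 * Emulate VMRUN: switch to vmcb02, load the vmcb12 control and save state,
 * load the nested CR3 and enter guest (L2) mode.
 */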
574 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
577 struct vcpu_svm *svm = to_svm(vcpu);
580 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
582 vmcb12->control.int_ctl,
583 vmcb12->control.event_inj,
584 vmcb12->control.nested_ctl);
586 trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
587 vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
588 vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
589 vmcb12->control.intercepts[INTERCEPT_WORD3],
590 vmcb12->control.intercepts[INTERCEPT_WORD4],
591 vmcb12->control.intercepts[INTERCEPT_WORD5]);
594 svm->nested.vmcb12_gpa = vmcb12_gpa;
596 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
598 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
600 svm_switch_vmcb(svm, &svm->nested.vmcb02);
601 nested_vmcb02_prepare_control(svm);
602 nested_vmcb02_prepare_save(svm, vmcb12);
604 ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
605 nested_npt_enabled(svm), true);
610 vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
612 svm_set_gif(svm, true);
617 int nested_svm_vmrun(struct kvm_vcpu *vcpu)
619 struct vcpu_svm *svm = to_svm(vcpu);
622 struct kvm_host_map map;
625 if (!svm->nested.hsave_msr) {
626 kvm_inject_gp(vcpu, 0);
631 kvm_queue_exception(vcpu, UD_VECTOR);
635 vmcb12_gpa = svm->vmcb->save.rax;
636 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
637 if (ret == -EINVAL) {
638 kvm_inject_gp(vcpu, 0);
641 return kvm_skip_emulated_instruction(vcpu);
644 ret = kvm_skip_emulated_instruction(vcpu);
648 if (WARN_ON_ONCE(!svm->nested.initialized))
651 nested_load_control_from_vmcb12(svm, &vmcb12->control);
653 if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
654 !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
655 vmcb12->control.exit_code = SVM_EXIT_ERR;
656 vmcb12->control.exit_code_hi = 0;
657 vmcb12->control.exit_info_1 = 0;
658 vmcb12->control.exit_info_2 = 0;
663 /* Clear internal status */
664 kvm_clear_exception_queue(vcpu);
665 kvm_clear_interrupt_queue(vcpu);
* Since vmcb01 is not in use, we can use it to store some of the L1
* state.
671 svm->vmcb01.ptr->save.efer = vcpu->arch.efer;
672 svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(vcpu);
673 svm->vmcb01.ptr->save.cr4 = vcpu->arch.cr4;
674 svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
675 svm->vmcb01.ptr->save.rip = kvm_rip_read(vcpu);
678 svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);
680 svm->nested.nested_run_pending = 1;
682 if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
685 if (nested_svm_vmrun_msrpm(svm))
689 svm->nested.nested_run_pending = 0;
691 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
692 svm->vmcb->control.exit_code_hi = 0;
693 svm->vmcb->control.exit_info_1 = 0;
694 svm->vmcb->control.exit_info_2 = 0;
696 nested_svm_vmexit(svm);
699 kvm_vcpu_unmap(vcpu, &map, true);
704 /* Copy state save area fields which are handled by VMRUN */
705 void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
706 struct vmcb_save_area *to_save)
708 to_save->es = from_save->es;
709 to_save->cs = from_save->cs;
710 to_save->ss = from_save->ss;
711 to_save->ds = from_save->ds;
712 to_save->gdtr = from_save->gdtr;
713 to_save->idtr = from_save->idtr;
714 to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
715 to_save->efer = from_save->efer;
716 to_save->cr0 = from_save->cr0;
717 to_save->cr3 = from_save->cr3;
718 to_save->cr4 = from_save->cr4;
719 to_save->rax = from_save->rax;
720 to_save->rsp = from_save->rsp;
721 to_save->rip = from_save->rip;
725 void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
727 to_vmcb->save.fs = from_vmcb->save.fs;
728 to_vmcb->save.gs = from_vmcb->save.gs;
729 to_vmcb->save.tr = from_vmcb->save.tr;
730 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
731 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
732 to_vmcb->save.star = from_vmcb->save.star;
733 to_vmcb->save.lstar = from_vmcb->save.lstar;
734 to_vmcb->save.cstar = from_vmcb->save.cstar;
735 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
736 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
737 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
738 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
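/*
 * Emulate #VMEXIT from L2 to L1: write the L2 state and exit information
 * back into vmcb12, then switch to vmcb01 and restore the L1 state held
 * there.
 */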
741 int nested_svm_vmexit(struct vcpu_svm *svm)
743 struct kvm_vcpu *vcpu = &svm->vcpu;
745 struct vmcb *vmcb = svm->vmcb;
746 struct kvm_host_map map;
749 /* Triple faults in L2 should never escape. */
750 WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
752 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
755 kvm_inject_gp(vcpu, 0);
761 /* Exit Guest-Mode */
762 leave_guest_mode(vcpu);
763 svm->nested.vmcb12_gpa = 0;
764 WARN_ON_ONCE(svm->nested.nested_run_pending);
766 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
768 /* in case we halted in L2 */
769 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
771 /* Give the current vmcb to the guest */
773 vmcb12->save.es = vmcb->save.es;
774 vmcb12->save.cs = vmcb->save.cs;
775 vmcb12->save.ss = vmcb->save.ss;
776 vmcb12->save.ds = vmcb->save.ds;
777 vmcb12->save.gdtr = vmcb->save.gdtr;
778 vmcb12->save.idtr = vmcb->save.idtr;
779 vmcb12->save.efer = svm->vcpu.arch.efer;
780 vmcb12->save.cr0 = kvm_read_cr0(vcpu);
781 vmcb12->save.cr3 = kvm_read_cr3(vcpu);
782 vmcb12->save.cr2 = vmcb->save.cr2;
783 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
784 vmcb12->save.rflags = kvm_get_rflags(vcpu);
785 vmcb12->save.rip = kvm_rip_read(vcpu);
786 vmcb12->save.rsp = kvm_rsp_read(vcpu);
787 vmcb12->save.rax = kvm_rax_read(vcpu);
788 vmcb12->save.dr7 = vmcb->save.dr7;
789 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
790 vmcb12->save.cpl = vmcb->save.cpl;
792 vmcb12->control.int_state = vmcb->control.int_state;
793 vmcb12->control.exit_code = vmcb->control.exit_code;
794 vmcb12->control.exit_code_hi = vmcb->control.exit_code_hi;
795 vmcb12->control.exit_info_1 = vmcb->control.exit_info_1;
796 vmcb12->control.exit_info_2 = vmcb->control.exit_info_2;
798 if (vmcb12->control.exit_code != SVM_EXIT_ERR)
799 nested_save_pending_event_to_vmcb12(svm, vmcb12);
801 if (svm->nrips_enabled)
802 vmcb12->control.next_rip = vmcb->control.next_rip;
804 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
805 vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
806 vmcb12->control.event_inj = svm->nested.ctl.event_inj;
807 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
809 vmcb12->control.pause_filter_count =
810 svm->vmcb->control.pause_filter_count;
811 vmcb12->control.pause_filter_thresh =
812 svm->vmcb->control.pause_filter_thresh;
814 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
816 svm_switch_vmcb(svm, &svm->vmcb01);
* On vmexit the GIF is set to false and
* no event can be injected into L1.
822 svm_set_gif(svm, false);
823 svm->vmcb->control.exit_int_info = 0;
825 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
826 if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
827 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
828 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
831 svm->nested.ctl.nested_cr3 = 0;
834 * Restore processor state that had been saved in vmcb01
836 kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
837 svm_set_efer(vcpu, svm->vmcb->save.efer);
838 svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
839 svm_set_cr4(vcpu, svm->vmcb->save.cr4);
840 kvm_rax_write(vcpu, svm->vmcb->save.rax);
841 kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
842 kvm_rip_write(vcpu, svm->vmcb->save.rip);
844 svm->vcpu.arch.dr7 = DR7_FIXED_1;
845 kvm_update_dr7(&svm->vcpu);
847 trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
848 vmcb12->control.exit_info_1,
849 vmcb12->control.exit_info_2,
850 vmcb12->control.exit_int_info,
851 vmcb12->control.exit_int_info_err,
854 kvm_vcpu_unmap(vcpu, &map, true);
856 nested_svm_transition_tlb_flush(vcpu);
858 nested_svm_uninit_mmu_context(vcpu);
860 rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
865 * Drop what we picked up for L2 via svm_complete_interrupts() so it
866 * doesn't end up in L1.
868 svm->vcpu.arch.nmi_injected = false;
869 kvm_clear_exception_queue(vcpu);
870 kvm_clear_interrupt_queue(vcpu);
873 * If we are here following the completion of a VMRUN that
874 * is being single-stepped, queue the pending #DB intercept
* right now so that it can be accounted for before we execute
876 * L1's next instruction.
878 if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
879 kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
884 static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
886 nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
889 int svm_allocate_nested(struct vcpu_svm *svm)
891 struct page *vmcb02_page;
893 if (svm->nested.initialized)
896 vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
899 svm->nested.vmcb02.ptr = page_address(vmcb02_page);
900 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
902 svm->nested.msrpm = svm_vcpu_alloc_msrpm();
903 if (!svm->nested.msrpm)
904 goto err_free_vmcb02;
905 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
907 svm->nested.initialized = true;
911 __free_page(vmcb02_page);
915 void svm_free_nested(struct vcpu_svm *svm)
917 if (!svm->nested.initialized)
920 svm_vcpu_free_msrpm(svm->nested.msrpm);
921 svm->nested.msrpm = NULL;
923 __free_page(virt_to_page(svm->nested.vmcb02.ptr));
924 svm->nested.vmcb02.ptr = NULL;
927 * When last_vmcb12_gpa matches the current vmcb12 gpa,
928 * some vmcb12 fields are not loaded if they are marked clean
929 * in the vmcb12, since in this case they are up to date already.
931 * When the vmcb02 is freed, this optimization becomes invalid.
933 svm->nested.last_vmcb12_gpa = INVALID_GPA;
935 svm->nested.initialized = false;
939 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
941 void svm_leave_nested(struct vcpu_svm *svm)
943 struct kvm_vcpu *vcpu = &svm->vcpu;
945 if (is_guest_mode(vcpu)) {
946 svm->nested.nested_run_pending = 0;
947 svm->nested.vmcb12_gpa = INVALID_GPA;
949 leave_guest_mode(vcpu);
951 svm_switch_vmcb(svm, &svm->vmcb01);
953 nested_svm_uninit_mmu_context(vcpu);
954 vmcb_mark_all_dirty(svm->vmcb);
957 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
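/*
 * Consult L1's MSR permission bitmap to decide whether an MSR intercept
 * that hit in L0 should be forwarded to L1 as a vmexit.
 */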
960 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
962 u32 offset, msr, value;
965 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
966 return NESTED_EXIT_HOST;
968 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
969 offset = svm_msrpm_offset(msr);
970 write = svm->vmcb->control.exit_info_1 & 1;
971 mask = 1 << ((2 * (msr & 0xf)) + write);
973 if (offset == MSR_INVALID)
974 return NESTED_EXIT_DONE;
/* Offset is in 32-bit units but we need it in byte (8-bit) units */
offset *= 4;
979 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
980 return NESTED_EXIT_DONE;
982 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
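/*
 * Consult L1's IO permission bitmap for the port that triggered the IOIO
 * intercept; a set bit means L1 wants the vmexit.
 */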
985 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
987 unsigned port, size, iopm_len;
992 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
993 return NESTED_EXIT_HOST;
995 port = svm->vmcb->control.exit_info_1 >> 16;
996 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
998 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
999 start_bit = port % 8;
1000 iopm_len = (start_bit + size > 8) ? 2 : 1;
1001 mask = (0xf >> (4 - size)) << start_bit;
1004 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1005 return NESTED_EXIT_DONE;
1007 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1010 static int nested_svm_intercept(struct vcpu_svm *svm)
1012 u32 exit_code = svm->vmcb->control.exit_code;
1013 int vmexit = NESTED_EXIT_HOST;
1015 switch (exit_code) {
1017 vmexit = nested_svm_exit_handled_msr(svm);
1020 vmexit = nested_svm_intercept_ioio(svm);
1022 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
1023 if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
1024 vmexit = NESTED_EXIT_DONE;
1027 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
1028 if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
1029 vmexit = NESTED_EXIT_DONE;
1032 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1034 * Host-intercepted exceptions have been checked already in
1035 * nested_svm_exit_special. There is nothing to do here,
1036 * the vmexit is injected by svm_check_nested_events.
1038 vmexit = NESTED_EXIT_DONE;
1041 case SVM_EXIT_ERR: {
1042 vmexit = NESTED_EXIT_DONE;
1046 if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
1047 vmexit = NESTED_EXIT_DONE;
1054 int nested_svm_exit_handled(struct vcpu_svm *svm)
1058 vmexit = nested_svm_intercept(svm);
1060 if (vmexit == NESTED_EXIT_DONE)
1061 nested_svm_vmexit(svm);
1066 int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1068 if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
1069 kvm_queue_exception(vcpu, UD_VECTOR);
1073 if (to_svm(vcpu)->vmcb->save.cpl) {
1074 kvm_inject_gp(vcpu, 0);
1081 static bool nested_exit_on_exception(struct vcpu_svm *svm)
1083 unsigned int nr = svm->vcpu.arch.exception.nr;
1085 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
1088 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
1090 unsigned int nr = svm->vcpu.arch.exception.nr;
1092 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1093 svm->vmcb->control.exit_code_hi = 0;
1095 if (svm->vcpu.arch.exception.has_error_code)
1096 svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
* EXITINFO2 is undefined for all exception intercepts other
* than #PF.
1102 if (nr == PF_VECTOR) {
1103 if (svm->vcpu.arch.exception.nested_apf)
1104 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
1105 else if (svm->vcpu.arch.exception.has_payload)
1106 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
1108 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1109 } else if (nr == DB_VECTOR) {
1110 /* See inject_pending_event. */
1111 kvm_deliver_exception_payload(&svm->vcpu);
1112 if (svm->vcpu.arch.dr7 & DR7_GD) {
1113 svm->vcpu.arch.dr7 &= ~DR7_GD;
1114 kvm_update_dr7(&svm->vcpu);
1117 WARN_ON(svm->vcpu.arch.exception.has_payload);
1119 nested_svm_vmexit(svm);
1122 static inline bool nested_exit_on_init(struct vcpu_svm *svm)
1124 return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
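/*
 * Check for pending events (INIT, exceptions, SMI, NMI, interrupts) that
 * should cause a vmexit to L1 instead of being delivered to L2.
 */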
1127 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1129 struct vcpu_svm *svm = to_svm(vcpu);
1130 bool block_nested_events =
1131 kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
1132 struct kvm_lapic *apic = vcpu->arch.apic;
1134 if (lapic_in_kernel(vcpu) &&
1135 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
1136 if (block_nested_events)
1138 if (!nested_exit_on_init(svm))
1140 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
1144 if (vcpu->arch.exception.pending) {
1146 * Only a pending nested run can block a pending exception.
1147 * Otherwise an injected NMI/interrupt should either be
1148 * lost or delivered to the nested hypervisor in the EXITINTINFO
1149 * vmcb field, while delivering the pending exception.
1151 if (svm->nested.nested_run_pending)
1153 if (!nested_exit_on_exception(svm))
1155 nested_svm_inject_exception_vmexit(svm);
1159 if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
1160 if (block_nested_events)
1162 if (!nested_exit_on_smi(svm))
1164 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
1168 if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
1169 if (block_nested_events)
1171 if (!nested_exit_on_nmi(svm))
1173 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
1177 if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1178 if (block_nested_events)
1180 if (!nested_exit_on_intr(svm))
1182 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1183 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
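/*
 * Decide whether an exit must be handled by L0 first (NESTED_EXIT_HOST) or
 * may go through the normal nested intercept checks (NESTED_EXIT_CONTINUE).
 */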
1190 int nested_svm_exit_special(struct vcpu_svm *svm)
1192 u32 exit_code = svm->vmcb->control.exit_code;
1194 switch (exit_code) {
1198 return NESTED_EXIT_HOST;
1199 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1200 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1202 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
1204 return NESTED_EXIT_HOST;
1205 else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1206 svm->vcpu.arch.apf.host_apf_flags)
1207 /* Trap async PF even if not shadowing */
1208 return NESTED_EXIT_HOST;
1215 return NESTED_EXIT_CONTINUE;
1218 static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1219 struct kvm_nested_state __user *user_kvm_nested_state,
1222 struct vcpu_svm *svm;
1223 struct kvm_nested_state kvm_state = {
1225 .format = KVM_STATE_NESTED_FORMAT_SVM,
1226 .size = sizeof(kvm_state),
1228 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1229 &user_kvm_nested_state->data.svm[0];
1232 return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1236 if (user_data_size < kvm_state.size)
1239 /* First fill in the header and copy it out. */
1240 if (is_guest_mode(vcpu)) {
1241 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1242 kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1243 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1245 if (svm->nested.nested_run_pending)
1246 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1250 kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1252 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1255 if (!is_guest_mode(vcpu))
* Copy over the full size of the VMCB rather than just the size
* of the control and save structs, clearing the unused space.
1262 if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1264 if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
1265 sizeof(user_vmcb->control)))
1267 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1268 sizeof(user_vmcb->save)))
1271 return kvm_state.size;
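/*
 * Restore nested state supplied by userspace (KVM_SET_NESTED_STATE):
 * validate the provided vmcb12 control and save areas, then re-enter guest
 * mode if requested.
 */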
1274 static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1275 struct kvm_nested_state __user *user_kvm_nested_state,
1276 struct kvm_nested_state *kvm_state)
1278 struct vcpu_svm *svm = to_svm(vcpu);
1279 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1280 &user_kvm_nested_state->data.svm[0];
1281 struct vmcb_control_area *ctl;
1282 struct vmcb_save_area *save;
1286 BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
1287 KVM_STATE_NESTED_SVM_VMCB_SIZE);
1289 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1292 if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1293 KVM_STATE_NESTED_RUN_PENDING |
1294 KVM_STATE_NESTED_GIF_SET))
1298 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1299 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1301 if (!(vcpu->arch.efer & EFER_SVME)) {
1302 /* GIF=1 and no guest mode are required if SVME=0. */
1303 if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1307 /* SMM temporarily disables SVM, so we cannot be in guest mode. */
1308 if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1311 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1312 svm_leave_nested(svm);
1313 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1317 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1319 if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1323 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1324 save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1329 if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1331 if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1335 if (!nested_vmcb_check_controls(vcpu, ctl))
* Processor state contains L2 state. Check that it is
* valid for guest mode (see nested_vmcb_valid_sregs).
1342 cr0 = kvm_read_cr0(vcpu);
1343 if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1347 * Validate host state saved from before VMRUN (see
1348 * nested_svm_check_permissions).
1350 if (!(save->cr0 & X86_CR0_PG) ||
1351 !(save->cr0 & X86_CR0_PE) ||
1352 (save->rflags & X86_EFLAGS_VM) ||
1353 !nested_vmcb_valid_sregs(vcpu, save))
* While the nested guest CR3 is already checked and set by
* KVM_SET_SREGS, it was set before the nested state was loaded,
* so the MMU might not be initialized correctly.
* Set it again to fix this.
1363 ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1364 nested_npt_enabled(svm), false);
1365 if (WARN_ON_ONCE(ret))
* All checks done, we can enter guest mode. Userspace provides
* vmcb12.control, which will be combined with L1 state and stored into
* vmcb02, and the L1 save state, which we store in vmcb01.
* L2 registers, if needed, are moved from the current VMCB to vmcb02.
1376 if (is_guest_mode(vcpu))
1377 svm_leave_nested(svm);
1379 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1381 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1383 svm->nested.nested_run_pending =
1384 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
1386 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1388 svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save);
1389 nested_load_control_from_vmcb12(svm, ctl);
1391 svm_switch_vmcb(svm, &svm->nested.vmcb02);
1392 nested_vmcb02_prepare_control(svm);
1393 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1402 static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1404 struct vcpu_svm *svm = to_svm(vcpu);
1406 if (WARN_ON(!is_guest_mode(vcpu)))
1409 if (!vcpu->arch.pdptrs_from_userspace &&
1410 !nested_npt_enabled(svm) && is_pae_paging(vcpu))
* Reload the guest's PDPTRs since after a migration
* the guest CR3 might be restored prior to setting the nested
* state, which can lead to loading the wrong PDPTRs.
1416 if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
1419 if (!nested_svm_vmrun_msrpm(svm)) {
1420 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1421 vcpu->run->internal.suberror =
1422 KVM_INTERNAL_ERROR_EMULATION;
1423 vcpu->run->internal.ndata = 0;
1430 struct kvm_x86_nested_ops svm_nested_ops = {
1431 .check_events = svm_check_nested_events,
1432 .triple_fault = nested_svm_triple_fault,
1433 .get_nested_state_pages = svm_get_nested_state_pages,
1434 .get_state = svm_get_nested_state,
1435 .set_state = svm_set_nested_state,