 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gva_t gva, hpa_t root_hpa);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush,
+		     bool skip_mmu_sync);

 void kvm_configure_mmu(bool enable_tdp, int tdp_page_level);
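
The declaration change above (arch/x86/include/asm/kvm_host.h) splits the old all-or-nothing skip_tlb_flush control in two: skip_tlb_flush now gates only the TLB flush request, while the new skip_mmu_sync gates the shadow-page sync. A minimal sketch of the four call shapes this enables; the argument values are illustrative only:

	/* Sketch: combinations now expressible by callers. */
	kvm_mmu_new_cr3(vcpu, cr3, false, false); /* sync roots and flush TLB (old default) */
	kvm_mmu_new_cr3(vcpu, cr3, true,  true);  /* skip both (old skip_tlb_flush=true) */
	kvm_mmu_new_cr3(vcpu, cr3, false, true);  /* flush the TLB but skip the sync */
	kvm_mmu_new_cr3(vcpu, cr3, true,  false); /* sync roots but skip the flush */

The implementation changes below live in arch/x86/kvm/mmu/mmu.c.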
 static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			      union kvm_mmu_page_role new_role,
-			      bool skip_tlb_flush)
+			      bool skip_tlb_flush, bool skip_mmu_sync)
 {
 	if (!fast_cr3_switch(vcpu, new_cr3, new_role)) {
 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
 		return;
 	}

 	/*
 	 * It's possible that the cached previous root page is obsolete because
 	 * of a change in the MMU generation number. However, changing the
 	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
 	 * free the root set here and allocate a new one.
 	 */
 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);

-	if (!skip_tlb_flush) {
+	if (!skip_mmu_sync)
 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+	if (!skip_tlb_flush)
 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
-	}

 	/*
 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
 	 * valid. So clear any cached MMIO info even when we don't need to sync
 	 * the shadow page tables.
 	 */
 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

 	__clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa));
 }
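
The requests raised in __kvm_mmu_new_cr3() are serviced lazily before the next VM-Entry. A simplified sketch of the consumer side, assuming the vcpu_enter_guest() request handling of this era in arch/x86/kvm/x86.c:

	if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
		kvm_mmu_sync_roots(vcpu);	/* resync unsync'd shadow pages */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_vcpu_flush_tlb_current(vcpu); /* flush the current root's TLB entries */

Decoupling the two requests lets a caller that has already performed (or emulated) the architectural TLB flush avoid a redundant flush without also skipping the shadow-page sync, and vice versa.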
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush,
+		     bool skip_mmu_sync)
 {
 	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
-			  skip_tlb_flush);
+			  skip_tlb_flush, skip_mmu_sync);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
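
kvm_mmu_new_cr3() stays exported because it is also called from the vendor modules (kvm_intel/kvm_amd), and every caller must grow the new argument in the same patch for the build to stay bisectable. As an illustration only (the corresponding hunk is not shown in this excerpt), the nested VMX CR3 load would become something like:

	/* Hypothetical sketch based on nested_vmx_load_cr3(); not part of this excerpt. */
	if (!nested_ept)
		kvm_mmu_new_cr3(vcpu, cr3, false, false);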
 	union kvm_mmu_page_role new_role =
 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
 						   execonly, level);

-	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
+	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false, false);

 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
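
In the shadow EPT path above (kvm_init_shadow_ept_mmu() in arch/x86/kvm/mmu/mmu.c), the new argument is pinned to false, i.e. nested EPT keeps the historical behavior for now: installing a new EPTP still requests both the sync and the flush. Spelled out for clarity, a sketch equivalent to the call above:

	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base,
			  false /* skip_tlb_flush */,
			  false /* skip_mmu_sync */);

The final hunk below is in kvm_set_cr3() (arch/x86/kvm/x86.c).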
 	else if (is_pae_paging(vcpu) &&
 		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
 		return 1;

-	kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush);
+	kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
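
Here skip_tlb_flush is simply passed for both parameters, so guest MOV-to-CR3 behavior is unchanged: the sync and the flush are skipped or taken together, keyed off the PCID no-flush hint. A simplified sketch of how that flag is derived earlier in kvm_set_cr3():

	bool skip_tlb_flush = false;

	/* CR3 bit 63 is the PCID no-flush hint; strip it before the MMU sees cr3. */
	if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE) &&
	    (cr3 & X86_CR3_PCID_NOFLUSH)) {
		skip_tlb_flush = true;
		cr3 &= ~X86_CR3_PCID_NOFLUSH;
	}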