Flush the shadow mmu before removing regions to avoid stale entries.
Without the flush, shadow ptes that translate into the removed region
survive the removal and could still be used to satisfy guest accesses
to pages that are no longer part of any memslot.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
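---

To make the ordering concrete, here is a condensed sketch of the
__kvm_set_memory_region() path with this patch applied (taken from the
hunks below; locking and error handling omitted):

	/* npages == 0 means the caller is deleting the slot */
	if (!npages)
		kvm_arch_flush_shadow(kvm);   /* zap shadow ptes first */

	*memslot = new;                       /* only then install the emptied slot */

x86 implements the flush with kvm_mmu_zap_all(); the other
architectures provide empty stubs.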
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
 	return 0;
 }
 
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg)
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
 	return 0;
 }
 
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
 	return 0;
 }
 
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
 	return 0;
 }
 
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+	kvm_mmu_zap_all(kvm);
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
+void kvm_arch_flush_shadow(struct kvm *kvm);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
 	if (mem->slot >= kvm->nmemslots)
 		kvm->nmemslots = mem->slot + 1;
 
+	if (!npages)
+		kvm_arch_flush_shadow(kvm);
+
 	*memslot = new;
 
 	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
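
For reference, this path is driven from userspace by the
KVM_SET_USER_MEMORY_REGION ioctl: setting memory_size to 0 for an
existing slot deletes it, which is exactly the npages == 0 case that
now triggers the flush. A minimal sketch (the helper name is made up
for illustration):

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Hypothetical helper: delete memslot 'slot' of the VM behind vm_fd. */
	static int delete_memslot(int vm_fd, __u32 slot)
	{
		struct kvm_userspace_memory_region region;

		memset(&region, 0, sizeof(region));
		region.slot = slot;
		region.memory_size = 0;	/* size 0 removes the slot */

		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}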