 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
-void kvm_mmu_zap_all(struct kvm *kvm)
+static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
 	int ign;
 
 	spin_lock(&kvm->mmu_lock);
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+		if (mmio_only && !sp->mmio_cached)
+			continue;
 		if (sp->role.invalid && sp->root_count)
 			continue;
-		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign) ||
-		    cond_resched_lock(&kvm->mmu_lock))
+		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
+			WARN_ON_ONCE(mmio_only);
 			goto restart;
+		}
+		if (cond_resched_lock(&kvm->mmu_lock))
+			goto restart;
 	}
 
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
+void kvm_mmu_zap_all(struct kvm *kvm)
 {
-	struct kvm_mmu_page *sp, *node;
-	LIST_HEAD(invalid_list);
-	int ign;
-
-	spin_lock(&kvm->mmu_lock);
-restart:
-	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (!sp->mmio_cached)
-			continue;
-		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
-			WARN_ON_ONCE(1);
-			goto restart;
-		}
-		if (cond_resched_lock(&kvm->mmu_lock))
-			goto restart;
-	}
-
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-	spin_unlock(&kvm->mmu_lock);
-}
+	return __kvm_mmu_zap_all(kvm, false);
+}
+
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 {
 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);

 	/*
 	 * The very rare case: if the MMIO generation number has wrapped,
 	 * zap all shadow pages.
 	 */
 	if (unlikely(gen == 0)) {
 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
-		kvm_mmu_zap_mmio_sptes(kvm);
+		__kvm_mmu_zap_all(kvm, true);
 	}
 }