From 7d919c7a38fb2e34d4d27e535f19979e1b415f68 Mon Sep 17 00:00:00 2001
From: Sean Christopherson <sean.j.christopherson@intel.com>
Date: Wed, 23 Sep 2020 11:37:29 -0700
Subject: [PATCH] KVM: x86/mmu: Refactor the zap loop for recovering NX lpages

Refactor the zap loop in kvm_recover_nx_lpages() to be a for loop that
iterates on to_zap and drop the !to_zap check that leads to the in-loop
calling of kvm_mmu_commit_zap_page().  The in-loop commit when to_zap
hits zero is superfluous now that there's an unconditional commit after
the loop to handle the case where lpage_disallowed_mmu_pages is emptied.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923183735.584-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 340eacf3327b..a4cbf5e14991 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6375,7 +6375,10 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 
 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
 	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
-	while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
+	for ( ; to_zap; --to_zap) {
+		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
+			break;
+
 		/*
 		 * We use a separate list instead of just using active_mmu_pages
 		 * because the number of lpage_disallowed pages is expected to
@@ -6388,10 +6391,9 @@
 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 		WARN_ON_ONCE(sp->lpage_disallowed);
 
-		if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
-			if (to_zap)
-				cond_resched_lock(&kvm->mmu_lock);
+			cond_resched_lock(&kvm->mmu_lock);
 		}
 	}
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-- 
2.34.1
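
For readers skimming the refactor, below is a minimal, compilable C
sketch (not KVM code) of the same control-flow change.  The helpers
list_nonempty(), zap_one(), commit(), should_yield(), and yield_lock()
are hypothetical stand-ins for the list-empty check,
kvm_mmu_prepare_zap_page(), kvm_mmu_commit_zap_page(), the
need_resched()/spin_needbreak() test, and cond_resched_lock().

/*
 * Toy model of the refactor: "remaining" plays the role of the
 * lpage_disallowed_mmu_pages list, "queued" the role of invalid_list.
 * All helpers here are hypothetical, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static int remaining = 10;
static int queued;

static bool list_nonempty(void) { return remaining > 0; }
static void zap_one(void) { remaining--; queued++; }

static void commit(void)
{
	if (queued)
		printf("committed %d zapped pages\n", queued);
	queued = 0;
}

/* Stand-in for need_resched() || spin_needbreak(&kvm->mmu_lock). */
static bool should_yield(void) { return queued >= 2; }

/* Stand-in for cond_resched_lock(&kvm->mmu_lock). */
static void yield_lock(void) { printf("yielded lock\n"); }

/* Before the patch: to_zap is decremented inside the yield check, so
 * hitting zero forces an in-loop commit plus a to_zap guard before
 * yielding. */
static void recover_old(unsigned long to_zap)
{
	while (to_zap && list_nonempty()) {
		zap_one();
		if (!--to_zap || should_yield()) {
			commit();
			if (to_zap)
				yield_lock();
		}
	}
	commit();	/* unconditional final commit */
}

/* After the patch: the for loop owns the countdown; the in-loop commit
 * fires only when yielding, and the final commit covers loop exit. */
static void recover_new(unsigned long to_zap)
{
	for ( ; to_zap; --to_zap) {
		if (!list_nonempty())
			break;
		zap_one();
		if (should_yield()) {
			commit();
			yield_lock();
		}
	}
	commit();	/* unconditional final commit */
}

int main(void)
{
	recover_old(4);
	remaining = 10;
	recover_new(4);
	return 0;
}

In both versions the unconditional final commit() makes every loop-exit
path flush the queue, which is exactly what lets the new version drop
the !to_zap special case from the in-loop check.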