KVM: x86/mmu: Move "invalid" check out of kvm_tdp_mmu_get_root()
author Sean Christopherson <seanjc@google.com>
Wed, 15 Dec 2021 01:15:55 +0000 (01:15 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 8 Apr 2022 12:24:03 +0000 (14:24 +0200)
commit 04dc4e6ce274fa729feda32aa957b27388a3870c upstream.

Move the check for an invalid root out of kvm_tdp_mmu_get_root() and into
the one place it actually matters, tdp_mmu_next_root(), as the other user
already has an implicit validity check.  A future bug fix will need to
get references to invalid roots to honor mmu_notifier requests; there's
no point in forcing what will be a common path to open code getting a
reference to a root.

No functional change intended.

Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211215011557.399940-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index d479b2b..38f698e 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -121,9 +121,14 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                   typeof(*next_root), link);
 
-       while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
+       while (next_root) {
+               if (!next_root->role.invalid &&
+                   kvm_tdp_mmu_get_root(kvm, next_root))
+                       break;
+
                next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                &next_root->link, typeof(*next_root), link);
+       }
 
        rcu_read_unlock();
 
@@ -199,7 +204,10 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
 
        role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
 
-       /* Check for an existing root before allocating a new one. */
+       /*
+        * Check for an existing root before allocating a new one.  Note, the
+        * role check prevents consuming an invalid root.
+        */
        for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
                if (root->role.word == role.word &&
                    kvm_tdp_mmu_get_root(kvm, root))
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index ba3681c..39468b6 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -10,9 +10,6 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
                                                     struct kvm_mmu_page *root)
 {
-       if (root->role.invalid)
-               return false;
-
        return refcount_inc_not_zero(&root->tdp_mmu_root_count);
 }
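
For illustration only: the net effect of the change is that the "invalid" filter
becomes the caller's choice rather than a property of kvm_tdp_mmu_get_root().
Below is a minimal sketch of that caller-side pattern; the helper and its
only_valid parameter are hypothetical and not part of this commit, and the
caller is assumed to keep "root" from being freed (e.g. by holding the RCU read
lock while walking tdp_mmu_roots, as tdp_mmu_next_root() does).

static bool tdp_mmu_try_get_root_sketch(struct kvm *kvm,
					struct kvm_mmu_page *root,
					bool only_valid)
{
	/*
	 * Hypothetical sketch: the role.invalid check is now made by the
	 * caller that wants it, not hidden inside the refcount helper.
	 */
	if (only_valid && root->role.invalid)
		return false;

	/* Fails only if the root's refcount has already dropped to zero. */
	return kvm_tdp_mmu_get_root(kvm, root);
}

A caller that must honor mmu_notifier requests against invalid roots, as the
future fix mentioned in the commit message needs to, would simply skip the
role.invalid check (only_valid=false in the sketch) and take the reference
directly, which the decoupling above makes possible.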