KVM: x86/mmu: Pass address space ID to TDP MMU root walkers
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Fri, 26 Mar 2021 02:19:45 +0000 (19:19 -0700)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Sat, 17 Apr 2021 12:30:55 +0000 (08:30 -0400)
Move the address space ID check that is performed when iterating over
TDP MMU roots into the macro helpers to consolidate code.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
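
[Editor's note: as background for the diff below, here is a minimal
standalone sketch of the "empty if/else" filtering idiom that the
reworked macros use; struct item and for_each_item are hypothetical
names invented for illustration, not kernel code.  The macro ends in an
inverted test with an empty body, so the caller's loop body lands in
the "else" arm and non-matching entries are skipped without
open-coding a "continue" at every call site.]

#include <stdio.h>

/* Hypothetical stand-in for a TDP MMU root tagged with an address
 * space ID; "as_id" here mirrors kvm_mmu_page_as_id(root). */
struct item {
        int as_id;
        const char *name;
};

/*
 * Visit only the items whose as_id matches _as_id.  The empty
 * "if { } else" keeps the macro usable as a single statement: the
 * caller's loop body binds to the "else" arm, so non-matching items
 * are skipped inside the macro itself.
 */
#define for_each_item(_items, _n, _i, _as_id)                   \
        for ((_i) = 0; (_i) < (_n); (_i)++)                     \
                if ((_items)[(_i)].as_id != (_as_id)) {         \
                } else

int main(void)
{
        struct item items[] = {
                { 0, "root-a" }, { 1, "root-b" }, { 0, "root-c" },
        };
        int i;

        for_each_item(items, 3, i, 0)
                printf("%s\n", items[i].name);  /* root-a, root-c */

        return 0;
}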
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/tdp_mmu.c

diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index e03267e..f296b8a 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -88,9 +88,14 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
        return to_shadow_page(__pa(sptep));
 }
 
+static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
+{
+       return role.smm ? 1 : 0;
+}
+
 static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
 {
-       return sp->role.smm ? 1 : 0;
+       return kvm_mmu_role_as_id(sp->role);
 }
 
 static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7c337db..3608f4a 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -76,14 +76,18 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * if exiting the loop early, the caller must drop the reference to the most
  * recent root. (Unless keeping a live reference is desirable.)
  */
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)                          \
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)          \
        for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,        \
                                      typeof(*_root), link);            \
             tdp_mmu_next_root_valid(_kvm, _root);                      \
-            _root = tdp_mmu_next_root(_kvm, _root))
+            _root = tdp_mmu_next_root(_kvm, _root))                    \
+               if (kvm_mmu_page_as_id(_root) != _as_id) {              \
+               } else
 
-#define for_each_tdp_mmu_root(_kvm, _root)                             \
-       list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
+#define for_each_tdp_mmu_root(_kvm, _root, _as_id)                     \
+       list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)     \
+               if (kvm_mmu_page_as_id(_root) != _as_id) {              \
+               } else
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield, bool flush);
@@ -148,7 +152,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
        role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
 
        /* Check for an existing root before allocating a new one. */
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
                if (root->role.word == role.word) {
                        kvm_mmu_get_root(kvm, root);
                        goto out;
@@ -704,11 +708,8 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 {
        struct kvm_mmu_page *root;
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               if (kvm_mmu_page_as_id(root) != as_id)
-                       continue;
+       for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
                flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
-       }
 
        return flush;
 }
@@ -888,27 +889,28 @@ static __always_inline int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
        int ret = 0;
        int as_id;
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               as_id = kvm_mmu_page_as_id(root);
-               slots = __kvm_memslots(kvm, as_id);
-               kvm_for_each_memslot(memslot, slots) {
-                       unsigned long hva_start, hva_end;
-                       gfn_t gfn_start, gfn_end;
-
-                       hva_start = max(start, memslot->userspace_addr);
-                       hva_end = min(end, memslot->userspace_addr +
-                                     (memslot->npages << PAGE_SHIFT));
-                       if (hva_start >= hva_end)
-                               continue;
-                       /*
-                        * {gfn(page) | page intersects with [hva_start, hva_end)} =
-                        * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-                        */
-                       gfn_start = hva_to_gfn_memslot(hva_start, memslot);
-                       gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-                       ret |= handler(kvm, memslot, root, gfn_start,
-                                      gfn_end, data);
+       for (as_id = 0; as_id < KVM_ADDRESS_SPACE_NUM; as_id++) {
+               for_each_tdp_mmu_root_yield_safe(kvm, root, as_id) {
+                       slots = __kvm_memslots(kvm, as_id);
+                       kvm_for_each_memslot(memslot, slots) {
+                               unsigned long hva_start, hva_end;
+                               gfn_t gfn_start, gfn_end;
+
+                               hva_start = max(start, memslot->userspace_addr);
+                               hva_end = min(end, memslot->userspace_addr +
+                                       (memslot->npages << PAGE_SHIFT));
+                               if (hva_start >= hva_end)
+                                       continue;
+                               /*
+                                * {gfn(page) | page intersects with [hva_start, hva_end)} =
+                                * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+                                */
+                               gfn_start = hva_to_gfn_memslot(hva_start, memslot);
+                               gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+                               ret |= handler(kvm, memslot, root, gfn_start,
+                                       gfn_end, data);
+                       }
                }
        }
 
@@ -1120,17 +1122,11 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
                             int min_level)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
-
+       for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
                spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
                             slot->base_gfn + slot->npages, min_level);
-       }
 
        return spte_set;
 }
@@ -1185,17 +1181,11 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
-
+       for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
                spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);
-       }
 
        return spte_set;
 }
@@ -1257,16 +1247,10 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       bool wrprot)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
 
        lockdep_assert_held_write(&kvm->mmu_lock);
-       for_each_tdp_mmu_root(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
-
+       for_each_tdp_mmu_root(kvm, root, slot->as_id)
                clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
-       }
 }
 
 /*
@@ -1319,15 +1303,9 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                       struct kvm_memory_slot *slot, bool flush)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
-
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
 
+       for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
                flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-       }
 
        return flush;
 }
@@ -1371,17 +1349,12 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
        bool spte_set = false;
 
        lockdep_assert_held_write(&kvm->mmu_lock);
-       for_each_tdp_mmu_root(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
-
+       for_each_tdp_mmu_root(kvm, root, slot->as_id)
                spte_set |= write_protect_gfn(kvm, root, gfn);
-       }
+
        return spte_set;
 }
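
[Editor's note: a closing remark on the macro shape, using hypothetical
for_each_even macros rather than kernel code.  A for-each macro that
ends in a bare "if (match)" would let a caller's trailing "else" bind
to the macro's hidden "if" (the classic dangling-else hazard); the
inverted "if (no match) { } else" form used by the patch already
supplies its own "else", so it cannot capture one from the caller.]

/* Hypothetical macros illustrating the hazard; not from the kernel. */

/* BROKEN: ends in a bare "if", so a caller's trailing "else" would
 * silently bind to it instead of to the caller's own "if". */
#define for_each_even_bad(_i, _n)                       \
        for ((_i) = 0; (_i) < (_n); (_i)++)             \
                if ((_i) % 2 == 0)

/* SAFE: the inverted test with an empty body supplies its own "else",
 * which the caller's loop body fills, so no "else" can be captured. */
#define for_each_even(_i, _n)                           \
        for ((_i) = 0; (_i) < (_n); (_i)++)             \
                if ((_i) % 2 != 0) {                    \
                } else

int count_evens(int n)
{
        int i, evens = 0;

        if (n > 0)
                for_each_even(i, n)
                        evens++;
        else    /* binds to "if (n > 0)", as intended */
                evens = -1;

        return evens;
}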