KVM: x86/mmu: Clear only A-bit (if enabled) when aging TDP MMU SPTEs
author     Vipin Sharma <vipinsh@google.com>
           Tue, 21 Mar 2023 22:00:16 +0000 (15:00 -0700)
committer  Sean Christopherson <seanjc@google.com>
           Tue, 4 Apr 2023 19:37:30 +0000 (12:37 -0700)
Use tdp_mmu_clear_spte_bits() when clearing the Accessed bit in TDP MMU
SPTEs so as to use an atomic-AND instead of XCHG to clear the A-bit.
Similar to the D-bit story, this will allow KVM to bypass
__handle_changed_spte() by ensuring only the A-bit is modified.
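
To illustrate why the atomic-AND matters (this sketch is not part of the
patch; the bit positions are made up for the demo and use C11 atomics
rather than the kernel's atomic64_fetch_and()): hardware can set the
Accessed and Dirty bits in a live SPTE at any time, so an XCHG that
installs a precomputed value can overwrite a D-bit the CPU set in the
window between KVM's read and the XCHG, whereas an atomic AND of ~A-bit
cannot lose such an update.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative bit positions only; not x86's real A/D layout. */
	#define A_BIT (UINT64_C(1) << 5)
	#define D_BIT (UINT64_C(1) << 6)

	static _Atomic uint64_t spte;

	int main(void)
	{
		uint64_t snap;

		/* SPTE starts accessed but clean; KVM snapshots it. */
		atomic_store(&spte, A_BIT);
		snap = atomic_load(&spte);

		/* Hardware sets the D-bit after the snapshot... */
		atomic_fetch_or(&spte, D_BIT);

		/* ...XCHG of a precomputed value silently drops it. */
		atomic_exchange(&spte, snap & ~A_BIT);
		printf("xchg: lost D-bit? %s\n",
		       (atomic_load(&spte) & D_BIT) ? "no" : "yes");

		/* Reset and repeat the race with an atomic AND. */
		atomic_store(&spte, A_BIT);
		snap = atomic_load(&spte);
		atomic_fetch_or(&spte, D_BIT);

		/* Atomic-AND clears only the A-bit; the D-bit survives. */
		atomic_fetch_and(&spte, ~A_BIT);
		printf("and:  lost D-bit? %s\n",
		       (atomic_load(&spte) & D_BIT) ? "no" : "yes");
		return 0;
	}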

Link: https://lore.kernel.org/all/Y9HcHRBShQgjxsQb@google.com
Signed-off-by: Vipin Sharma <vipinsh@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
[sean: massage changelog]
Link: https://lore.kernel.org/r/20230321220021.2119033-9-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 3cc81fa..adbdfed 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -756,13 +756,6 @@ static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
        _tdp_mmu_set_spte(kvm, iter, new_spte, true);
 }
 
-static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
-                                                struct tdp_iter *iter,
-                                                u64 new_spte)
-{
-       _tdp_mmu_set_spte(kvm, iter, new_spte, false);
-}
-
 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
        for_each_tdp_pte(_iter, _root, _start, _end)
 
@@ -1248,33 +1241,44 @@ static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
 /*
  * Mark the SPTEs for GFNs in the range [start, end) as unaccessed and return
  * non-zero if any of the GFNs in the range have been accessed.
+ *
+ * No need to mark the corresponding PFN as accessed as this call is coming
+ * from the clear_young() or clear_flush_young() notifier, which uses the
+ * return value to determine if the page has been accessed.
  */
 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
                          struct kvm_gfn_range *range)
 {
-       u64 new_spte = 0;
+       u64 new_spte;
 
        /* If we have a non-accessed entry we don't need to change the pte. */
        if (!is_accessed_spte(iter->old_spte))
                return false;
 
-       new_spte = iter->old_spte;
-
-       if (spte_ad_enabled(new_spte)) {
-               new_spte &= ~shadow_accessed_mask;
+       if (spte_ad_enabled(iter->old_spte)) {
+               iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
+                                                        iter->old_spte,
+                                                        shadow_accessed_mask,
+                                                        iter->level);
+               new_spte = iter->old_spte & ~shadow_accessed_mask;
        } else {
                /*
                 * Capture the dirty status of the page, so that it doesn't get
                 * lost when the SPTE is marked for access tracking.
                 */
-               if (is_writable_pte(new_spte))
-                       kvm_set_pfn_dirty(spte_to_pfn(new_spte));
+               if (is_writable_pte(iter->old_spte))
+                       kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
 
-               new_spte = mark_spte_for_access_track(new_spte);
+               new_spte = mark_spte_for_access_track(iter->old_spte);
+               iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
+                                                       iter->old_spte, new_spte,
+                                                       iter->level);
        }
 
-       tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
-
+       __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+                             new_spte, iter->level, false);
+       handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
+                                     iter->old_spte, new_spte, iter->level);
        return true;
 }
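
For reference, the helper this patch leans on, tdp_mmu_clear_spte_bits(),
was added earlier in the series in arch/x86/kvm/mmu/tdp_iter.h. The
following is a reconstruction from the series and may differ from the
merged code in detail:

	static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
						  u64 mask, int level)
	{
		atomic64_t *sptep_atomic;

		/*
		 * If hardware can update the SPTE concurrently (a leaf
		 * SPTE with A/D bits enabled), clear the bits with an
		 * atomic AND so that racing hardware A/D updates are
		 * folded into the value returned to the caller.
		 */
		if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) {
			sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
			return (u64)atomic64_fetch_and(~mask, sptep_atomic);
		}

		/* Otherwise a plain, non-atomic write is sufficient. */
		__kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask);
		return old_spte;
	}

Note the asymmetry age_gfn_range() now relies on: in the A/D-enabled path
the helper's return value reflects any concurrent hardware updates, so
iter->old_spte is refreshed from it before new_spte is derived; in the
access-tracking path the SPTE has no A/D bits for hardware to set, so
kvm_tdp_mmu_write_spte() can install the precomputed new_spte directly.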