KVM: x86/mmu: Rename page-track APIs to reflect the new reality
author Sean Christopherson <seanjc@google.com>
Sat, 29 Jul 2023 01:35:30 +0000 (18:35 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 31 Aug 2023 18:08:15 +0000 (14:08 -0400)
Rename the page-track APIs to capture that they're all about tracking
writes, now that the facade of supporting multiple modes is gone.

Opportunistically replace "slot" with "gfn" in anticipation of removing
the @slot param from the external APIs.

No functional change intended.
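
For reference, a rough sketch (not part of the patch) of the external
caller pattern after the rename, mirroring the KVMGT hunks below; error
handling is elided and the surrounding function is assumed:

	/* Write-protect @gfn on behalf of an external write tracker. */
	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);

	write_lock(&kvm->mmu_lock);
	kvm_write_track_add_gfn(kvm, slot, gfn);
	write_unlock(&kvm->mmu_lock);

	srcu_read_unlock(&kvm->srcu, idx);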

Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Link: https://lore.kernel.org/r/20230729013535.1070024-25-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_page_track.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/page_track.c
arch/x86/kvm/mmu/page_track.h
drivers/gpu/drm/i915/gvt/kvmgt.c

index 9e4ee26..f5c1db3 100644 (file)
@@ -4,10 +4,10 @@
 
 #include <linux/kvm_types.h>
 
-void kvm_slot_page_track_add_page(struct kvm *kvm,
-                                 struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_slot_page_track_remove_page(struct kvm *kvm,
-                                    struct kvm_memory_slot *slot, gfn_t gfn);
+void kvm_write_track_add_gfn(struct kvm *kvm,
+                            struct kvm_memory_slot *slot, gfn_t gfn);
+void kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+                               gfn_t gfn);
 
 #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
 /*
index 2d76ead..cc9a1e6 100644 (file)
@@ -831,7 +831,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 
        /* the non-leaf shadow pages are keeping readonly. */
        if (sp->role.level > PG_LEVEL_4K)
-               return kvm_slot_page_track_add_page(kvm, slot, gfn);
+               return kvm_write_track_add_gfn(kvm, slot, gfn);
 
        kvm_mmu_gfn_disallow_lpage(slot, gfn);
 
@@ -877,7 +877,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
        slots = kvm_memslots_for_spte_role(kvm, sp->role);
        slot = __gfn_to_memslot(slots, gfn);
        if (sp->role.level > PG_LEVEL_4K)
-               return kvm_slot_page_track_remove_page(kvm, slot, gfn);
+               return kvm_write_track_remove_gfn(kvm, slot, gfn);
 
        kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
@@ -2807,7 +2807,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
         * track machinery is used to write-protect upper-level shadow pages,
         * i.e. this guards the role.level == 4K assertion below!
         */
-       if (kvm_slot_page_track_is_active(kvm, slot, gfn))
+       if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
                return -EPERM;
 
        /*
@@ -4201,7 +4201,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
         * guest is writing the page which is write tracked which can
         * not be fixed by page fault handler.
         */
-       if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn))
+       if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
                return true;
 
        return false;
index 9d0b1a5..dd6765b 100644 (file)
@@ -84,8 +84,8 @@ static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn,
  * @slot: the @gfn belongs to.
  * @gfn: the guest page.
  */
-void kvm_slot_page_track_add_page(struct kvm *kvm,
-                                 struct kvm_memory_slot *slot, gfn_t gfn)
+void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+                            gfn_t gfn)
 {
        if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm)))
                return;
@@ -101,12 +101,11 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
        if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
                kvm_flush_remote_tlbs(kvm);
 }
-EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
+EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn);
 
 /*
  * remove the guest page from the tracking pool which stops the interception
- * of corresponding access on that page. It is the opposed operation of
- * kvm_slot_page_track_add_page().
+ * of corresponding access on that page.
  *
  * It should be called under the protection both of mmu-lock and kvm->srcu
  * or kvm->slots_lock.
@@ -115,8 +114,8 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
  * @slot: the @gfn belongs to.
  * @gfn: the guest page.
  */
-void kvm_slot_page_track_remove_page(struct kvm *kvm,
-                                    struct kvm_memory_slot *slot, gfn_t gfn)
+void kvm_write_track_remove_gfn(struct kvm *kvm,
+                               struct kvm_memory_slot *slot, gfn_t gfn)
 {
        if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm)))
                return;
@@ -129,14 +128,13 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
         */
        kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
-EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
+EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn);
 
 /*
  * check if the corresponding access on the specified guest page is tracked.
  */
-bool kvm_slot_page_track_is_active(struct kvm *kvm,
-                                  const struct kvm_memory_slot *slot,
-                                  gfn_t gfn)
+bool kvm_gfn_is_write_tracked(struct kvm *kvm,
+                             const struct kvm_memory_slot *slot, gfn_t gfn)
 {
        int index;
 
index 48a5377..b020f99 100644 (file)
@@ -15,8 +15,8 @@ int kvm_page_track_create_memslot(struct kvm *kvm,
                                  struct kvm_memory_slot *slot,
                                  unsigned long npages);
 
-bool kvm_slot_page_track_is_active(struct kvm *kvm,
-                                  const struct kvm_memory_slot *slot, gfn_t gfn);
+bool kvm_gfn_is_write_tracked(struct kvm *kvm,
+                             const struct kvm_memory_slot *slot, gfn_t gfn);
 
 #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
 int kvm_page_track_init(struct kvm *kvm);
index e71182b..05a7e61 100644 (file)
@@ -1564,7 +1564,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
        }
 
        write_lock(&kvm->mmu_lock);
-       kvm_slot_page_track_add_page(kvm, slot, gfn);
+       kvm_write_track_add_gfn(kvm, slot, gfn);
        write_unlock(&kvm->mmu_lock);
 
        srcu_read_unlock(&kvm->srcu, idx);
@@ -1593,7 +1593,7 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
        }
 
        write_lock(&kvm->mmu_lock);
-       kvm_slot_page_track_remove_page(kvm, slot, gfn);
+       kvm_write_track_remove_gfn(kvm, slot, gfn);
        write_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);