KVM: x86/mmu: Drop @slot param from exported/external page-track APIs
author: Sean Christopherson <seanjc@google.com>
date: Sat, 29 Jul 2023 01:35:33 +0000 (18:35 -0700)
committer: Paolo Bonzini <pbonzini@redhat.com>
date: Thu, 31 Aug 2023 18:08:18 +0000 (14:08 -0400)
Refactor KVM's exported/external page-track, a.k.a. write-track, APIs
to take only the gfn and do the required memslot lookup in KVM proper.
Forcing users of the APIs to get the memslot unnecessarily bleeds
KVM internals into KVMGT and complicates usage of the APIs.

No functional change intended.

Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Link: https://lore.kernel.org/r/20230729013535.1070024-28-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_page_track.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/page_track.c
arch/x86/kvm/mmu/page_track.h
drivers/gpu/drm/i915/gvt/kvmgt.c

index f5c1db36cdb752f1ac68bb240443c81ee1e69220..4afab697e21ca8e1c46b65e3583b236ea85e227b 100644 (file)
@@ -4,11 +4,6 @@
 
 #include <linux/kvm_types.h>
 
-void kvm_write_track_add_gfn(struct kvm *kvm,
-                            struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-                               gfn_t gfn);
-
 #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
 /*
  * The notifier represented by @kvm_page_track_notifier_node is linked into
@@ -55,6 +50,8 @@ kvm_page_track_register_notifier(struct kvm *kvm,
 void
 kvm_page_track_unregister_notifier(struct kvm *kvm,
                                   struct kvm_page_track_notifier_node *n);
+int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn);
+int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
 #else
 /*
  * Allow defining a node in a structure even if page tracking is disabled, e.g.
index cc9a1e627d7ea29998a0301408329a2ada9bc489..060849d6aadf70d8f0ffe6dab5f9ac7e6268e2c4 100644 (file)
@@ -831,7 +831,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 
        /* the non-leaf shadow pages are keeping readonly. */
        if (sp->role.level > PG_LEVEL_4K)
-               return kvm_write_track_add_gfn(kvm, slot, gfn);
+               return __kvm_write_track_add_gfn(kvm, slot, gfn);
 
        kvm_mmu_gfn_disallow_lpage(slot, gfn);
 
@@ -877,7 +877,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
        slots = kvm_memslots_for_spte_role(kvm, sp->role);
        slot = __gfn_to_memslot(slots, gfn);
        if (sp->role.level > PG_LEVEL_4K)
-               return kvm_write_track_remove_gfn(kvm, slot, gfn);
+               return __kvm_write_track_remove_gfn(kvm, slot, gfn);
 
        kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
index 9bf01311ee9cc383c471b7a5ec9736212880143b..c9ab8b587d25f93f72074c8a02bdaeb40a327f8f 100644 (file)
@@ -74,16 +74,8 @@ static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn,
        slot->arch.gfn_write_track[index] += count;
 }
 
-/*
- * add guest page to the tracking pool so that corresponding access on that
- * page will be intercepted.
- *
- * @kvm: the guest instance we are interested in.
- * @slot: the @gfn belongs to.
- * @gfn: the guest page.
- */
-void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-                            gfn_t gfn)
+void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+                              gfn_t gfn)
 {
        lockdep_assert_held_write(&kvm->mmu_lock);
 
@@ -104,18 +96,9 @@ void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
        if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
                kvm_flush_remote_tlbs(kvm);
 }
-EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn);
 
-/*
- * remove the guest page from the tracking pool which stops the interception
- * of corresponding access on that page.
- *
- * @kvm: the guest instance we are interested in.
- * @slot: the @gfn belongs to.
- * @gfn: the guest page.
- */
-void kvm_write_track_remove_gfn(struct kvm *kvm,
-                               struct kvm_memory_slot *slot, gfn_t gfn)
+void __kvm_write_track_remove_gfn(struct kvm *kvm,
+                                 struct kvm_memory_slot *slot, gfn_t gfn)
 {
        lockdep_assert_held_write(&kvm->mmu_lock);
 
@@ -133,7 +116,6 @@ void kvm_write_track_remove_gfn(struct kvm *kvm,
         */
        kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
-EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn);
 
 /*
  * check if the corresponding access on the specified guest page is tracked.
@@ -257,4 +239,63 @@ void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
        srcu_read_unlock(&head->track_srcu, idx);
 }
 
+/*
+ * add guest page to the tracking pool so that corresponding access on that
+ * page will be intercepted.
+ *
+ * @kvm: the guest instance we are interested in.
+ * @gfn: the guest page.
+ */
+int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       struct kvm_memory_slot *slot;
+       int idx;
+
+       idx = srcu_read_lock(&kvm->srcu);
+
+       slot = gfn_to_memslot(kvm, gfn);
+       if (!slot) {
+               srcu_read_unlock(&kvm->srcu, idx);
+               return -EINVAL;
+       }
+
+       write_lock(&kvm->mmu_lock);
+       __kvm_write_track_add_gfn(kvm, slot, gfn);
+       write_unlock(&kvm->mmu_lock);
+
+       srcu_read_unlock(&kvm->srcu, idx);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn);
+
+/*
+ * remove the guest page from the tracking pool which stops the interception
+ * of corresponding access on that page.
+ *
+ * @kvm: the guest instance we are interested in.
+ * @gfn: the guest page.
+ */
+int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       struct kvm_memory_slot *slot;
+       int idx;
+
+       idx = srcu_read_lock(&kvm->srcu);
+
+       slot = gfn_to_memslot(kvm, gfn);
+       if (!slot) {
+               srcu_read_unlock(&kvm->srcu, idx);
+               return -EINVAL;
+       }
+
+       write_lock(&kvm->mmu_lock);
+       __kvm_write_track_remove_gfn(kvm, slot, gfn);
+       write_unlock(&kvm->mmu_lock);
+
+       srcu_read_unlock(&kvm->srcu, idx);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn);
 #endif
index b020f998ee2c6eb3a855b7a1baf25c26fa4923da..d4d72ed999b1dbbf4b3384c127adf1258e689e48 100644 (file)
@@ -15,6 +15,11 @@ int kvm_page_track_create_memslot(struct kvm *kvm,
                                  struct kvm_memory_slot *slot,
                                  unsigned long npages);
 
+void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+                              gfn_t gfn);
+void __kvm_write_track_remove_gfn(struct kvm *kvm,
+                                 struct kvm_memory_slot *slot, gfn_t gfn);
+
 bool kvm_gfn_is_write_tracked(struct kvm *kvm,
                              const struct kvm_memory_slot *slot, gfn_t gfn);
 
index 05a7e614ead096958b2447303192a7bfef6374e9..21342a93e4184c716342230633050e6114cc822e 100644 (file)
@@ -1546,9 +1546,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = {
 
 int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
 {
-       struct kvm *kvm = info->vfio_device.kvm;
-       struct kvm_memory_slot *slot;
-       int idx;
+       int r;
 
        if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
                return -ESRCH;
@@ -1556,18 +1554,9 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
        if (kvmgt_gfn_is_write_protected(info, gfn))
                return 0;
 
-       idx = srcu_read_lock(&kvm->srcu);
-       slot = gfn_to_memslot(kvm, gfn);
-       if (!slot) {
-               srcu_read_unlock(&kvm->srcu, idx);
-               return -EINVAL;
-       }
-
-       write_lock(&kvm->mmu_lock);
-       kvm_write_track_add_gfn(kvm, slot, gfn);
-       write_unlock(&kvm->mmu_lock);
-
-       srcu_read_unlock(&kvm->srcu, idx);
+       r = kvm_write_track_add_gfn(info->vfio_device.kvm, gfn);
+       if (r)
+               return r;
 
        kvmgt_protect_table_add(info, gfn);
        return 0;
@@ -1575,9 +1564,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
 
 int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
 {
-       struct kvm *kvm = info->vfio_device.kvm;
-       struct kvm_memory_slot *slot;
-       int idx;
+       int r;
 
        if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
                return -ESRCH;
@@ -1585,17 +1572,9 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
        if (!kvmgt_gfn_is_write_protected(info, gfn))
                return 0;
 
-       idx = srcu_read_lock(&kvm->srcu);
-       slot = gfn_to_memslot(kvm, gfn);
-       if (!slot) {
-               srcu_read_unlock(&kvm->srcu, idx);
-               return -EINVAL;
-       }
-
-       write_lock(&kvm->mmu_lock);
-       kvm_write_track_remove_gfn(kvm, slot, gfn);
-       write_unlock(&kvm->mmu_lock);
-       srcu_read_unlock(&kvm->srcu, idx);
+       r = kvm_write_track_remove_gfn(info->vfio_device.kvm, gfn);
+       if (r)
+               return r;
 
        kvmgt_protect_table_del(info, gfn);
        return 0;