drm/i915/gvt: use atomic operations to change the vGPU status
authorZhi Wang <zhi.a.wang@intel.com>
Thu, 10 Nov 2022 12:20:34 +0000 (12:20 +0000)
committerZhenyu Wang <zhenyuw@linux.intel.com>
Wed, 4 Jan 2023 15:21:19 +0000 (23:21 +0800)
Several vGPU status flags are used to decide the availability of the GVT-g
core logic when creating a vGPU. Use atomic bit operations when changing the
vGPU status to avoid races.

Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: intel-gvt-dev@lists.freedesktop.org
Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20221110122034.3382-2-zhi.a.wang@intel.com
drivers/gpu/drm/i915/gvt/debugfs.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/interrupt.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/vgpu.c

index e08ed0e9f1653e9b88690ba6319262686a9a7419..0616b73175f3e929e36682745c72e73c5b561da0 100644 (file)
@@ -151,6 +151,22 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
                        vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
                        "0x%llx\n");
 
+static int vgpu_status_get(void *data, u64 *val)
+{
+       struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
+       *val = 0;
+
+       if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+               *val |= (1 << INTEL_VGPU_STATUS_ATTACHED);
+       if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
+               *val |= (1 << INTEL_VGPU_STATUS_ACTIVE);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
+
 /**
  * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
  * @vgpu: a vGPU
@@ -162,11 +178,12 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
        snprintf(name, 16, "vgpu%d", vgpu->id);
        vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
 
-       debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
        debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
                            &vgpu_mmio_diff_fops);
        debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
                            &vgpu_scan_nonprivbb_fops);
+       debugfs_create_file("status", 0644, vgpu->debugfs, vgpu,
+                           &vgpu_status_fops);
 }
 
 /**
index 355f1c0e86641730b09a2851b8172325087b360d..ffe41e9be04fc091f7a984e614f0df58f408666b 100644 (file)
@@ -134,7 +134,8 @@ static void dmabuf_gem_object_free(struct kref *kref)
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
 
-       if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+       if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
+           !list_empty(&vgpu->dmabuf_obj_list_head)) {
                list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                        dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
                        if (dmabuf_obj == obj) {
index 51e5e8fb505bccac4627139f10ff4565f93f11f4..6b4039010caee169495f9c113a04645df02f92d6 100644 (file)
@@ -55,7 +55,7 @@ static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
        int idx;
        bool ret;
 
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return false;
 
        idx = srcu_read_lock(&kvm->srcu);
@@ -1178,7 +1178,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
        if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
                return 0;
 
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -EINVAL;
        pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
        if (is_error_noslot_pfn(pfn))
index 62823c0e13ab83a8a2a31b2b76d6dbf6300f1c90..2d65800d8e93b9b288fda886c35448394f99506a 100644 (file)
@@ -172,13 +172,18 @@ struct intel_vgpu_submission {
 
 #define KVMGT_DEBUGFS_FILENAME         "kvmgt_nr_cache_entries"
 
+enum {
+       INTEL_VGPU_STATUS_ATTACHED = 0,
+       INTEL_VGPU_STATUS_ACTIVE,
+       INTEL_VGPU_STATUS_NR_BITS,
+};
+
 struct intel_vgpu {
        struct vfio_device vfio_device;
        struct intel_gvt *gvt;
        struct mutex vgpu_lock;
        int id;
-       bool active;
-       bool attached;
+       DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS);
        bool pv_notified;
        bool failsafe;
        unsigned int resetting_eng;
@@ -467,7 +472,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 
 #define for_each_active_vgpu(gvt, vgpu, id) \
        idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
-               for_each_if(vgpu->active)
+               for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
 
 static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
                                            u32 offset, u32 val, bool low)
@@ -725,7 +730,7 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
 static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
                void *buf, unsigned long len)
 {
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -ESRCH;
        return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
 }
@@ -743,7 +748,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
 static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
                unsigned long gpa, void *buf, unsigned long len)
 {
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -ESRCH;
        return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
 }
index a6b2021b665ffdd40574e4352ebad70f267e756d..68eca023bbc68b4cb06bd3ded782d078c842a5e1 100644 (file)
@@ -433,7 +433,7 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
         * enabled by guest. so if msi_trigger is null, success is still
         * returned and don't inject interrupt into guest.
         */
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -ESRCH;
        if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
                return -EFAULT;
index f5451adcd4890c08cb121f1e1d00d58ce64d010f..8ae7039b3683257d73a38cc1ee72299aea6b7c4d 100644 (file)
@@ -638,7 +638,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
 
        mutex_lock(&vgpu->gvt->lock);
        for_each_active_vgpu(vgpu->gvt, itr, id) {
-               if (!itr->attached)
+               if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
                        continue;
 
                if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
@@ -655,9 +655,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 {
        struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
 
-       if (vgpu->attached)
-               return -EEXIST;
-
        if (!vgpu->vfio_device.kvm ||
            vgpu->vfio_device.kvm->mm != current->mm) {
                gvt_vgpu_err("KVM is required to use Intel vGPU\n");
@@ -667,14 +664,14 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
        if (__kvmgt_vgpu_exist(vgpu))
                return -EEXIST;
 
-       vgpu->attached = true;
-
        vgpu->track_node.track_write = kvmgt_page_track_write;
        vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
        kvm_get_kvm(vgpu->vfio_device.kvm);
        kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
                                         &vgpu->track_node);
 
+       set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
        debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
                             &vgpu->nr_cache_entries);
 
@@ -698,11 +695,10 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 {
        struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
 
-       if (!vgpu->attached)
-               return;
-
        intel_gvt_release_vgpu(vgpu);
 
+       clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
        debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
 
        kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -718,8 +714,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
        vgpu->dma_addr_cache = RB_ROOT;
 
        intel_vgpu_release_msi_eventfd_ctx(vgpu);
-
-       vgpu->attached = false;
 }
 
 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1512,9 +1506,6 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
 {
        struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
 
-       if (WARN_ON_ONCE(vgpu->attached))
-               return;
-
        vfio_unregister_group_dev(&vgpu->vfio_device);
        vfio_put_device(&vgpu->vfio_device);
 }
@@ -1559,7 +1550,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
        struct kvm_memory_slot *slot;
        int idx;
 
-       if (!info->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
                return -ESRCH;
 
        idx = srcu_read_lock(&kvm->srcu);
@@ -1589,8 +1580,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
        struct kvm_memory_slot *slot;
        int idx;
 
-       if (!info->attached)
-               return 0;
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
+               return -ESRCH;
 
        idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, gfn);
@@ -1668,7 +1659,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
        struct gvt_dma *entry;
        int ret;
 
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -EINVAL;
 
        mutex_lock(&vgpu->cache_lock);
@@ -1714,8 +1705,8 @@ int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
        struct gvt_dma *entry;
        int ret = 0;
 
-       if (!vgpu->attached)
-               return -ENODEV;
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+               return -EINVAL;
 
        mutex_lock(&vgpu->cache_lock);
        entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
@@ -1742,7 +1733,7 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
 {
        struct gvt_dma *entry;
 
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return;
 
        mutex_lock(&vgpu->cache_lock);
@@ -1778,7 +1769,7 @@ static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
        idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
                if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
                                       (void *)&gvt->service_request)) {
-                       if (vgpu->active)
+                       if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
                                intel_vgpu_emulate_vblank(vgpu);
                }
        }
index 8009239935f7bb2261069d658415f908984e511f..f4055804aad1feb470422a5f808454359390e771 100644 (file)
@@ -866,7 +866,8 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
                goto out;
        }
 
-       if (!scheduler->current_vgpu->active ||
+       if (!test_bit(INTEL_VGPU_STATUS_ACTIVE,
+                     scheduler->current_vgpu->status) ||
            list_empty(workload_q_head(scheduler->current_vgpu, engine)))
                goto out;
 
index 3c529c2705ddcaf4c796d776c42e135ecebe34f4..a5497440484f129c1525c8e4ceb137eabcf3bb9c 100644 (file)
@@ -166,9 +166,7 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
  */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-       mutex_lock(&vgpu->vgpu_lock);
-       vgpu->active = true;
-       mutex_unlock(&vgpu->vgpu_lock);
+       set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 }
 
 /**
@@ -183,7 +181,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
        mutex_lock(&vgpu->vgpu_lock);
 
-       vgpu->active = false;
+       clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 
        if (atomic_read(&vgpu->submission.running_workload_num)) {
                mutex_unlock(&vgpu->vgpu_lock);
@@ -228,7 +226,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        struct intel_gvt *gvt = vgpu->gvt;
        struct drm_i915_private *i915 = gvt->gt->i915;
 
-       drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
+       drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status),
+                "vGPU is still active!\n");
 
        /*
         * remove idr first so later clean can judge if need to stop
@@ -285,8 +284,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
        if (ret)
                goto out_free_vgpu;
 
-       vgpu->active = false;
-
+       clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
        return vgpu;
 
 out_free_vgpu: