drm/amdgpu: use xarray for storing pasid in vm
author	Nirmoy Das <nirmoy.das@amd.com>
Mon, 28 Jun 2021 21:29:39 +0000 (23:29 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
Thu, 8 Jul 2021 19:12:07 +0000 (15:12 -0400)
Replace the idr with an xarray, as we actually need hash functionality.
Clean up the VM pasid handling by adding a helper function.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
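
For context, the conversion boils down to replacing the idr plus external
pasid_lock pair with an xarray, which carries its own IRQ-safe spinlock. A
minimal, self-contained sketch of that pattern is shown below; the
example_dev structure and the example_* helpers are hypothetical stand-ins
for illustration only, not part of the patch:

#include <linux/xarray.h>

/* Hypothetical container, standing in for amdgpu_vm_manager. */
struct example_dev {
	struct xarray pasids;	/* pasid -> vm pointer map */
};

static void example_init(struct example_dev *edev)
{
	/* The xarray brings its own spinlock; mark it as IRQ safe. */
	xa_init_flags(&edev->pasids, XA_FLAGS_LOCK_IRQ);
}

static int example_map(struct example_dev *edev, u32 pasid, void *vm)
{
	/* xa_store_irq() returns the old entry or an xa_err()-encoded error. */
	return xa_err(xa_store_irq(&edev->pasids, pasid, vm, GFP_KERNEL));
}

static int example_unmap(struct example_dev *edev, u32 pasid)
{
	return xa_err(xa_erase_irq(&edev->pasids, pasid));
}

static void *example_lookup(struct example_dev *edev, u32 pasid)
{
	unsigned long flags;
	void *vm;

	/* Lookup under the xarray's own lock, as in amdgpu_vm_handle_fault(). */
	xa_lock_irqsave(&edev->pasids, flags);
	vm = xa_load(&edev->pasids, pasid);
	xa_unlock_irqrestore(&edev->pasids, flags);

	return vm;
}

Because the mapping is consulted from the GPU page-fault interrupt path, the
map is created with XA_FLAGS_LOCK_IRQ and all updates go through the _irq
variants of the xarray API, mirroring what the patch does for
adev->vm_manager.pasids.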

index 79cfa2d..9ea527e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -88,6 +88,46 @@ struct amdgpu_prt_cb {
        struct dma_fence_cb cb;
 };
 
+/**
+ * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm pointer
+ * @pasid: the pasid the VM is using on this GPU
+ *
+ * Set the pasid this VM is using on this GPU; it can also be used to remove
+ * the pasid by passing in zero.
+ *
+ * Returns: 0 for success, error for failure.
+ */
+int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                       u32 pasid)
+{
+       int r;
+
+       if (vm->pasid == pasid)
+               return 0;
+
+       if (vm->pasid) {
+               r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
+               if (r < 0)
+                       return r;
+
+               vm->pasid = 0;
+       }
+
+       if (pasid) {
+               r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
+                                       GFP_KERNEL));
+               if (r < 0)
+                       return r;
+
+               vm->pasid = pasid;
+       }
+
+       return 0;
+}
+
 /*
  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
  * happens while holding this lock anywhere to prevent deadlocks when
@@ -2945,18 +2985,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 
        amdgpu_bo_unreserve(vm->root.bo);
 
-       if (pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-                             GFP_ATOMIC);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-               if (r < 0)
-                       goto error_free_root;
-
-               vm->pasid = pasid;
-       }
+       r = amdgpu_vm_set_pasid(adev, vm, pasid);
+       if (r)
+               goto error_free_root;
 
        INIT_KFIFO(vm->faults);
 
@@ -3044,18 +3075,15 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        if (r)
                goto unreserve_bo;
 
-       if (pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-                             GFP_ATOMIC);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+       /* Free the original amdgpu allocated pasid;
+        * it will be replaced with the kfd allocated pasid.
+        */
+       if (vm->pasid)
+               amdgpu_pasid_free(vm->pasid);
 
-               if (r == -ENOSPC)
-                       goto unreserve_bo;
-               r = 0;
-       }
+       r = amdgpu_vm_set_pasid(adev, vm, pasid);
+       if (r)
+               goto unreserve_bo;
 
        /* Check if PD needs to be reinitialized and do it before
         * changing any other state, in case it fails.
@@ -3066,7 +3094,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                       to_amdgpu_bo_vm(vm->root.bo),
                                       false);
                if (r)
-                       goto free_idr;
+                       goto free_pasid_entry;
        }
 
        /* Update VM state */
@@ -3083,7 +3111,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                r = amdgpu_bo_sync_wait(vm->root.bo,
                                        AMDGPU_FENCE_OWNER_UNDEFINED, true);
                if (r)
-                       goto free_idr;
+                       goto free_pasid_entry;
 
                vm->update_funcs = &amdgpu_vm_cpu_funcs;
        } else {
@@ -3093,36 +3121,13 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        vm->last_update = NULL;
        vm->is_compute_context = true;
 
-       if (vm->pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-
-               /* Free the original amdgpu allocated pasid
-                * Will be replaced with kfd allocated pasid
-                */
-               amdgpu_pasid_free(vm->pasid);
-               vm->pasid = 0;
-       }
-
        /* Free the shadow bo for compute VM */
        amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
 
-       if (pasid)
-               vm->pasid = pasid;
-
        goto unreserve_bo;
 
-free_idr:
-       if (pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               idr_remove(&adev->vm_manager.pasid_idr, pasid);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-       }
+free_pasid_entry:
+       amdgpu_vm_set_pasid(adev, vm, 0);
 unreserve_bo:
        amdgpu_bo_unreserve(vm->root.bo);
        return r;
@@ -3138,14 +3143,7 @@ unreserve_bo:
  */
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-       if (vm->pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-       }
-       vm->pasid = 0;
+       amdgpu_vm_set_pasid(adev, vm, 0);
        vm->is_compute_context = false;
 }
 
@@ -3169,15 +3167,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        root = amdgpu_bo_ref(vm->root.bo);
        amdgpu_bo_reserve(root, true);
-       if (vm->pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-               vm->pasid = 0;
-       }
-
+       amdgpu_vm_set_pasid(adev, vm, 0);
        dma_fence_wait(vm->last_unlocked, false);
        dma_fence_put(vm->last_unlocked);
 
@@ -3259,8 +3249,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
        adev->vm_manager.vm_update_mode = 0;
 #endif
 
-       idr_init(&adev->vm_manager.pasid_idr);
-       spin_lock_init(&adev->vm_manager.pasid_lock);
+       xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
 }
 
 /**
@@ -3272,8 +3261,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
  */
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
-       WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
-       idr_destroy(&adev->vm_manager.pasid_idr);
+       WARN_ON(!xa_empty(&adev->vm_manager.pasids));
+       xa_destroy(&adev->vm_manager.pasids);
 
        amdgpu_vmid_mgr_fini(adev);
 }
@@ -3342,13 +3331,13 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
        struct amdgpu_vm *vm;
        unsigned long flags;
 
-       spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+       xa_lock_irqsave(&adev->vm_manager.pasids, flags);
 
-       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+       vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm)
                *task_info = vm->task_info;
 
-       spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+       xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
 }
 
 /**
@@ -3390,15 +3379,15 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
        struct amdgpu_vm *vm;
        int r;
 
-       spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
-       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+       xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+       vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm) {
                root = amdgpu_bo_ref(vm->root.bo);
                is_compute_context = vm->is_compute_context;
        } else {
                root = NULL;
        }
-       spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
+       xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
 
        if (!root)
                return false;
@@ -3416,11 +3405,11 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                goto error_unref;
 
        /* Double check that the VM still exists */
-       spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
-       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+       xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+       vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm && vm->root.bo != root)
                vm = NULL;
-       spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
+       xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
        if (!vm)
                goto error_unlock;
 
index ddb85a8..bcbe362 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -359,8 +359,7 @@ struct amdgpu_vm_manager {
        /* PASID to VM mapping, will be used in interrupt context to
         * look up VM of a page fault
         */
-       struct idr                              pasid_idr;
-       spinlock_t                              pasid_lock;
+       struct xarray                           pasids;
 };
 
 struct amdgpu_bo_va_mapping;
@@ -375,6 +374,9 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 
+int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                       u32 pasid);
+
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
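
As a usage note, here is a hedged caller-side sketch of the new helper's
contract; the example_* wrappers are hypothetical, and the behaviour simply
mirrors how amdgpu_vm_init() installs a pasid and how amdgpu_vm_fini() and
amdgpu_vm_release_compute() drop it again in the diff above:

#include "amdgpu.h"	/* driver-internal header providing amdgpu_device/amdgpu_vm */

static int example_attach_pasid(struct amdgpu_device *adev,
				struct amdgpu_vm *vm, u32 pasid)
{
	/* Returns 0 on success or a negative xa_err() value on failure. */
	return amdgpu_vm_set_pasid(adev, vm, pasid);
}

static void example_detach_pasid(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	/* Passing a pasid of zero removes the existing mapping. */
	amdgpu_vm_set_pasid(adev, vm, 0);
}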