drm/amdgpu: reserve the PD during unmap and remove
Author: Christian König <christian.koenig@amd.com>
Tue, 8 Mar 2016 16:47:46 +0000 (17:47 +0100)
Committer: Alex Deucher <alexander.deucher@amd.com>
Wed, 9 Mar 2016 18:04:01 +0000 (13:04 -0500)
We need to protect not only the mapping tree and the freed list themselves,
but also the items on those lists.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

index cb27754..7f6b4d9 100644 (file)
@@ -140,25 +140,40 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
 {
-       struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-       struct amdgpu_device *adev = rbo->adev;
+       struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+       struct amdgpu_device *adev = bo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
+
+       struct amdgpu_bo_list_entry vm_pd;
+       struct list_head list, duplicates;
+       struct ttm_validate_buffer tv;
+       struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        int r;
-       r = amdgpu_bo_reserve(rbo, true);
+
+       INIT_LIST_HEAD(&list);
+       INIT_LIST_HEAD(&duplicates);
+
+       tv.bo = &bo->tbo;
+       tv.shared = true;
+       list_add(&tv.head, &list);
+
+       amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
                return;
        }
-       bo_va = amdgpu_vm_bo_find(vm, rbo);
+       bo_va = amdgpu_vm_bo_find(vm, bo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        amdgpu_vm_bo_rmv(adev, bo_va);
                }
        }
-       amdgpu_bo_unreserve(rbo);
+       ttm_eu_backoff_reservation(&ticket, &list);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -580,11 +595,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        tv.shared = true;
        list_add(&tv.head, &list);
 
-       if (args->operation == AMDGPU_VA_OP_MAP) {
-               tv_pd.bo = &fpriv->vm.page_directory->tbo;
-               tv_pd.shared = true;
-               list_add(&tv_pd.head, &list);
-       }
+       tv_pd.bo = &fpriv->vm.page_directory->tbo;
+       tv_pd.shared = true;
+       list_add(&tv_pd.head, &list);
+
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);