drm/amdgpu: use drm_exec for MES testing
Author: Christian König <christian.koenig@amd.com>
Tue, 16 Aug 2022 13:32:30 +0000 (15:32 +0200)
Committer: Christian König <christian.koenig@amd.com>
Wed, 12 Jul 2023 12:14:44 +0000 (14:14 +0200)
Start using the new component here as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230711133122.3710-6-christian.koenig@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c

index e9091eb..3e3dadd 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include <linux/firmware.h>
+#include <drm/drm_exec.h>
 
 #include "amdgpu_mes.h"
 #include "amdgpu.h"
@@ -1168,34 +1169,31 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
                                 struct amdgpu_mes_ctx_data *ctx_data)
 {
        struct amdgpu_bo_va *bo_va;
-       struct ww_acquire_ctx ticket;
-       struct list_head list;
-       struct amdgpu_bo_list_entry pd;
-       struct ttm_validate_buffer csa_tv;
        struct amdgpu_sync sync;
+       struct drm_exec exec;
        int r;
 
        amdgpu_sync_create(&sync);
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&csa_tv.head);
 
-       csa_tv.bo = &ctx_data->meta_data_obj->tbo;
-       csa_tv.num_shared = 1;
-
-       list_add(&csa_tv.head, &list);
-       amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-       if (r) {
-               DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
-               return r;
+       drm_exec_init(&exec, 0);
+       drm_exec_until_all_locked(&exec) {
+               r = drm_exec_lock_obj(&exec,
+                                     &ctx_data->meta_data_obj->tbo.base);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto error_fini_exec;
+
+               r = amdgpu_vm_lock_pd(vm, &exec, 0);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto error_fini_exec;
        }
 
        bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
        if (!bo_va) {
-               ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for meta data BO\n");
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto error_fini_exec;
        }
 
        r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
@@ -1205,33 +1203,35 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
 
        if (r) {
                DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
-               goto error;
+               goto error_del_bo_va;
        }
 
        r = amdgpu_vm_bo_update(adev, bo_va, false);
        if (r) {
                DRM_ERROR("failed to do vm_bo_update on meta data\n");
-               goto error;
+               goto error_del_bo_va;
        }
        amdgpu_sync_fence(&sync, bo_va->last_pt_update);
 
        r = amdgpu_vm_update_pdes(adev, vm, false);
        if (r) {
                DRM_ERROR("failed to update pdes on meta data\n");
-               goto error;
+               goto error_del_bo_va;
        }
        amdgpu_sync_fence(&sync, vm->last_update);
 
        amdgpu_sync_wait(&sync, false);
-       ttm_eu_backoff_reservation(&ticket, &list);
+       drm_exec_fini(&exec);
 
        amdgpu_sync_free(&sync);
        ctx_data->meta_data_va = bo_va;
        return 0;
 
-error:
+error_del_bo_va:
        amdgpu_vm_bo_del(adev, bo_va);
-       ttm_eu_backoff_reservation(&ticket, &list);
+
+error_fini_exec:
+       drm_exec_fini(&exec);
        amdgpu_sync_free(&sync);
        return r;
 }
@@ -1242,34 +1242,30 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
        struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
        struct amdgpu_bo *bo = ctx_data->meta_data_obj;
        struct amdgpu_vm *vm = bo_va->base.vm;
-       struct amdgpu_bo_list_entry vm_pd;
-       struct list_head list, duplicates;
-       struct dma_fence *fence = NULL;
-       struct ttm_validate_buffer tv;
-       struct ww_acquire_ctx ticket;
-       long r = 0;
-
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&duplicates);
-
-       tv.bo = &bo->tbo;
-       tv.num_shared = 2;
-       list_add(&tv.head, &list);
-
-       amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-       if (r) {
-               dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%ld)\n", r);
-               return r;
+       struct dma_fence *fence;
+       struct drm_exec exec;
+       long r;
+
+       drm_exec_init(&exec, 0);
+       drm_exec_until_all_locked(&exec) {
+               r = drm_exec_lock_obj(&exec,
+                                     &ctx_data->meta_data_obj->tbo.base);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto out_unlock;
+
+               r = amdgpu_vm_lock_pd(vm, &exec, 0);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto out_unlock;
        }
 
        amdgpu_vm_bo_del(adev, bo_va);
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;
 
-       r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
+       r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+                                  &fence);
        if (r)
                goto out_unlock;
        if (fence) {
@@ -1288,7 +1284,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
 out_unlock:
        if (unlikely(r < 0))
                dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
-       ttm_eu_backoff_reservation(&ticket, &list);
+       drm_exec_fini(&exec);
 
        return r;
 }