Merge tag 'drm-misc-next-2023-07-13' of git://anongit.freedesktop.org/drm/drm-misc...
[platform/kernel/linux-starfive.git] drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 143d11a..d0e6009 100644
@@ -34,6 +34,7 @@
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
 #include <drm/ttm/ttm_tt.h>
+#include <drm/drm_exec.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
@@ -339,25 +340,20 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 }
 
 /**
- * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
+ * amdgpu_vm_lock_pd - lock PD in drm_exec
  *
  * @vm: vm providing the BOs
- * @validated: head of validation list
- * @entry: entry to add
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
  *
- * Add the page directory to the list of BOs to
- * validate for command submission.
+ * Lock the VM root PD in the DRM execution context.
  */
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
-                        struct list_head *validated,
-                        struct amdgpu_bo_list_entry *entry)
+int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
+                     unsigned int num_fences)
 {
-       entry->priority = 0;
-       entry->tv.bo = &vm->root.bo->tbo;
-       /* Two for VM updates, one for TTM and one for the CS job */
-       entry->tv.num_shared = 4;
-       entry->user_pages = NULL;
-       list_add(&entry->tv.head, validated);
+       /* We need at least two fences for the VM PD/PT updates */
+       return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
+                                   2 + num_fences);
 }
 
 /**
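The new amdgpu_vm_lock_pd() replaces amdgpu_vm_get_pd_bo() and is meant to be
called from inside a drm_exec locking loop. A minimal caller sketch follows
(not part of this patch; variable names and the extra fence count are
illustrative, assuming the drm_exec API as merged with this tag):

	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* Reserve the root PD; two fence slots are added internally
		 * for the PD/PT updates on top of the ones requested here.
		 */
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;
	}
	/* ... validate BOs, submit work, add fences ... */
out:
	drm_exec_fini(&exec);

Unlike the removed amdgpu_vm_get_pd_bo(), which only queued the PD on a
ttm_eu validation list, the drm_exec variant locks the BO right away and can
fail, hence the int return value.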
@@ -1771,18 +1767,30 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 
        /* Insert partial mapping before the range */
        if (!list_empty(&before->list)) {
+               struct amdgpu_bo *bo = before->bo_va->base.bo;
+
                amdgpu_vm_it_insert(before, &vm->va);
                if (before->flags & AMDGPU_PTE_PRT)
                        amdgpu_vm_prt_get(adev);
+
+               if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+                   !before->bo_va->base.moved)
+                       amdgpu_vm_bo_moved(&before->bo_va->base);
        } else {
                kfree(before);
        }
 
        /* Insert partial mapping after the range */
        if (!list_empty(&after->list)) {
+               struct amdgpu_bo *bo = after->bo_va->base.bo;
+
                amdgpu_vm_it_insert(after, &vm->va);
                if (after->flags & AMDGPU_PTE_PRT)
                        amdgpu_vm_prt_get(adev);
+
+               if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+                   !after->bo_va->base.moved)
+                       amdgpu_vm_bo_moved(&after->bo_va->base);
        } else {
                kfree(after);
        }
@@ -2233,16 +2241,16 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        if (r)
                return r;
 
-       /* Sanity checks */
-       if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
-               r = -EINVAL;
-               goto unreserve_bo;
-       }
-
        /* Check if PD needs to be reinitialized and do it before
         * changing any other state, in case it fails.
         */
        if (pte_support_ats != vm->pte_support_ats) {
+               /* Sanity checks */
+               if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
+                       r = -EINVAL;
+                       goto unreserve_bo;
+               }
+
                vm->pte_support_ats = pte_support_ats;
                r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
                                       false);