drm/amdgpu: drop the fence argument from amdgpu_vmid_grab
author	Christian König <christian.koenig@amd.com>
Wed, 28 Sep 2022 11:21:05 +0000 (13:21 +0200)
committer	Christian König <christian.koenig@amd.com>
Thu, 3 Nov 2022 11:45:19 +0000 (12:45 +0100)
The fence passed in here is always the job's scheduler finished fence anyway, so take it directly from the job.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-5-christian.koenig@amd.com
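
For reference, a minimal sketch of the calling convention after this change; the wrapper function below is illustrative only and not part of the patch. amdgpu_vmid_grab() now derives the fence from the job itself (&job->base.s_fence->finished):

    /* Sketch only: the explicit fence argument is gone; amdgpu_vmid_grab()
     * internally uses &job->base.s_fence->finished to protect the VMID
     * from reuse.
     */
    static int example_grab_vmid(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                                 struct amdgpu_job *job)
    {
            return amdgpu_vmid_grab(vm, ring, &job->sync, job);
    }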
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 03d115d2b5edd25a65fb1b5dcfec0306bde382f3..b76294d4275bc08feccd76a00ce19c0130f2bbea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -244,7 +244,6 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
  * @id: resulting VMID
  *
@@ -253,7 +252,6 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
                                     struct amdgpu_ring *ring,
                                     struct amdgpu_sync *sync,
-                                    struct dma_fence *fence,
                                     struct amdgpu_job *job,
                                     struct amdgpu_vmid **id)
 {
@@ -290,7 +288,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
        /* Good we can use this VMID. Remember this submission as
        * user of the VMID.
        */
-       r = amdgpu_sync_fence(&(*id)->active, fence);
+       r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
        if (r)
                return r;
 
@@ -305,7 +303,6 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
  * @id: resulting VMID
  *
@@ -314,7 +311,6 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                                 struct amdgpu_ring *ring,
                                 struct amdgpu_sync *sync,
-                                struct dma_fence *fence,
                                 struct amdgpu_job *job,
                                 struct amdgpu_vmid **id)
 {
@@ -352,7 +348,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                /* Good, we can use this VMID. Remember this submission as
                 * user of the VMID.
                 */
-               r = amdgpu_sync_fence(&(*id)->active, fence);
+               r = amdgpu_sync_fence(&(*id)->active,
+                                     &job->base.s_fence->finished);
                if (r)
                        return r;
 
@@ -371,14 +368,12 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                    struct amdgpu_sync *sync, struct dma_fence *fence,
-                    struct amdgpu_job *job)
+                    struct amdgpu_sync *sync, struct amdgpu_job *job)
 {
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
@@ -393,11 +388,11 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                goto error;
 
        if (vm->reserved_vmid[vmhub]) {
-               r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
+               r = amdgpu_vmid_grab_reserved(vm, ring, sync, job, &id);
                if (r || !id)
                        goto error;
        } else {
-               r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
+               r = amdgpu_vmid_grab_used(vm, ring, sync, job, &id);
                if (r)
                        goto error;
 
@@ -406,7 +401,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                        id = idle;
 
                        /* Remember this submission as user of the VMID */
-                       r = amdgpu_sync_fence(&id->active, fence);
+                       r = amdgpu_sync_fence(&id->active,
+                                             &job->base.s_fence->finished);
                        if (r)
                                goto error;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 06c8a0034fa5229b183c545254daba29fdc1210f..1b1e7d04655c0824363b0857aa562ccca19a45e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -84,8 +84,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub);
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                    struct amdgpu_sync *sync, struct dma_fence *fence,
-                    struct amdgpu_job *job);
+                    struct amdgpu_sync *sync, struct amdgpu_job *job);
 void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
                       unsigned vmid);
 void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 46c99331d7f126a98d31631f1f03fac89c89d048..5aa053acc0b4f772447891fadd8a771c20cfce93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -256,9 +256,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
        }
 
        while (fence == NULL && vm && !job->vmid) {
-               r = amdgpu_vmid_grab(vm, ring, &job->sync,
-                                    &job->base.s_fence->finished,
-                                    job);
+               r = amdgpu_vmid_grab(vm, ring, &job->sync, job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);