drm/amdgpu: use kernel fence for last_pt_update
authorChunming Zhou <david1.zhou@amd.com>
Mon, 3 Aug 2015 10:19:38 +0000 (18:19 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:50:51 +0000 (16:50 -0400)
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <jammy.zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 5b8e1ae..371ff08 100644 (file)
@@ -539,7 +539,7 @@ struct amdgpu_bo_va_mapping {
 struct amdgpu_bo_va {
        /* protected by bo being reserved */
        struct list_head                bo_list;
-       struct amdgpu_fence             *last_pt_update;
+       struct fence                    *last_pt_update;
        unsigned                        ref_count;
 
        /* protected by vm mutex and spinlock */
@@ -1241,7 +1241,7 @@ union amdgpu_sched_job_param {
                struct amdgpu_vm *vm;
                uint64_t start;
                uint64_t last;
-               struct amdgpu_fence **fence;
+               struct fence **fence;
 
        } vm_mapping;
        struct {
index fe81b46..aee5911 100644 (file)
@@ -581,7 +581,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                        if (r)
                                return r;
 
-                       f = &bo_va->last_pt_update->base;
+                       f = bo_va->last_pt_update;
                        r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
                        if (r)
                                return r;
index 8745d4c..d90254f 100644 (file)
@@ -737,7 +737,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
                                uint64_t start, uint64_t end,
-                               struct amdgpu_fence *fence)
+                               struct fence *fence)
 {
        unsigned i;
 
@@ -745,20 +745,20 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
        end >>= amdgpu_vm_block_size;
 
        for (i = start; i <= end; ++i)
-               amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true);
+               amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
 static int amdgpu_vm_bo_update_mapping_run_job(
        struct amdgpu_cs_parser *sched_job)
 {
-       struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
+       struct fence **fence = sched_job->job_param.vm_mapping.fence;
        amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
                            sched_job->job_param.vm_mapping.start,
                            sched_job->job_param.vm_mapping.last + 1,
-                           sched_job->ibs[sched_job->num_ibs -1].fence);
+                           &sched_job->ibs[sched_job->num_ibs -1].fence->base);
        if (fence) {
-               amdgpu_fence_unref(fence);
-               *fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
+               fence_put(*fence);
+               *fence = fence_get(&sched_job->ibs[sched_job->num_ibs -1].fence->base);
        }
        return 0;
 }
@@ -781,7 +781,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct amdgpu_vm *vm,
                                       struct amdgpu_bo_va_mapping *mapping,
                                       uint64_t addr, uint32_t gtt_flags,
-                                      struct amdgpu_fence **fence)
+                                      struct fence **fence)
 {
        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        unsigned nptes, ncmds, ndw;
@@ -902,10 +902,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                }
 
                amdgpu_vm_fence_pts(vm, mapping->it.start,
-                                   mapping->it.last + 1, ib->fence);
+                                   mapping->it.last + 1, &ib->fence->base);
                if (fence) {
-                       amdgpu_fence_unref(fence);
-                       *fence = amdgpu_fence_ref(ib->fence);
+                       fence_put(*fence);
+                       *fence = fence_get(&ib->fence->base);
                }
 
                amdgpu_ib_free(adev, ib);
@@ -1038,7 +1038,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
        spin_unlock(&vm->status_lock);
 
        if (bo_va)
-               r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+               r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
 
        return r;
 }
@@ -1318,7 +1318,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                kfree(mapping);
        }
 
-       amdgpu_fence_unref(&bo_va->last_pt_update);
+       fence_put(bo_va->last_pt_update);
        kfree(bo_va);
 
        mutex_unlock(&vm->mutex);