drm/amdgpu: cleanup coding style in amdgpu_vm_flush
Author: Christian König <christian.koenig@amd.com>
Mon, 3 Apr 2017 12:16:07 +0000 (14:16 +0200)
Committer: Alex Deucher <alexander.deucher@amd.com>
Wed, 5 Apr 2017 03:34:09 +0000 (23:34 -0400)
Abort early if there is nothing to do and correctly indent the "if"s.

Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 5bedba3..99889fb 100644 (file)
@@ -597,60 +597,62 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
                id->gws_size != job->gws_size ||
                id->oa_base != job->oa_base ||
                id->oa_size != job->oa_size);
+       unsigned patch_offset = 0;
        int r;
 
-       if (job->vm_needs_flush || gds_switch_needed ||
-               amdgpu_vm_had_gpu_reset(adev, id) ||
-               amdgpu_vm_ring_has_compute_vm_bug(ring)) {
-               unsigned patch_offset = 0;
+       if (!job->vm_needs_flush && !gds_switch_needed &&
+           !amdgpu_vm_had_gpu_reset(adev, id) &&
+           !amdgpu_vm_ring_has_compute_vm_bug(ring))
+               return 0;
 
-               if (ring->funcs->init_cond_exec)
-                       patch_offset = amdgpu_ring_init_cond_exec(ring);
 
-               if (ring->funcs->emit_pipeline_sync &&
-                       (job->vm_needs_flush || gds_switch_needed ||
-                       amdgpu_vm_ring_has_compute_vm_bug(ring)))
-                       amdgpu_ring_emit_pipeline_sync(ring);
+       if (ring->funcs->init_cond_exec)
+               patch_offset = amdgpu_ring_init_cond_exec(ring);
 
-               if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
-                       amdgpu_vm_had_gpu_reset(adev, id))) {
-                       struct dma_fence *fence;
-                       u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+       if (ring->funcs->emit_pipeline_sync &&
+           (job->vm_needs_flush || gds_switch_needed ||
+            amdgpu_vm_ring_has_compute_vm_bug(ring)))
+               amdgpu_ring_emit_pipeline_sync(ring);
 
-                       trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
-                       amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
+       if (ring->funcs->emit_vm_flush &&
+           (job->vm_needs_flush || amdgpu_vm_had_gpu_reset(adev, id))) {
 
-                       r = amdgpu_fence_emit(ring, &fence);
-                       if (r)
-                               return r;
+               u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+               struct dma_fence *fence;
 
-                       mutex_lock(&adev->vm_manager.lock);
-                       dma_fence_put(id->last_flush);
-                       id->last_flush = fence;
-                       mutex_unlock(&adev->vm_manager.lock);
-               }
+               trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
+               amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
 
-               if (gds_switch_needed) {
-                       id->gds_base = job->gds_base;
-                       id->gds_size = job->gds_size;
-                       id->gws_base = job->gws_base;
-                       id->gws_size = job->gws_size;
-                       id->oa_base = job->oa_base;
-                       id->oa_size = job->oa_size;
-                       amdgpu_ring_emit_gds_switch(ring, job->vm_id,
-                                                       job->gds_base, job->gds_size,
-                                                       job->gws_base, job->gws_size,
-                                                       job->oa_base, job->oa_size);
-               }
+               r = amdgpu_fence_emit(ring, &fence);
+               if (r)
+                       return r;
 
-               if (ring->funcs->patch_cond_exec)
-                       amdgpu_ring_patch_cond_exec(ring, patch_offset);
+               mutex_lock(&adev->vm_manager.lock);
+               dma_fence_put(id->last_flush);
+               id->last_flush = fence;
+               mutex_unlock(&adev->vm_manager.lock);
+       }
 
-               /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
-               if (ring->funcs->emit_switch_buffer) {
-                       amdgpu_ring_emit_switch_buffer(ring);
-                       amdgpu_ring_emit_switch_buffer(ring);
-               }
+       if (gds_switch_needed) {
+               id->gds_base = job->gds_base;
+               id->gds_size = job->gds_size;
+               id->gws_base = job->gws_base;
+               id->gws_size = job->gws_size;
+               id->oa_base = job->oa_base;
+               id->oa_size = job->oa_size;
+               amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+                                           job->gds_size, job->gws_base,
+                                           job->gws_size, job->oa_base,
+                                           job->oa_size);
+       }
+
+       if (ring->funcs->patch_cond_exec)
+               amdgpu_ring_patch_cond_exec(ring, patch_offset);
+
+       /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
+       if (ring->funcs->emit_switch_buffer) {
+               amdgpu_ring_emit_switch_buffer(ring);
+               amdgpu_ring_emit_switch_buffer(ring);
        }
        return 0;
 }