drm/amdgpu: rename job->ctx to fence_ctx
author		Monk Liu <Monk.Liu@amd.com>
Thu, 25 Aug 2016 07:40:48 +0000 (15:40 +0800)
committer	Alex Deucher <alexander.deucher@amd.com>
Mon, 12 Sep 2016 22:12:17 +0000 (18:12 -0400)
job->ctx is actually the fence_context of the entity
the job belongs to; calling it just "ctx" is too vague,
and we will need to add an amdgpu_ctx field to the job
structure later.
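
For context, a minimal user-space sketch of how the renamed field
flows from the entity through the job to the ring. This is not kernel
code: the structs below are reduced stand-ins for amd_sched_entity,
amdgpu_job and amdgpu_ring that model only the fields this patch
touches, and the value 42 is an arbitrary example.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entity { uint64_t fence_context; }; /* stand-in: amd_sched_entity */
    struct job    { uint64_t fence_ctx;     }; /* stand-in: amdgpu_job */
    struct ring   { uint64_t current_ctx;   }; /* stand-in: amdgpu_ring */

    int main(void)
    {
    	struct entity e = { .fence_context = 42 };
    	struct ring r = { .current_ctx = 42 };
    	struct job j;

    	/* amdgpu_cs_submit()/amdgpu_job_submit(): the job inherits the
    	 * fence context of the scheduler entity it is pushed to. */
    	j.fence_ctx = e.fence_context;

    	/* amdgpu_ib_schedule(): comparing the ring's last seen context
    	 * with the job's decides whether the preamble IB can be
    	 * skipped or a context switch must be emitted. */
    	bool skip_preamble = (r.current_ctx == j.fence_ctx);
    	bool need_ctx_switch = (r.current_ctx != j.fence_ctx);

    	printf("skip_preamble=%d need_ctx_switch=%d\n",
    	       (int)skip_preamble, (int)need_ctx_switch);

    	r.current_ctx = j.fence_ctx; /* remember for the next submission */
    	return 0;
    }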

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

index 39baabe..10ec29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1241,7 +1241,7 @@ struct amdgpu_job {
        struct fence            *fence; /* the hw fence */
        uint32_t                num_ibs;
        void                    *owner;
-       uint64_t                ctx;
+       uint64_t                fence_ctx; /* the fence_context this job uses */
        bool                    vm_needs_flush;
        unsigned                vm_id;
        uint64_t                vm_pd_addr;
index e29e7b9..56bde64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -989,7 +989,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        }
 
        job->owner = p->filp;
-       job->ctx = entity->fence_context;
+       job->fence_ctx = entity->fence_context;
        p->fence = fence_get(&job->base.s_fence->finished);
        cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
        job->uf_sequence = cs->out.handle;
index 11f2fba..04263f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -124,7 +124,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
-       uint64_t ctx;
+       uint64_t fence_ctx;
 
        unsigned i;
        int r = 0;
@@ -135,10 +135,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
-               ctx = job->ctx;
+               fence_ctx = job->fence_ctx;
        } else {
                vm = NULL;
-               ctx = 0;
+               fence_ctx = 0;
        }
 
        if (!ring->ready) {
@@ -174,8 +174,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* always set cond_exec_polling to CONTINUE */
        *ring->cond_exe_cpu_addr = 1;
 
-       skip_preamble = ring->current_ctx == ctx;
-       need_ctx_switch = ring->current_ctx != ctx;
+       skip_preamble = ring->current_ctx == fence_ctx;
+       need_ctx_switch = ring->current_ctx != fence_ctx;
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
 
@@ -209,7 +209,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
-       ring->current_ctx = ctx;
+       ring->current_ctx = fence_ctx;
        if (ring->funcs->emit_switch_buffer)
                amdgpu_ring_emit_switch_buffer(ring);
        amdgpu_ring_commit(ring);
index 6674d40..ac8d401 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -124,7 +124,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                return r;
 
        job->owner = owner;
-       job->ctx = entity->fence_context;
+       job->fence_ctx = entity->fence_context;
        *f = fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        amd_sched_entity_push_job(&job->base);