Revert "drm/amdgpu: return new seq_no for amd_sched_push_job"
author Chunming Zhou <david1.zhou@amd.com>
Wed, 5 Aug 2015 11:07:08 +0000 (19:07 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:51:02 +0000 (16:51 -0400)
This reverts commit d1d33da8eb86b8ca41dd9ed95738030df5267b95.

Reviewed-by: Christian König <christian.koenig@amd.com>
Conflicts:
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index aa1bc24..f72a858 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -901,6 +901,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        if (amdgpu_enable_scheduler && parser->num_ibs) {
                struct amdgpu_ring * ring =
                        amdgpu_cs_parser_get_ring(adev, parser);
+               parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
+                       &parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
                if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
                        r = amdgpu_cs_parser_prepare_job(parser);
                        if (r)
@@ -910,8 +912,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                parser->ring = ring;
                parser->run_job = amdgpu_cs_parser_run_job;
                parser->free_job = amdgpu_cs_parser_free_job;
-               parser->ibs[parser->num_ibs - 1].sequence =
-                                  amd_sched_push_job(ring->scheduler,
+               amd_sched_push_job(ring->scheduler,
                                   &parser->ctx->rings[ring->idx].c_entity,
                                   parser);
                cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
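
The behavioral change restored in this hunk: the caller claims the virtual sequence number with atomic64_inc_return() before amd_sched_push_job() is called, so the IOCTL can hand cs->out.handle back to userspace without depending on the push's return value. A minimal userspace model of that ordering, using C11 atomics in place of the kernel's atomic64_t (claim_v_seq() and the variable names are illustrative stand-ins, not amdgpu API):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the entity's last_queued_v_seq field. */
    static _Atomic uint64_t last_queued_v_seq;

    /* Equivalent of the kernel's atomic64_inc_return(): C11's
     * atomic_fetch_add() returns the OLD value, so add 1 to get the
     * newly claimed sequence number. */
    static uint64_t claim_v_seq(void)
    {
            return atomic_fetch_add(&last_queued_v_seq, 1) + 1;
    }

    int main(void)
    {
            uint64_t v_seq = claim_v_seq(); /* claimed before the push */
            /* ...stamp the last IB, then push the job... */
            printf("handle for userspace: %llu\n",
                   (unsigned long long)v_seq);
            return 0;
    }
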
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 995901b..0fcf020 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -121,6 +121,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 {
        int r = 0;
        if (amdgpu_enable_scheduler) {
+               uint64_t v_seq;
                struct amdgpu_cs_parser *sched_job =
                        amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
                                                ibs, 1);
@@ -128,12 +129,16 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                        return -ENOMEM;
                }
                sched_job->free_job = free_job;
-               ibs[num_ibs - 1].sequence = amd_sched_push_job(ring->scheduler,
+               v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+               ibs[num_ibs - 1].sequence = v_seq;
+               amd_sched_push_job(ring->scheduler,
                                   &adev->kernel_ctx.rings[ring->idx].c_entity,
                                   sched_job);
                r = amd_sched_wait_emit(
                        &adev->kernel_ctx.rings[ring->idx].c_entity,
-                       ibs[num_ibs - 1].sequence, false, -1);
+                       v_seq,
+                       false,
+                       -1);
                if (r)
                        WARN(true, "emit timeout\n");
        } else
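
This helper, and the three amdgpu_vm.c hunks below, all restore the same claim-stamp-push-wait shape: claim v_seq from last_queued_v_seq, stamp it into the last IB, push the job, then block in amd_sched_wait_emit() on that same local v_seq with no timeout (-1). A hedged userspace sketch of that synchronization shape, with pthreads standing in for the scheduler's wait queue (all names here are illustrative stand-ins for the amdgpu types):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t last_queued_v_seq;  /* like c_entity's counter */
    static uint64_t emitted_seq;                /* protected by lock */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    /* Stand-in for the scheduler consuming the job and emitting it. */
    static void *scheduler_thread(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            emitted_seq = atomic_load(&last_queued_v_seq);
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    /* Rough analogue of amd_sched_wait_emit(entity, v_seq, false, -1). */
    static void wait_emit(uint64_t v_seq)
    {
            pthread_mutex_lock(&lock);
            while (emitted_seq < v_seq)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            pthread_t tid;
            /* Claim the sequence before pushing, as the hunks above do. */
            uint64_t v_seq = atomic_fetch_add(&last_queued_v_seq, 1) + 1;

            pthread_create(&tid, NULL, scheduler_thread, NULL); /* "push" */
            wait_emit(v_seq);                           /* block on it */
            pthread_join(tid, NULL);
            printf("v_seq %llu emitted\n", (unsigned long long)v_seq);
            return 0;
    }

Waiting on the locally claimed v_seq (rather than re-reading ibs[num_ibs - 1].sequence) is exactly what the restored hunks do; the two values are the same, but the local keeps the wait independent of the IB array.
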
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 78713ae..9d5043c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -371,6 +371,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 
        if (amdgpu_enable_scheduler) {
                int r;
+               uint64_t v_seq;
                sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
                                                    &adev->kernel_ctx, ib, 1);
                if(!sched_job)
@@ -378,11 +379,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                sched_job->job_param.vm.bo = bo;
                sched_job->run_job = amdgpu_vm_run_job;
                sched_job->free_job = amdgpu_vm_free_job;
-               ib->sequence = amd_sched_push_job(ring->scheduler,
+               v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+               ib->sequence = v_seq;
+               amd_sched_push_job(ring->scheduler,
                                   &adev->kernel_ctx.rings[ring->idx].c_entity,
                                   sched_job);
                r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
-                                       ib->sequence, false, -1);
+                                       v_seq,
+                                       false,
+                                       -1);
                if (r)
                        DRM_ERROR("emit timeout\n");
 
@@ -516,6 +521,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
                if (amdgpu_enable_scheduler) {
                        int r;
+                       uint64_t v_seq;
                        sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
                                                            &adev->kernel_ctx,
                                                            ib, 1);
@@ -524,11 +530,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                        sched_job->job_param.vm.bo = pd;
                        sched_job->run_job = amdgpu_vm_run_job;
                        sched_job->free_job = amdgpu_vm_free_job;
-                       ib->sequence = amd_sched_push_job(ring->scheduler,
+                       v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+                       ib->sequence = v_seq;
+                       amd_sched_push_job(ring->scheduler,
                                           &adev->kernel_ctx.rings[ring->idx].c_entity,
                                           sched_job);
                        r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
-                                               ib->sequence, false, -1);
+                                               v_seq,
+                                               false,
+                                               -1);
                        if (r)
                                DRM_ERROR("emit timeout\n");
                } else {
@@ -862,6 +872,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        if (amdgpu_enable_scheduler) {
                int r;
+               uint64_t v_seq;
                sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
                                                    &adev->kernel_ctx, ib, 1);
                if(!sched_job)
@@ -872,11 +883,15 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                sched_job->job_param.vm_mapping.fence = fence;
                sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
                sched_job->free_job = amdgpu_vm_free_job;
-               ib->sequence = amd_sched_push_job(ring->scheduler,
+               v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+               ib->sequence = v_seq;
+               amd_sched_push_job(ring->scheduler,
                                   &adev->kernel_ctx.rings[ring->idx].c_entity,
                                   sched_job);
                r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
-                                       ib->sequence, false, -1);
+                                       v_seq,
+                                       false,
+                                       -1);
                if (r)
                        DRM_ERROR("emit timeout\n");
        } else {
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index b9aa572..1204b73 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -289,9 +289,12 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
  * @sched      The pointer to the scheduler
  * @c_entity    The pointer to amd_context_entity
  * @job                The pointer to job required to submit
- * return the virtual sequence number
+ * return 0 on success.
+ *        -2 if the queue is full for this client; the client should wait
+ *           until the scheduler consumes some queued commands.
+ *        -1 on any other failure.
 */
-uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
+int amd_sched_push_job(struct amd_gpu_scheduler *sched,
                       struct amd_context_entity *c_entity,
                       void *job)
 {
@@ -305,8 +308,7 @@ uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
        }
 
        wake_up_interruptible(&sched->wait_queue);
-
-       return atomic64_inc_return(&c_entity->last_queued_v_seq);
+       return 0;
 }
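
With the revert, amd_sched_push_job() reports a status code rather than a sequence number: per the comment block above, 0 means the job was queued, -2 means this client's queue is full and the caller should wait for the scheduler to consume some queued commands, and -1 covers other failures. A small runnable model of honoring that contract (the fixed-depth counter and the retry print-out are illustrative assumptions, not the scheduler's kfifo behavior):

    #include <stdio.h>

    /* Userspace model of the documented contract: 0 on success, -2
     * when the client's queue is full, -1 on other failure. */
    #define QUEUE_DEPTH 4

    static int queued;

    static int push_job_model(void)
    {
            if (queued >= QUEUE_DEPTH)
                    return -2;  /* full: caller should wait and retry */
            queued++;
            return 0;           /* job accepted */
    }

    int main(void)
    {
            for (int i = 0; i < 6; i++) {
                    int r = push_job_model();
                    if (r == -2)
                            printf("job %d: queue full, back off\n", i);
                    else if (r)
                            printf("job %d: failed (%d)\n", i, r);
                    else
                            printf("job %d: queued\n", i);
            }
            return 0;
    }
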
 
 /**
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index c46d085..1a01ac4 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -124,7 +124,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
 
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
-uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
+int amd_sched_push_job(struct amd_gpu_scheduler *sched,
                       struct amd_context_entity *c_entity,
                       void *job);