drm/amdgpu: get rid of incorrect TDR
author Monk Liu <Monk.Liu@amd.com>
Fri, 4 Mar 2016 06:42:26 +0000 (14:42 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 2 May 2016 19:19:42 +0000 (15:19 -0400)
The original timeout detection routine is incorrect: it measures the
gap from the time a job is scheduled, but it should only measure the
gap from the time the job is actually processed by the hardware.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
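
For illustration, a minimal sketch of the model this change points
toward: arm the timeout only while a job is actually being processed by
the hardware, rather than from the moment it is queued by the scheduler.
The helper names and the extra "work_tdr" delayed work on struct
amd_gpu_scheduler are illustrative assumptions for this sketch, not code
added by this patch:

/*
 * Illustrative sketch only -- assumes a hypothetical
 * "struct delayed_work work_tdr" member in struct amd_gpu_scheduler.
 */

/* Arm the timer only once a job is handed to the hardware ring. */
static void example_job_begin(struct amd_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}

/* Disarm it as soon as the hardware fence signals in time. */
static void example_job_finish(struct amd_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT)
		cancel_delayed_work(&sched->work_tdr);
}

/* Runs only if a job spent longer than sched->timeout on the hardware. */
static void example_job_timedout(struct work_struct *work)
{
	struct amd_gpu_scheduler *sched =
		container_of(work, struct amd_gpu_scheduler, work_tdr.work);

	DRM_ERROR("[%s] job timed out on the hardware!\n", sched->name);
}

With that model, a fence that signals in time never leaves a stale
per-fence timer behind, which is why the per-fence dwork/list members
and the fence_list bookkeeping are simply removed below.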

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index af846f2..9a9fffd 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -418,46 +418,18 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
        spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
        amd_sched_fence_signal(s_fence);
-       if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-               cancel_delayed_work(&s_fence->dwork);
-               spin_lock_irqsave(&sched->fence_list_lock, flags);
-               list_del_init(&s_fence->list);
-               spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-       }
+
        trace_amd_sched_process_job(s_fence);
        fence_put(&s_fence->base);
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
-static void amd_sched_fence_work_func(struct work_struct *work)
-{
-       struct amd_sched_fence *s_fence =
-               container_of(work, struct amd_sched_fence, dwork.work);
-       struct amd_gpu_scheduler *sched = s_fence->sched;
-       struct amd_sched_fence *entity, *tmp;
-       unsigned long flags;
-
-       DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
-
-       /* Clean all pending fences */
-       spin_lock_irqsave(&sched->fence_list_lock, flags);
-       list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
-               DRM_ERROR("  fence no %d\n", entity->base.seqno);
-               cancel_delayed_work(&entity->dwork);
-               list_del_init(&entity->list);
-               fence_put(&entity->base);
-       }
-       spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-}
-
 static int amd_sched_main(void *param)
 {
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;
 
-       spin_lock_init(&sched->fence_list_lock);
-       INIT_LIST_HEAD(&sched->fence_list);
        sched_setscheduler(current, SCHED_FIFO, &sparam);
 
        while (!kthread_should_stop()) {
@@ -465,7 +437,6 @@ static int amd_sched_main(void *param)
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;
-               unsigned long flags;
 
                wait_event_interruptible(sched->wake_up_worker,
                        (entity = amd_sched_select_entity(sched)) ||
@@ -480,14 +451,6 @@ static int amd_sched_main(void *param)
 
                s_fence = sched_job->s_fence;
 
-               if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-                       INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
-                       schedule_delayed_work(&s_fence->dwork, sched->timeout);
-                       spin_lock_irqsave(&sched->fence_list_lock, flags);
-                       list_add_tail(&s_fence->list, &sched->fence_list);
-                       spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-               }
-
                atomic_inc(&sched->hw_rq_count);
                amd_sched_job_pre_schedule(sched, sched_job);
                fence = sched->ops->run_job(sched_job);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 2e3b830..b26148d 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -74,8 +74,6 @@ struct amd_sched_fence {
        struct amd_gpu_scheduler        *sched;
        spinlock_t                      lock;
        void                            *owner;
-       struct delayed_work             dwork;
-       struct list_head                list;
        struct amd_sched_job    *s_job;
 };
 
@@ -127,8 +125,6 @@ struct amd_gpu_scheduler {
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
-       struct list_head                fence_list;
-       spinlock_t                      fence_list_lock;
        struct task_struct              *thread;
        struct list_head        ring_mirror_list;
        spinlock_t                      job_list_lock;