Merge v6.5-rc1 into drm-misc-fixes
[platform/kernel/linux-starfive.git] / drivers/gpu/drm/scheduler/sched_main.c
index eff0a7f..506371c 100644
  *    the hardware.
  *
  * The jobs in an entity are always scheduled in the order that they were pushed.
+ *
+ * Note that once a job has been taken from the entity's queue and pushed to
+ * the hardware, i.e. the pending queue, the entity must not be referenced
+ * anymore through the job's entity pointer.
  */
 
 #include <linux/kthread.h>
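The note added above encodes a lifetime rule: once a job sits on the pending queue, s_job->entity may already point at a destroyed entity, so only s_job->sched remains safe to use. A minimal, hypothetical driver callback illustrating the rule (foo_free_job(), to_foo_job() and the foo_job container are made-up names, not part of this patch):

static void foo_free_job(struct drm_sched_job *s_job)
{
        /* Safe: the job's scheduler back-pointer stays valid here. */
        struct drm_gpu_scheduler *sched = s_job->sched;

        /*
         * Not safe: dereferencing s_job->entity at this point, because the
         * entity may have been torn down while the job was still pending.
         */
        dev_dbg(sched->dev, "freeing finished job\n");

        drm_sched_job_cleanup(s_job);
        kfree(to_foo_job(s_job));       /* hypothetical container helper */
}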
@@ -258,7 +262,7 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
  *
  * Finish the job's fence and wake up the worker thread.
  */
-static void drm_sched_job_done(struct drm_sched_job *s_job)
+static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 {
        struct drm_sched_fence *s_fence = s_job->s_fence;
        struct drm_gpu_scheduler *sched = s_fence->sched;
@@ -269,7 +273,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job)
        trace_drm_sched_process_job(s_fence);
 
        dma_fence_get(&s_fence->finished);
-       drm_sched_fence_finished(s_fence);
+       drm_sched_fence_finished(s_fence, result);
        dma_fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
 }
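drm_sched_fence_finished() now receives the job's result as well. Its body lives in sched_fence.c and is not part of this diff; a plausible sketch, assuming it just records a non-zero result on the finished fence before signalling it, would be:

void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
{
        /* The error must be set before the fence is signalled. */
        if (result)
                dma_fence_set_error(&fence->finished, result);
        dma_fence_signal(&fence->finished);
}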
@@ -283,7 +287,7 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 {
        struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
 
-       drm_sched_job_done(s_job);
+       drm_sched_job_done(s_job, f->error);
 }
 
 /**
@@ -534,12 +538,12 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
                        r = dma_fence_add_callback(fence, &s_job->cb,
                                                   drm_sched_job_done_cb);
                        if (r == -ENOENT)
-                               drm_sched_job_done(s_job);
+                               drm_sched_job_done(s_job, fence->error);
                        else if (r)
                                DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
                                          r);
                } else
-                       drm_sched_job_done(s_job);
+                       drm_sched_job_done(s_job, -ECANCELED);
        }
 
        if (full_recovery) {
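The two call sites above now distinguish the recovery cases: a job whose hardware fence already exists completes with that fence's error, while a job left without a hardware fence after the reset is finished with -ECANCELED. A waiter that still holds a reference to the scheduler fence (s_fence below is assumed to be such a reference) could tell the outcomes apart with something like:

        /* 1 = signalled cleanly, <0 = the result recorded above, 0 = pending */
        int status = dma_fence_get_status(&s_fence->finished);

        if (status == -ECANCELED)
                pr_debug("job was dropped during reset recovery\n");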
@@ -844,27 +848,26 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
 EXPORT_SYMBOL(drm_sched_job_cleanup);
 
 /**
- * drm_sched_ready - is the scheduler ready
- *
+ * drm_sched_can_queue -- Can we queue more to the hardware?
  * @sched: scheduler instance
  *
  * Return true if we can push more jobs to the hw, otherwise false.
  */
-static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
+static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
 {
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
 }
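The rename from drm_sched_ready() makes the intent explicit: can more work be handed to the hardware? hw_submission_limit is the submission limit the driver passed to drm_sched_init(), and the counter compared against it is maintained elsewhere in this file (not in these hunks); paraphrased, the accounting is roughly:

        /* drm_sched_main(), just before the driver's ->run_job() is called */
        atomic_inc(&sched->hw_rq_count);

        /* drm_sched_job_done(), once the job has left the hardware */
        atomic_dec(&sched->hw_rq_count);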
 
 /**
- * drm_sched_wakeup - Wake up the scheduler when it is ready
- *
+ * drm_sched_wakeup_if_can_queue - Wake up the scheduler
  * @sched: scheduler instance
  *
+ * Wake up the scheduler if we can queue jobs.
  */
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
+void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
 {
-       if (drm_sched_ready(sched))
+       if (drm_sched_can_queue(sched))
                wake_up_interruptible(&sched->wake_up_worker);
 }
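Likewise, the wakeup helper is renamed to say what it actually does: wake the worker only when drm_sched_can_queue() reports room. Its caller in sched_entity.c is not part of this diff; roughly sketched, pushing a job to a previously idle entity ends with:

        /* drm_sched_entity_push_job(), sketched without the locking */
        if (first) {
                drm_sched_rq_add_entity(entity->rq, entity);
                drm_sched_wakeup_if_can_queue(entity->rq->sched);
        }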
 
@@ -881,7 +884,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
        struct drm_sched_entity *entity;
        int i;
 
-       if (!drm_sched_ready(sched))
+       if (!drm_sched_can_queue(sched))
                return NULL;
 
        /* Kernel run queue has higher priority than normal run queue*/
@@ -1049,15 +1052,13 @@ static int drm_sched_main(void *param)
                        r = dma_fence_add_callback(fence, &sched_job->cb,
                                                   drm_sched_job_done_cb);
                        if (r == -ENOENT)
-                               drm_sched_job_done(sched_job);
+                               drm_sched_job_done(sched_job, fence->error);
                        else if (r)
                                DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
                                          r);
                } else {
-                       if (IS_ERR(fence))
-                               dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
-
-                       drm_sched_job_done(sched_job);
+                       drm_sched_job_done(sched_job, IS_ERR(fence) ?
+                                          PTR_ERR(fence) : 0);
                }
 
                wake_up(&sched->job_scheduled);
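The reworked else branch also spells out the contract for the driver's ->run_job() callback: return the hardware fence on success, an ERR_PTR() on failure (now forwarded as the finished fence's error), or NULL when there is nothing to wait for. A minimal, hypothetical backend implementation following that contract (the foo_* names and the hw_fence field are assumptions, not part of this patch):

static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
{
        struct foo_job *job = to_foo_job(sched_job);    /* assumed container */
        int ret;

        ret = foo_hw_submit(job);       /* assumed hardware submission helper */
        if (ret)
                return ERR_PTR(ret);    /* becomes s_fence->finished.error */

        /* Success: give the scheduler its own reference to the HW fence. */
        return dma_fence_get(job->hw_fence);
}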