drm/panfrost: Simplify the reset serialization logic
author     Boris Brezillon <boris.brezillon@collabora.com>
           Wed, 30 Jun 2021 06:27:44 +0000 (08:27 +0200)
committer  Boris Brezillon <boris.brezillon@collabora.com>
           Thu, 1 Jul 2021 06:53:32 +0000 (08:53 +0200)
Now that we can pass our own workqueue to drm_sched_init(), we can use
an ordered workqueue for both the scheduler timeout handler (TDR) and
our own reset work (which we use when the reset is not caused by a
fault/timeout on a specific job, e.g. when the AS_ACTIVE bit is stuck).
This guarantees that the timeout handlers and the reset handler can't
run concurrently, which drastically simplifies the locking.
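
For context, the guarantee being relied on: an ordered workqueue runs
at most one work item at a time, in queueing order, so two works queued
on it can never execute concurrently. A minimal, self-contained sketch
of that property (illustrative module code, not part of this patch; all
names are made up):

  #include <linux/module.h>
  #include <linux/workqueue.h>

  static struct workqueue_struct *demo_wq;
  static struct work_struct demo_tdr_work;   /* stands in for the TDR work */
  static struct work_struct demo_reset_work; /* stands in for the reset work */

  static void demo_tdr_fn(struct work_struct *work)
  {
          /* Never runs concurrently with demo_reset_fn(): demo_wq is
           * ordered, i.e. max_active == 1 and items run in queue order.
           */
          pr_info("timeout handler\n");
  }

  static void demo_reset_fn(struct work_struct *work)
  {
          pr_info("reset handler\n");
  }

  static int __init demo_init(void)
  {
          demo_wq = alloc_ordered_workqueue("demo-ordered", 0);
          if (!demo_wq)
                  return -ENOMEM;

          INIT_WORK(&demo_tdr_work, demo_tdr_fn);
          INIT_WORK(&demo_reset_work, demo_reset_fn);

          queue_work(demo_wq, &demo_tdr_work);
          /* Starts only after demo_tdr_fn() has returned. */
          queue_work(demo_wq, &demo_reset_work);
          return 0;
  }

  static void __exit demo_exit(void)
  {
          /* destroy_workqueue() drains any pending work first. */
          destroy_workqueue(demo_wq);
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");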

v5:
* Don't call cancel_delayed_work_sync() in the reset path (those works
  are already canceled in drm_sched_stop())

v4:
* Actually pass the reset workqueue to drm_sched_init()
* Don't call cancel_work_sync() in panfrost_reset(): it would deadlock,
  since panfrost_reset() may itself be running from the reset work, and
  cancel_work_sync() would wait forever for the executing handler to
  return. Checking the reset pending status instead avoids spurious
  resets (see the sketch after this changelog)

v3:
* New patch
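
The resulting serialization pattern, as it appears in the patch below
(the requester sets the pending bit and queues the reset work; the
handler bails out if the bit was already consumed by an earlier reset):

  /* Requester side (panfrost_device_schedule_reset()). */
  atomic_set(&pfdev->reset.pending, 1);
  queue_work(pfdev->reset.wq, &pfdev->reset.work);

  /* Handler side (panfrost_reset()). */
  if (!atomic_read(&pfdev->reset.pending))
          return;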

Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210630062751.2832545-10-boris.brezillon@collabora.com
drivers/gpu/drm/panfrost/panfrost_device.h
drivers/gpu/drm/panfrost/panfrost_job.c

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index aec11b6..d37bc09 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -109,6 +109,7 @@ struct panfrost_device {
        struct mutex sched_lock;
 
        struct {
+               struct workqueue_struct *wq;
                struct work_struct work;
                atomic_t pending;
        } reset;
@@ -247,9 +248,8 @@ const char *panfrost_exception_name(u32 exception_code);
 static inline void
 panfrost_device_schedule_reset(struct panfrost_device *pfdev)
 {
-       /* Schedule a reset if there's no reset in progress. */
-       if (!atomic_xchg(&pfdev->reset.pending, 1))
-               schedule_work(&pfdev->reset.work);
+       atomic_set(&pfdev->reset.pending, 1);
+       queue_work(pfdev->reset.wq, &pfdev->reset.work);
 }
 
 #endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 6149e4c..59c23c9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
 #define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
 #define job_read(dev, reg) readl(dev->iomem + (reg))
 
-enum panfrost_queue_status {
-       PANFROST_QUEUE_STATUS_ACTIVE,
-       PANFROST_QUEUE_STATUS_STOPPED,
-       PANFROST_QUEUE_STATUS_STARTING,
-       PANFROST_QUEUE_STATUS_FAULT_PENDING,
-};
-
 struct panfrost_queue_state {
        struct drm_gpu_scheduler sched;
-       atomic_t status;
-       struct mutex lock;
        u64 fence_context;
        u64 emit_seqno;
 };
@@ -368,57 +359,67 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
        job_write(pfdev, JOB_INT_MASK, irq_mask);
 }
 
-static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
-                                   struct drm_sched_job *bad)
+static void panfrost_reset(struct panfrost_device *pfdev,
+                          struct drm_sched_job *bad)
 {
-       enum panfrost_queue_status old_status;
-       bool stopped = false;
+       unsigned int i;
+       bool cookie;
+
+       if (!atomic_read(&pfdev->reset.pending))
+               return;
+
+       /* Stop the schedulers.
+        *
+        * FIXME: We temporarily get out of the dma_fence_signalling section
+        * because the cleanup path generates lockdep splats when taking locks
+        * to release job resources. We should rework the code to follow this
+        * pattern:
+        *
+        *      try_lock
+        *      if (locked)
+        *              release
+        *      else
+        *              schedule_work_to_release_later
+        */
+       for (i = 0; i < NUM_JOB_SLOTS; i++)
+               drm_sched_stop(&pfdev->js->queue[i].sched, bad);
 
-       mutex_lock(&queue->lock);
-       old_status = atomic_xchg(&queue->status,
-                                PANFROST_QUEUE_STATUS_STOPPED);
-       if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
-               goto out;
+       cookie = dma_fence_begin_signalling();
 
-       WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE);
-       drm_sched_stop(&queue->sched, bad);
        if (bad)
                drm_sched_increase_karma(bad);
 
-       stopped = true;
+       spin_lock(&pfdev->js->job_lock);
+       for (i = 0; i < NUM_JOB_SLOTS; i++) {
+               if (pfdev->jobs[i]) {
+                       pm_runtime_put_noidle(pfdev->dev);
+                       panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
+                       pfdev->jobs[i] = NULL;
+               }
+       }
+       spin_unlock(&pfdev->js->job_lock);
 
-       /*
-        * Set the timeout to max so the timer doesn't get started
-        * when we return from the timeout handler (restored in
-        * panfrost_scheduler_start()).
-        */
-       queue->sched.timeout = MAX_SCHEDULE_TIMEOUT;
+       panfrost_device_reset(pfdev);
 
-out:
-       mutex_unlock(&queue->lock);
+       /* GPU has been reset, we can clear the reset pending bit. */
+       atomic_set(&pfdev->reset.pending, 0);
 
-       return stopped;
-}
+       /* Now resubmit jobs that were previously queued but didn't have a
+        * chance to finish.
+        * FIXME: We temporarily get out of the DMA fence signalling section
+        * while resubmitting jobs because the job submission logic will
+        * allocate memory with the GFP_KERNEL flag, which can trigger
+        * memory reclaim and expose a lock ordering issue.
+        */
+       dma_fence_end_signalling(cookie);
+       for (i = 0; i < NUM_JOB_SLOTS; i++)
+               drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
+       cookie = dma_fence_begin_signalling();
 
-static void panfrost_scheduler_start(struct panfrost_queue_state *queue)
-{
-       enum panfrost_queue_status old_status;
-
-       mutex_lock(&queue->lock);
-       old_status = atomic_xchg(&queue->status,
-                                PANFROST_QUEUE_STATUS_STARTING);
-       WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED);
-
-       /* Restore the original timeout before starting the scheduler. */
-       queue->sched.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS);
-       drm_sched_resubmit_jobs(&queue->sched);
-       drm_sched_start(&queue->sched, true);
-       old_status = atomic_xchg(&queue->status,
-                                PANFROST_QUEUE_STATUS_ACTIVE);
-       if (old_status == PANFROST_QUEUE_STATUS_FAULT_PENDING)
-               drm_sched_fault(&queue->sched);
-
-       mutex_unlock(&queue->lock);
+       for (i = 0; i < NUM_JOB_SLOTS; i++)
+               drm_sched_start(&pfdev->js->queue[i].sched, true);
+
+       dma_fence_end_signalling(cookie);
 }
 
 static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
@@ -443,11 +444,8 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);
 
-       /* Scheduler is already stopped, nothing to do. */
-       if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
-               return DRM_GPU_SCHED_STAT_NOMINAL;
-
-       panfrost_device_schedule_reset(pfdev);
+       atomic_set(&pfdev->reset.pending, 1);
+       panfrost_reset(pfdev, sched_job);
 
        return DRM_GPU_SCHED_STAT_NOMINAL;
 }
@@ -474,8 +472,6 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
                job_write(pfdev, JOB_INT_CLEAR, mask);
 
                if (status & JOB_INT_MASK_ERR(j)) {
-                       enum panfrost_queue_status old_status;
-
                        job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
 
                        dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
@@ -483,19 +479,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
                                panfrost_exception_name(job_read(pfdev, JS_STATUS(j))),
                                job_read(pfdev, JS_HEAD_LO(j)),
                                job_read(pfdev, JS_TAIL_LO(j)));
-
-                       /*
-                        * When the queue is being restarted we don't report
-                        * faults directly to avoid races between the timeout
-                        * and reset handlers. panfrost_scheduler_start() will
-                        * call drm_sched_fault() after the queue has been
-                        * started if status == FAULT_PENDING.
-                        */
-                       old_status = atomic_cmpxchg(&pfdev->js->queue[j].status,
-                                                   PANFROST_QUEUE_STATUS_STARTING,
-                                                   PANFROST_QUEUE_STATUS_FAULT_PENDING);
-                       if (old_status == PANFROST_QUEUE_STATUS_ACTIVE)
-                               drm_sched_fault(&pfdev->js->queue[j].sched);
+                       drm_sched_fault(&pfdev->js->queue[j].sched);
                }
 
                if (status & JOB_INT_MASK_DONE(j)) {
@@ -550,56 +534,13 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
        return IRQ_WAKE_THREAD;
 }
 
-static void panfrost_reset(struct work_struct *work)
+static void panfrost_reset_work(struct work_struct *work)
 {
        struct panfrost_device *pfdev = container_of(work,
                                                     struct panfrost_device,
                                                     reset.work);
-       unsigned int i;
-       bool cookie;
-
-       cookie = dma_fence_begin_signalling();
-       for (i = 0; i < NUM_JOB_SLOTS; i++) {
-               /*
-                * We want pending timeouts to be handled before we attempt
-                * to stop the scheduler. If we don't do that and the timeout
-                * handler is in flight, it might have removed the bad job
-                * from the list, and we'll lose this job if the reset handler
-                * enters the critical section in panfrost_scheduler_stop()
-                * before the timeout handler.
-                *
-                * Timeout is set to MAX_SCHEDULE_TIMEOUT - 1 because we need
-                * something big enough to make sure the timer will not expire
-                * before we manage to stop the scheduler, but we can't use
-                * MAX_SCHEDULE_TIMEOUT because drm_sched_get_cleanup_job()
-                * considers that as 'timer is not running' and will dequeue
-                * the job without making sure the timeout handler is not
-                * running.
-                */
-               pfdev->js->queue[i].sched.timeout = MAX_SCHEDULE_TIMEOUT - 1;
-               cancel_delayed_work_sync(&pfdev->js->queue[i].sched.work_tdr);
-               panfrost_scheduler_stop(&pfdev->js->queue[i], NULL);
-       }
-
-       /* All timers have been stopped, we can safely reset the pending state. */
-       atomic_set(&pfdev->reset.pending, 0);
 
-       spin_lock(&pfdev->js->job_lock);
-       for (i = 0; i < NUM_JOB_SLOTS; i++) {
-               if (pfdev->jobs[i]) {
-                       pm_runtime_put_noidle(pfdev->dev);
-                       panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
-                       pfdev->jobs[i] = NULL;
-               }
-       }
-       spin_unlock(&pfdev->js->job_lock);
-
-       panfrost_device_reset(pfdev);
-
-       for (i = 0; i < NUM_JOB_SLOTS; i++)
-               panfrost_scheduler_start(&pfdev->js->queue[i]);
-
-       dma_fence_end_signalling(cookie);
+       panfrost_reset(pfdev, NULL);
 }
 
 int panfrost_job_init(struct panfrost_device *pfdev)
@@ -607,7 +548,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
        struct panfrost_job_slot *js;
        int ret, j, irq;
 
-       INIT_WORK(&pfdev->reset.work, panfrost_reset);
+       INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
 
        pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
        if (!js)
@@ -629,15 +570,18 @@ int panfrost_job_init(struct panfrost_device *pfdev)
                return ret;
        }
 
-       for (j = 0; j < NUM_JOB_SLOTS; j++) {
-               mutex_init(&js->queue[j].lock);
+       pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
+       if (!pfdev->reset.wq)
+               return -ENOMEM;
 
+       for (j = 0; j < NUM_JOB_SLOTS; j++) {
                js->queue[j].fence_context = dma_fence_context_alloc(1);
 
                ret = drm_sched_init(&js->queue[j].sched,
                                     &panfrost_sched_ops,
                                     1, 0,
-                                    msecs_to_jiffies(JOB_TIMEOUT_MS), NULL,
+                                    msecs_to_jiffies(JOB_TIMEOUT_MS),
+                                    pfdev->reset.wq,
                                     NULL, "pan_js");
                if (ret) {
                        dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
@@ -653,6 +597,7 @@ err_sched:
        for (j--; j >= 0; j--)
                drm_sched_fini(&js->queue[j].sched);
 
+       destroy_workqueue(pfdev->reset.wq);
        return ret;
 }
 
@@ -665,9 +610,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
 
        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                drm_sched_fini(&js->queue[j].sched);
-               mutex_destroy(&js->queue[j].lock);
        }
 
+       cancel_work_sync(&pfdev->reset.work);
+       destroy_workqueue(pfdev->reset.wq);
 }
 
 int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)