drm/i915: Protect request peeking with RCU
author	Chris Wilson <chris@chris-wilson.co.uk>
Sun, 3 Nov 2019 16:23:05 +0000 (16:23 +0000)
committer	Rodrigo Vivi <rodrigo.vivi@intel.com>
Mon, 18 Nov 2019 19:25:16 +0000 (11:25 -0800)
Since the execlists_active() is no longer protected by the
engine->active.lock, we need to protect the request pointer with RCU to
prevent it being freed as we evaluate whether or not we need to preempt.

Fixes: df403069029d ("drm/i915/execlists: Lift process_csb() out of the irq-off spinlock")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191104090158.2959-2-chris@chris-wilson.co.uk
(cherry picked from commit 7d148635253328dda7cfe55d57e3c828e9564427)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
(cherry picked from commit 8eb4704b124cbd44f189709959137d77063ecfa1)
(cherry picked from commit 7e27238e149ce4f00d9cd801fe3aa0ea55e986a2)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/i915/i915_scheduler.c

index 7b84ebc..3eba8a2 100644 (file)
@@ -177,9 +177,37 @@ static inline int rq_prio(const struct i915_request *rq)
        return rq->sched.attr.priority | __NO_PREEMPTION;
 }
 
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static inline bool need_preempt(int prio, int active)
 {
-       const struct i915_request *inflight = *engine->execlists.active;
+       /*
+        * Allow preemption of low -> normal -> high, but we do
+        * not allow low priority tasks to preempt other low priority
+        * tasks under the impression that latency for low priority
+        * tasks does not matter (as much as background throughput),
+        * so kiss.
+        */
+       return prio >= max(I915_PRIORITY_NORMAL, active);
+}
+
+static void kick_submission(struct intel_engine_cs *engine,
+                           const struct i915_request *rq,
+                           int prio)
+{
+       const struct i915_request *inflight;
+
+       /*
+        * We only need to kick the tasklet once for the high priority
+        * new context we add into the queue.
+        */
+       if (prio <= engine->execlists.queue_priority_hint)
+               return;
+
+       rcu_read_lock();
+
+       /* Nothing currently active? We're overdue for a submission! */
+       inflight = execlists_active(&engine->execlists);
+       if (!inflight)
+               goto unlock;
 
        /*
         * If we are already the currently executing context, don't
@@ -188,10 +216,15 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
         * tasklet, i.e. we have not change the priority queue
         * sufficiently to oust the running context.
         */
-       if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
-               return;
+       if (inflight->hw_context == rq->hw_context)
+               goto unlock;
 
-       tasklet_hi_schedule(&engine->execlists.tasklet);
+       engine->execlists.queue_priority_hint = prio;
+       if (need_preempt(prio, rq_prio(inflight)))
+               tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+       rcu_read_unlock();
 }
 
 static void __i915_schedule(struct i915_sched_node *node,
@@ -317,13 +350,8 @@ static void __i915_schedule(struct i915_sched_node *node,
                        list_move_tail(&node->link, cache.priolist);
                }
 
-               if (prio <= engine->execlists.queue_priority_hint)
-                       continue;
-
-               engine->execlists.queue_priority_hint = prio;
-
                /* Defer (tasklet) submission until after all of our updates. */
-               kick_submission(engine, prio);
+               kick_submission(engine, node_to_request(node), prio);
        }
 
        spin_unlock(&engine->active.lock);