drm/i915: Pass i915_sched_node around internally
authorChris Wilson <chris@chris-wilson.co.uk>
Mon, 13 May 2019 12:01:01 +0000 (13:01 +0100)
committerJoonas Lahtinen <joonas.lahtinen@linux.intel.com>
Mon, 20 May 2019 15:27:43 +0000 (18:27 +0300)
To simplify the next patch, update bump_priority and schedule to accept
the internal i915_sched_node directly and not expect a request pointer.

add/remove: 0/0 grow/shrink: 2/1 up/down: 8/-15 (-7)
Function                                     old     new   delta
i915_schedule_bump_priority                  109     113      +4
i915_schedule                                 50      54      +4
__i915_schedule                              922     907     -15

v2: Adopt node for the old rq local, since it no longer is a request but
the origin node.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190513120102.29660-2-chris@chris-wilson.co.uk
(cherry picked from commit 52c76fb18a34fc08dd06f32b9fc83f1375f083ee)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
drivers/gpu/drm/i915/i915_scheduler.c

index 5756fcf..3cfadb9 100644 (file)
@@ -175,7 +175,7 @@ static bool inflight(const struct i915_request *rq,
        return active->hw_context == rq->hw_context;
 }
 
-static void __i915_schedule(struct i915_request *rq,
+static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
 {
        struct intel_engine_cs *engine;
@@ -189,13 +189,13 @@ static void __i915_schedule(struct i915_request *rq,
        lockdep_assert_held(&schedule_lock);
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
-       if (i915_request_completed(rq))
+       if (node_signaled(node))
                return;
 
-       if (prio <= READ_ONCE(rq->sched.attr.priority))
+       if (prio <= READ_ONCE(node->attr.priority))
                return;
 
-       stack.signaler = &rq->sched;
+       stack.signaler = node;
        list_add(&stack.dfs_link, &dfs);
 
        /*
@@ -246,9 +246,9 @@ static void __i915_schedule(struct i915_request *rq,
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
-       if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
-               GEM_BUG_ON(!list_empty(&rq->sched.link));
-               rq->sched.attr = *attr;
+       if (node->attr.priority == I915_PRIORITY_INVALID) {
+               GEM_BUG_ON(!list_empty(&node->link));
+               node->attr = *attr;
 
                if (stack.dfs_link.next == stack.dfs_link.prev)
                        return;
@@ -257,15 +257,14 @@ static void __i915_schedule(struct i915_request *rq,
        }
 
        memset(&cache, 0, sizeof(cache));
-       engine = rq->engine;
+       engine = node_to_request(node)->engine;
        spin_lock(&engine->timeline.lock);
 
        /* Fifo and depth-first replacement ensure our deps execute before us */
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
-               struct i915_sched_node *node = dep->signaler;
-
                INIT_LIST_HEAD(&dep->dfs_link);
 
+               node = dep->signaler;
                engine = sched_lock_engine(node, engine, &cache);
                lockdep_assert_held(&engine->timeline.lock);
 
@@ -315,13 +314,20 @@ static void __i915_schedule(struct i915_request *rq,
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
 {
        spin_lock_irq(&schedule_lock);
-       __i915_schedule(rq, attr);
+       __i915_schedule(&rq->sched, attr);
        spin_unlock_irq(&schedule_lock);
 }
 
+static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
+{
+       struct i915_sched_attr attr = node->attr;
+
+       attr.priority |= bump;
+       __i915_schedule(node, &attr);
+}
+
 void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
 {
-       struct i915_sched_attr attr;
        unsigned long flags;
 
        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
@@ -330,11 +336,7 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
                return;
 
        spin_lock_irqsave(&schedule_lock, flags);
-
-       attr = rq->sched.attr;
-       attr.priority |= bump;
-       __i915_schedule(rq, &attr);
-
+       __bump_priority(&rq->sched, bump);
        spin_unlock_irqrestore(&schedule_lock, flags);
 }