drm/i915/execlists: Assert there are no simple cycles in the dependencies
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 2 Jan 2018 15:12:26 +0000 (15:12 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Wed, 3 Jan 2018 12:09:44 +0000 (12:09 +0000)
The dependency graph must be acyclic. This is already checked by the
swfence, but for sanity also assert that a shallow dependency cycle
does not corrupt our list iteration in execlists_schedule().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: MichaƂ Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180102151235.3949-10-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/intel_lrc.c

index 04c35e4..7d1ce21 100644
@@ -1011,7 +1011,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
        stack.signaler = &request->priotree;
        list_add(&stack.dfs_link, &dfs);
 
-       /* Recursively bump all dependent priorities to match the new request.
+       /*
+        * Recursively bump all dependent priorities to match the new request.
         *
         * A naive approach would be to use recursion:
         * static void update_priorities(struct i915_priotree *pt, prio) {
@@ -1031,12 +1032,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
        list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
                struct i915_priotree *pt = dep->signaler;
 
-               /* Within an engine, there can be no cycle, but we may
+               /*
+                * Within an engine, there can be no cycle, but we may
                 * refer to the same dependency chain multiple times
                 * (redundant dependencies are not eliminated) and across
                 * engines.
                 */
                list_for_each_entry(p, &pt->signalers_list, signal_link) {
+                       GEM_BUG_ON(p == dep); /* no cycles! */
+
                        if (i915_priotree_signaled(p->signaler))
                                continue;
 
@@ -1048,7 +1052,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
                list_safe_reset_next(dep, p, dfs_link);
        }
 
-       /* If we didn't need to bump any existing priorities, and we haven't
+       /*
+        * If we didn't need to bump any existing priorities, and we haven't
         * yet submitted this request (i.e. there is no potential race with
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
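
For illustration only, below is a minimal userspace sketch of the invariant
the new GEM_BUG_ON(p == dep) guards: while walking a node's signalers, no
node may turn up as its own signaler, otherwise the list walk would never
terminate. The sketch deliberately uses the naive recursive form mentioned
in the in-code comment (the driver itself uses an iterative DFS), and all
names here (struct node, struct dep, bump_priority) are invented stand-ins
for this example, not the kernel's i915_priotree/i915_dependency types; the
assert() plays the role of the GEM_BUG_ON.

/*
 * Minimal userspace sketch (not the kernel code) of the shallow-cycle
 * assertion: a node must never appear as its own signaler.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct node;

struct dep {
	struct node *signaler;	/* the request we depend on */
	struct dep *next;	/* next entry in the signalers list */
};

struct node {
	struct dep *signalers;	/* singly linked list of dependencies */
	int priority;
};

/* Recursively bump the priority of @n and of everything it depends on. */
static void bump_priority(struct node *n, int prio)
{
	struct dep *d;

	for (d = n->signalers; d; d = d->next) {
		/* no simple cycles: a node must not signal itself */
		assert(d->signaler != n);

		if (d->signaler->priority < prio)
			bump_priority(d->signaler, prio);
	}

	if (n->priority < prio)
		n->priority = prio;
}

int main(void)
{
	struct node a = { .signalers = NULL, .priority = 0 };
	struct dep on_a = { .signaler = &a, .next = NULL };	/* b depends on a */
	struct node b = { .signalers = &on_a, .priority = 0 };

	bump_priority(&b, 10);
	printf("a=%d b=%d\n", a.priority, b.priority);	/* both bumped to 10 */
	return 0;
}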