drm/i915: Make request conflict tracking understand parallel submits
author Matthew Brost <matthew.brost@intel.com>
Thu, 14 Oct 2021 17:20:02 +0000 (10:20 -0700)
committer John Harrison <John.C.Harrison@Intel.com>
Fri, 15 Oct 2021 17:45:50 +0000 (10:45 -0700)
If an object in the excl or shared slot is a composite fence from a
parallel submit, and the current request in the conflict tracking is from
the same parallel context, there is no need to enforce ordering as the
ordering is already implicit. Make the request conflict tracking
understand this by comparing the parent contexts of the parallel submits
and skipping conflict insertion when they match.

v2:
 (John Harrison)
  - Reword commit message

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-23-matthew.brost@intel.com
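[Illustrative sketch, not part of the patch: the standalone program below
mirrors the shape of the check described in the commit message above, using
simplified stand-in structs and a main() of my own invention. In the real
driver the parent lookup and parallel test are done with the existing
intel_context_to_parent() and intel_context_is_parallel() helpers shown in
the diff that follows.]

    /* Sketch only: simplified stand-ins for the i915 types. */
    #include <stdbool.h>
    #include <stdio.h>

    struct intel_context {
            struct intel_context *parent;   /* NULL unless part of a parallel submit */
    };

    struct i915_request {
            struct intel_context *context;
    };

    static bool is_parallel_rq(const struct i915_request *rq)
    {
            return rq->context->parent != NULL;
    }

    static const struct intel_context *
    request_to_parent(const struct i915_request *rq)
    {
            /* A parallel child resolves to its parent; otherwise to itself. */
            return rq->context->parent ? rq->context->parent : rq->context;
    }

    /*
     * No explicit await is needed if "to" belongs to a parallel submit and
     * both requests resolve to the same parent context: ordering within one
     * parallel submit is already implicit.
     */
    static bool is_same_parallel_context(const struct i915_request *to,
                                         const struct i915_request *from)
    {
            if (is_parallel_rq(to))
                    return request_to_parent(to) == request_to_parent(from);

            return false;
    }

    int main(void)
    {
            struct intel_context parent = { .parent = NULL };
            struct intel_context child0 = { .parent = &parent };
            struct intel_context child1 = { .parent = &parent };

            struct i915_request to   = { .context = &child0 };
            struct i915_request from = { .context = &child1 };

            /* Same parallel parent: the await loops below would 'continue' here. */
            printf("skip await: %s\n",
                   is_same_parallel_context(&to, &from) ? "yes" : "no");
            return 0;
    }

Collapsing every child of a parallel submit to a single parent pointer makes
the comparison a constant-time identity check, which is why the await loops
in the diff can simply 'continue' past fences from the same parallel context.
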
drivers/gpu/drm/i915/i915_request.c

index d29e46a..2c3cd6e 100644
@@ -1335,6 +1335,25 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
        return err;
 }
 
+static inline bool is_parallel_rq(struct i915_request *rq)
+{
+       return intel_context_is_parallel(rq->context);
+}
+
+static inline struct intel_context *request_to_parent(struct i915_request *rq)
+{
+       return intel_context_to_parent(rq->context);
+}
+
+static bool is_same_parallel_context(struct i915_request *to,
+                                    struct i915_request *from)
+{
+       if (is_parallel_rq(to))
+               return request_to_parent(to) == request_to_parent(from);
+
+       return false;
+}
+
 int
 i915_request_await_execution(struct i915_request *rq,
                             struct dma_fence *fence)
@@ -1366,11 +1385,14 @@ i915_request_await_execution(struct i915_request *rq,
                 * want to run our callback in all cases.
                 */
 
-               if (dma_fence_is_i915(fence))
+               if (dma_fence_is_i915(fence)) {
+                       if (is_same_parallel_context(rq, to_request(fence)))
+                               continue;
                        ret = __i915_request_await_execution(rq,
                                                             to_request(fence));
-               else
+               } else {
                        ret = i915_request_await_external(rq, fence);
+               }
                if (ret < 0)
                        return ret;
        } while (--nchild);
@@ -1471,10 +1493,13 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
                                                 fence))
                        continue;
 
-               if (dma_fence_is_i915(fence))
+               if (dma_fence_is_i915(fence)) {
+                       if (is_same_parallel_context(rq, to_request(fence)))
+                               continue;
                        ret = i915_request_await_request(rq, to_request(fence));
-               else
+               } else {
                        ret = i915_request_await_external(rq, fence);
+               }
                if (ret < 0)
                        return ret;
 
@@ -1549,16 +1574,6 @@ i915_request_await_object(struct i915_request *to,
        return ret;
 }
 
-static inline bool is_parallel_rq(struct i915_request *rq)
-{
-       return intel_context_is_parallel(rq->context);
-}
-
-static inline struct intel_context *request_to_parent(struct i915_request *rq)
-{
-       return intel_context_to_parent(rq->context);
-}
-
 static struct i915_request *
 __i915_request_ensure_parallel_ordering(struct i915_request *rq,
                                        struct intel_timeline *timeline)