drm/i915: Refactor marking a request as EIO
author Chris Wilson <chris@chris-wilson.co.uk>
Sat, 9 Jan 2021 16:34:55 +0000 (16:34 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Sat, 9 Jan 2021 18:29:07 +0000 (18:29 +0000)
When wedging the device, we cancel all outstanding requests and mark
them as EIO. Rather than duplicating the small function that does so in
each submission backend, export a single helper.
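
In short, the per-backend copies of mark_eio() collapse into one exported
helper; a minimal sketch of the consolidated function and a typical
backend call site, mirroring the hunks below:

    void i915_request_mark_eio(struct i915_request *rq)
    {
            if (__i915_request_is_complete(rq))
                    return;

            GEM_BUG_ON(i915_request_signaled(rq));

            /* Record -EIO once, then mark the request as complete. */
            i915_request_set_error_once(rq, -EIO);
            i915_request_mark_complete(rq);
    }

    /* Each backend's reset/cancel path now calls the shared helper: */
    list_for_each_entry(rq, &engine->active.requests, sched.link)
            i915_request_mark_eio(rq);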

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Andi Shyti <andi.shyti@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210109163455.28466-3-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/mock_engine.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h

drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 4d7ac81..52c1fe6 100644
@@ -203,17 +203,6 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
        return container_of(engine, struct virtual_engine, base);
 }
 
-static void mark_eio(struct i915_request *rq)
-{
-       if (__i915_request_is_complete(rq))
-               return;
-
-       GEM_BUG_ON(i915_request_signaled(rq));
-
-       i915_request_set_error_once(rq, -EIO);
-       i915_request_mark_complete(rq);
-}
-
 static struct i915_request *
 __active_request(const struct intel_timeline * const tl,
                 struct i915_request *rq,
@@ -2996,7 +2985,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 
        /* Mark all executing requests as skipped. */
        list_for_each_entry(rq, &engine->active.requests, sched.link)
-               mark_eio(rq);
+               i915_request_mark_eio(rq);
        intel_engine_signal_breadcrumbs(engine);
 
        /* Flush the queued requests to the timeline list (for retiring). */
@@ -3005,7 +2994,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                int i;
 
                priolist_for_each_request_consume(rq, rn, p, i) {
-                       mark_eio(rq);
+                       i915_request_mark_eio(rq);
                        __i915_request_submit(rq);
                }
 
@@ -3015,7 +3004,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 
        /* On-hold requests will be flushed to timeline upon their release */
        list_for_each_entry(rq, &engine->active.hold, sched.link)
-               mark_eio(rq);
+               i915_request_mark_eio(rq);
 
        /* Cancel all attached virtual engines */
        while ((rb = rb_first_cached(&execlists->virtual))) {
@@ -3028,7 +3017,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                spin_lock(&ve->base.active.lock);
                rq = fetch_and_zero(&ve->request);
                if (rq) {
-                       mark_eio(rq);
+                       i915_request_mark_eio(rq);
 
                        rq->engine = engine;
                        __i915_request_submit(rq);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 4ea741f..1c6d421 100644
@@ -473,10 +473,8 @@ static void reset_cancel(struct intel_engine_cs *engine)
        spin_lock_irqsave(&engine->active.lock, flags);
 
        /* Mark all submitted requests as skipped. */
-       list_for_each_entry(request, &engine->active.requests, sched.link) {
-               i915_request_set_error_once(request, -EIO);
-               i915_request_mark_complete(request);
-       }
+       list_for_each_entry(request, &engine->active.requests, sched.link)
+               i915_request_mark_eio(request);
        intel_engine_signal_breadcrumbs(engine);
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
drivers/gpu/drm/i915/gt/mock_engine.c
index 2f83001..4b4f03b 100644
@@ -245,17 +245,6 @@ static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
        GEM_BUG_ON(stalled);
 }
 
-static void mark_eio(struct i915_request *rq)
-{
-       if (i915_request_completed(rq))
-               return;
-
-       GEM_BUG_ON(i915_request_signaled(rq));
-
-       i915_request_set_error_once(rq, -EIO);
-       i915_request_mark_complete(rq);
-}
-
 static void mock_reset_cancel(struct intel_engine_cs *engine)
 {
        struct mock_engine *mock =
@@ -269,12 +258,12 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
 
        /* Mark all submitted requests as skipped. */
        list_for_each_entry(rq, &engine->active.requests, sched.link)
-               mark_eio(rq);
+               i915_request_mark_eio(rq);
        intel_engine_signal_breadcrumbs(engine);
 
        /* Cancel and submit all pending requests. */
        list_for_each_entry(rq, &mock->hw_queue, mock.link) {
-               mark_eio(rq);
+               i915_request_mark_eio(rq);
                __i915_request_submit(rq);
        }
        INIT_LIST_HEAD(&mock->hw_queue);
drivers/gpu/drm/i915/i915_request.c
index bbf42bc..d9f32a3 100644
@@ -515,6 +515,17 @@ void i915_request_set_error_once(struct i915_request *rq, int error)
        } while (!try_cmpxchg(&rq->fence.error, &old, error));
 }
 
+void i915_request_mark_eio(struct i915_request *rq)
+{
+       if (__i915_request_is_complete(rq))
+               return;
+
+       GEM_BUG_ON(i915_request_signaled(rq));
+
+       i915_request_set_error_once(rq, -EIO);
+       i915_request_mark_complete(rq);
+}
+
 bool __i915_request_submit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
drivers/gpu/drm/i915/i915_request.h
index a8c4132..1bfe214 100644
@@ -309,8 +309,9 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
 struct i915_request * __must_check
 i915_request_create(struct intel_context *ce);
 
-void i915_request_set_error_once(struct i915_request *rq, int error);
 void __i915_request_skip(struct i915_request *rq);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void i915_request_mark_eio(struct i915_request *rq);
 
 struct i915_request *__i915_request_commit(struct i915_request *request);
 void __i915_request_queue(struct i915_request *rq,