drm/i915/lrc: Scrub the GPU state of the guilty hanging request
author     Chris Wilson <chris@chris-wilson.co.uk>
           Sat, 28 Apr 2018 11:15:32 +0000 (12:15 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Mon, 30 Apr 2018 10:52:41 +0000 (11:52 +0100)
Previously, we just reset the ring registers in the context image so
that we could skip over the broken batch and emit the closing
breadcrumb. However, on resume the context image and GPU state would be
reloaded, and the reset may have left them in an inconsistent state. The
presumption was that, at worst, this would just cause another reset and
another skip until the GPU recovered; however, it seems just as likely
to cause an unrecoverable hang. Instead of risking loading an incomplete
context image, restore it to the default state recorded at driver load.

v2: Fix up off-by-one from including the PPHWSP in with the register
state (see the layout sketch after the changelog).
v3: Use a ring local to compact a few lines.
v4: Beware setting the ring local before checking for a NULL request.
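
For reference, a minimal sketch of the context-image layout that the
memcpy in the hunk below relies on, using the page-number constants from
intel_lrc.h of this era (an illustration of the arithmetic, not part of
the patch):

    /* The first page of the logical ring context image holds the
     * per-process HWSP (PPHWSP); the register state that the reset
     * must scrub starts one page in.
     */
    #define LRC_PPHWSP_PN   (0)
    #define LRC_STATE_PN    (LRC_PPHWSP_PN + 1)

    /* engine->context_size covers the whole image, PPHWSP included,
     * so the register state is (context_size - PAGE_SIZE) bytes
     * starting at LRC_STATE_PN * PAGE_SIZE. Copying from offset 0
     * would drag the stale PPHWSP along as well -- the v2 off-by-one.
     */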

References: https://bugs.freedesktop.org/show_bug.cgi?id=105304
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: MichaƂ Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com> #v2
Link: https://patchwork.freedesktop.org/patch/msgid/20180428111532.15819-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/intel_lrc.c

index 87eb3a6..58cad24 100644
@@ -1803,8 +1803,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
                              struct i915_request *request)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct intel_context *ce;
        unsigned long flags;
+       u32 *regs;
 
        GEM_TRACE("%s request global=%x, current=%d\n",
                  engine->name, request ? request->global_seqno : 0,
@@ -1854,14 +1854,24 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         * future request will be after userspace has had the opportunity
         * to recreate its own state.
         */
-       ce = &request->ctx->engine[engine->id];
-       execlists_init_reg_state(ce->lrc_reg_state,
-                                request->ctx, engine, ce->ring);
+       regs = request->ctx->engine[engine->id].lrc_reg_state;
+       if (engine->default_state) {
+               void *defaults;
+
+               defaults = i915_gem_object_pin_map(engine->default_state,
+                                                  I915_MAP_WB);
+               if (!IS_ERR(defaults)) {
+                       memcpy(regs, /* skip restoring the vanilla PPHWSP */
+                              defaults + LRC_STATE_PN * PAGE_SIZE,
+                              engine->context_size - PAGE_SIZE);
+                       i915_gem_object_unpin_map(engine->default_state);
+               }
+       }
+       execlists_init_reg_state(regs, request->ctx, engine, request->ring);
 
        /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
-       ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
-               i915_ggtt_offset(ce->ring->vma);
-       ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+       regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
+       regs[CTX_RING_HEAD + 1] = request->postfix;
 
        request->ring->head = request->postfix;
        intel_ring_update_space(request->ring);
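
A note on the "+ 1" indexing in the hunk above: the register state is a
list of register/value pairs consumed by MI_LOAD_REGISTER_IMM, so
CTX_RING_HEAD names the slot holding the register offset and the
following slot holds the value to load. Sketched from the helper in
intel_lrc.c of this era (illustrative, not part of the patch):

    /* Each context register occupies two dwords: the mmio offset of
     * the register, then the value to be loaded into it.
     */
    #define CTX_REG(reg_state, pos, reg, val) do { \
            (reg_state)[(pos) + 0] = i915_mmio_reg_offset(reg); \
            (reg_state)[(pos) + 1] = (val); \
    } while (0)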