drm/i915: Pull the unconditional GPU cache invalidation into request construction
author Chris Wilson <chris@chris-wilson.co.uk>
Mon, 20 Nov 2017 10:20:01 +0000 (10:20 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Mon, 20 Nov 2017 15:56:16 +0000 (15:56 +0000)
As the request will, in the following patch, implicitly invoke a
context switch on construction, we should precede that with a GPU TLB
invalidation. Moreover, even before using the GGTT, we always want to
invalidate the TLBs for any updates (in addition to the ppGTT
invalidates that execbuf applies unconditionally). Since we almost
always require the TLB invalidate, do it unconditionally on request
allocation, so that it can be removed from all other paths.
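
To illustrate, here is a minimal sketch of the caller pattern after
this change, condensed from the selftest hunks below (error handling
abbreviated, not a verbatim excerpt): the invalidate is emitted during
request allocation, so callers proceed straight to the context switch.

	rq = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* No explicit engine->emit_flush(rq, EMIT_INVALIDATE) here:
	 * request allocation has already emitted it into the ring.
	 */
	err = i915_switch_context(rq);
	if (err)
		goto err_request;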

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171120102002.22254-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/i915_gem_request.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 53ccb27..b789578 100644
@@ -1111,10 +1111,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        if (err)
                goto err_request;
 
-       err = eb->engine->emit_flush(rq, EMIT_INVALIDATE);
-       if (err)
-               goto err_request;
-
        err = i915_switch_context(rq);
        if (err)
                goto err_request;
@@ -1818,8 +1814,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
        /* Unconditionally flush any chipset caches (for streaming writes). */
        i915_gem_chipset_flush(eb->i915);
 
-       /* Unconditionally invalidate GPU caches and TLBs. */
-       return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
+       return 0;
 }
 
 static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index c2723a0..f7fc0df 100644
@@ -208,10 +208,6 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *rq)
        if (err)
                goto err_unpin;
 
-       err = engine->emit_flush(rq, EMIT_INVALIDATE);
-       if (err)
-               goto err_unpin;
-
        err = engine->emit_bb_start(rq,
                                    so.batch_offset, so.batch_size,
                                    I915_DISPATCH_SECURE);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e0d6221..91eae1b 100644
@@ -703,17 +703,31 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
        GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
 
-       ret = engine->request_alloc(req);
-       if (ret)
-               goto err_ctx;
-
-       /* Record the position of the start of the request so that
+       /*
+        * Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        req->head = req->ring->emit;
 
+       /* Unconditionally invalidate GPU caches and TLBs. */
+       ret = engine->emit_flush(req, EMIT_INVALIDATE);
+       if (ret)
+               goto err_ctx;
+
+       ret = engine->request_alloc(req);
+       if (ret) {
+               /*
+                * Past the point-of-no-return. Since we may have updated
+                * global state after partially completing the request alloc,
+                * we need to commit any commands so far emitted in the
+                * request to the HW.
+                */
+               __i915_add_request(req, false);
+               return ERR_PTR(ret);
+       }
+
        /* Check that we didn't interrupt ourselves with a new request */
        GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
        return req;
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 01af540..159a2cb 100644
@@ -989,10 +989,6 @@ static int gpu_write(struct i915_vma *vma,
        i915_vma_unpin(batch);
        i915_vma_close(batch);
 
-       err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
-       if (err)
-               goto err_request;
-
        err = i915_switch_context(rq);
        if (err)
                goto err_request;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index c82780a..4ff30b9 100644
@@ -158,10 +158,6 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
                goto err_batch;
        }
 
-       err = engine->emit_flush(rq, EMIT_INVALIDATE);
-       if (err)
-               goto err_request;
-
        err = i915_switch_context(rq);
        if (err)
                goto err_request;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 6bce990..d7bf53f 100644
@@ -459,10 +459,6 @@ empty_request(struct intel_engine_cs *engine,
        if (IS_ERR(request))
                return request;
 
-       err = engine->emit_flush(request, EMIT_INVALIDATE);
-       if (err)
-               goto out_request;
-
        err = i915_switch_context(request);
        if (err)
                goto out_request;
@@ -675,9 +671,6 @@ static int live_all_engines(void *arg)
                        goto out_request;
                }
 
-               err = engine->emit_flush(request[id], EMIT_INVALIDATE);
-               GEM_BUG_ON(err);
-
                err = i915_switch_context(request[id]);
                GEM_BUG_ON(err);
 
@@ -797,9 +790,6 @@ static int live_sequential_engines(void *arg)
                        }
                }
 
-               err = engine->emit_flush(request[id], EMIT_INVALIDATE);
-               GEM_BUG_ON(err);
-
                err = i915_switch_context(request[id]);
                GEM_BUG_ON(err);
 
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 71ce066..145bdc2 100644
@@ -114,10 +114,6 @@ static int emit_recurse_batch(struct hang *h,
        if (err)
                goto unpin_vma;
 
-       err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
-       if (err)
-               goto unpin_hws;
-
        err = i915_switch_context(rq);
        if (err)
                goto unpin_hws;