From 223128f767102127bf651a509df745fb6d56b3fb Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin
Date: Mon, 15 Jun 2020 16:14:49 +0100
Subject: [PATCH] drm/i915: Remove redundant i915_request_await_object in
 blit clears

One i915_request_await_object is enough and we keep the one under the
object lock so it is final.

At the same time move async clflushing setup under the same locked
section and consolidate common code into a helper function.

v2:
 * Emit initial breadcrumbs after awaits are set up. (Chris)

Signed-off-by: Tvrtko Ursulin
Cc: Matthew Auld
Cc: Chris Wilson
Cc: Michael J. Ruhl
Reviewed-by: Chris Wilson
Reviewed-by: Michael J. Ruhl
Signed-off-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/msgid/20200615151449.32605-1-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 52 +++++++++++---------
 1 file changed, 21 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index f457d71..bfdb32d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -126,6 +126,17 @@ void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
 	intel_engine_pm_put(ce->engine);
 }
 
+static int
+move_obj_to_gpu(struct drm_i915_gem_object *obj,
+		struct i915_request *rq,
+		bool write)
+{
+	if (obj->cache_dirty & ~obj->cache_coherent)
+		i915_gem_clflush_object(obj, 0);
+
+	return i915_request_await_object(rq, obj, write);
+}
+
 int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 			     struct intel_context *ce,
 			     u32 value)
@@ -143,12 +154,6 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 	if (unlikely(err))
 		return err;
 
-	if (obj->cache_dirty & ~obj->cache_coherent) {
-		i915_gem_object_lock(obj);
-		i915_gem_clflush_object(obj, 0);
-		i915_gem_object_unlock(obj);
-	}
-
 	batch = intel_emit_vma_fill_blt(ce, vma, value);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
@@ -165,27 +170,22 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 	if (unlikely(err))
 		goto out_request;
 
-	err = i915_request_await_object(rq, obj, true);
-	if (unlikely(err))
-		goto out_request;
-
-	if (ce->engine->emit_init_breadcrumb) {
-		err = ce->engine->emit_init_breadcrumb(rq);
-		if (unlikely(err))
-			goto out_request;
-	}
-
 	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, true);
+	err = move_obj_to_gpu(vma->obj, rq, true);
 	if (err == 0)
 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (unlikely(err))
 		goto out_request;
 
-	err = ce->engine->emit_bb_start(rq,
-					batch->node.start, batch->node.size,
-					0);
+	if (ce->engine->emit_init_breadcrumb)
+		err = ce->engine->emit_init_breadcrumb(rq);
+
+	if (likely(!err))
+		err = ce->engine->emit_bb_start(rq,
+						batch->node.start,
+						batch->node.size,
+						0);
 out_request:
 	if (unlikely(err))
 		i915_request_set_error_once(rq, err);
@@ -317,16 +317,6 @@ out_pm:
 	return ERR_PTR(err);
 }
 
-static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
-{
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	if (obj->cache_dirty & ~obj->cache_coherent)
-		i915_gem_clflush_object(obj, 0);
-
-	return i915_request_await_object(rq, obj, write);
-}
-
 int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
 			     struct drm_i915_gem_object *dst,
 			     struct intel_context *ce)
@@ -375,7 +365,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
 		goto out_request;
 
 	for (i = 0; i < ARRAY_SIZE(vma); i++) {
-		err = move_to_gpu(vma[i], rq, i);
+		err = move_obj_to_gpu(vma[i]->obj, rq, i);
 		if (unlikely(err))
 			goto out_unlock;
 	}
-- 
2.7.4
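
For readers skimming the hunks, the request-construction order that
i915_gem_object_fill_blt ends up with after this patch, pieced together
from the '+' lines above with the error unwinding elided (a readability
sketch, not the verbatim driver code):

	i915_vma_lock(vma);
	/* Clflush if needed and queue the await under the object lock. */
	err = move_obj_to_gpu(vma->obj, rq, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	/* Breadcrumb and batch start only after all awaits are in place. */
	if (ce->engine->emit_init_breadcrumb)
		err = ce->engine->emit_init_breadcrumb(rq);
	if (likely(!err))
		err = ce->engine->emit_bb_start(rq, batch->node.start,
						batch->node.size, 0);

This makes the v2 change visible at a glance: emit_init_breadcrumb() now
runs after move_obj_to_gpu() has set up the awaits, instead of before the
vma lock is even taken as in the removed code.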