drm/i915: Move intel_lr_context_pin() to avoid the forward declaration
author    Chris Wilson <chris@chris-wilson.co.uk>
          Sun, 18 Dec 2016 15:37:19 +0000 (15:37 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Sun, 18 Dec 2016 16:18:49 +0000 (16:18 +0000)
Just a simple move of intel_lr_context_pin() to avoid its forward
declaration, though the diff likes to present itself as a move of
intel_logical_ring_alloc_request_extras() in the opposite direction.
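
As a minimal illustration of why the move removes the declaration (a
sketch with hypothetical foo()/bar() names, not the i915 functions): C
only requires that a function be declared before its first use, so once
the callee's definition precedes its caller, the separate prototype
becomes redundant.

        static int bar(int x)
        {
                /* Defined above its only caller, so no
                 * "static int bar(int x);" prototype is needed.
                 */
                return 2 * x;
        }

        static int foo(int x)
        {
                return bar(x) + 1;
        }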

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161218153724.8439-2-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/intel_lrc.c

index 877aded..b848b5f 100644
@@ -230,8 +230,6 @@ enum {
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                                            struct intel_engine_cs *engine);
-static int intel_lr_context_pin(struct i915_gem_context *ctx,
-                               struct intel_engine_cs *engine);
 static void execlists_init_reg_state(u32 *reg_state,
                                     struct i915_gem_context *ctx,
                                     struct intel_engine_cs *engine,
@@ -774,71 +772,6 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
        /* XXX Do we need to preempt to make room for us and our deps? */
 }
 
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
-{
-       struct intel_engine_cs *engine = request->engine;
-       struct intel_context *ce = &request->ctx->engine[engine->id];
-       int ret;
-
-       /* Flush enough space to reduce the likelihood of waiting after
-        * we start building the request - in which case we will just
-        * have to repeat work.
-        */
-       request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
-       if (!ce->state) {
-               ret = execlists_context_deferred_alloc(request->ctx, engine);
-               if (ret)
-                       return ret;
-       }
-
-       request->ring = ce->ring;
-
-       ret = intel_lr_context_pin(request->ctx, engine);
-       if (ret)
-               return ret;
-
-       if (i915.enable_guc_submission) {
-               /*
-                * Check that the GuC has space for the request before
-                * going any further, as the i915_add_request() call
-                * later on mustn't fail ...
-                */
-               ret = i915_guc_wq_reserve(request);
-               if (ret)
-                       goto err_unpin;
-       }
-
-       ret = intel_ring_begin(request, 0);
-       if (ret)
-               goto err_unreserve;
-
-       if (!ce->initialised) {
-               ret = engine->init_context(request);
-               if (ret)
-                       goto err_unreserve;
-
-               ce->initialised = true;
-       }
-
-       /* Note that after this point, we have committed to using
-        * this request as it is being used to both track the
-        * state of engine initialisation and liveness of the
-        * golden renderstate above. Think twice before you try
-        * to cancel/unwind this request now.
-        */
-
-       request->reserved_space -= EXECLISTS_REQUEST_SIZE;
-       return 0;
-
-err_unreserve:
-       if (i915.enable_guc_submission)
-               i915_guc_wq_unreserve(request);
-err_unpin:
-       intel_lr_context_unpin(request->ctx, engine);
-       return ret;
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
                                struct intel_engine_cs *engine)
 {
@@ -911,6 +844,71 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
        i915_gem_context_put(ctx);
 }
 
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
+{
+       struct intel_engine_cs *engine = request->engine;
+       struct intel_context *ce = &request->ctx->engine[engine->id];
+       int ret;
+
+       /* Flush enough space to reduce the likelihood of waiting after
+        * we start building the request - in which case we will just
+        * have to repeat work.
+        */
+       request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+       if (!ce->state) {
+               ret = execlists_context_deferred_alloc(request->ctx, engine);
+               if (ret)
+                       return ret;
+       }
+
+       request->ring = ce->ring;
+
+       ret = intel_lr_context_pin(request->ctx, engine);
+       if (ret)
+               return ret;
+
+       if (i915.enable_guc_submission) {
+               /*
+                * Check that the GuC has space for the request before
+                * going any further, as the i915_add_request() call
+                * later on mustn't fail ...
+                */
+               ret = i915_guc_wq_reserve(request);
+               if (ret)
+                       goto err_unpin;
+       }
+
+       ret = intel_ring_begin(request, 0);
+       if (ret)
+               goto err_unreserve;
+
+       if (!ce->initialised) {
+               ret = engine->init_context(request);
+               if (ret)
+                       goto err_unreserve;
+
+               ce->initialised = true;
+       }
+
+       /* Note that after this point, we have committed to using
+        * this request as it is being used to both track the
+        * state of engine initialisation and liveness of the
+        * golden renderstate above. Think twice before you try
+        * to cancel/unwind this request now.
+        */
+
+       request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+       return 0;
+
+err_unreserve:
+       if (i915.enable_guc_submission)
+               i915_guc_wq_unreserve(request);
+err_unpin:
+       intel_lr_context_unpin(request->ctx, engine);
+       return ret;
+}
+
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
        int ret, i;