For virtual engines, we need to keep the HW context alive while it
remains in use. Regular HW contexts are created and kept alive until the
end of the GEM context. For simplicity, generalise the
requirements and keep an active reference to each HW context.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190318212347.30146-2-chris@chris-wilson.co.uk
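In outline, the patch embeds a struct kref in struct intel_context and routes all teardown through the last intel_context_put(). A minimal, self-contained sketch of that pattern is below; the example_ctx names are hypothetical, and only kref_init()/kref_get()/kref_put() and container_of() are the kernel primitives the patch itself relies on.

#include <linux/kref.h>
#include <linux/slab.h>

struct example_ctx {
	struct kref ref;			/* mirrors intel_context.ref */
	/* ... HW state ... */
};

static void example_ctx_destroy(struct kref *kref)
{
	/* the release callback recovers the object from its embedded kref */
	struct example_ctx *ctx = container_of(kref, typeof(*ctx), ref);

	kfree(ctx);
}

static struct example_ctx *example_ctx_create(void)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		kref_init(&ctx->ref);		/* owner's reference, dropped at teardown */
	return ctx;
}

static inline struct example_ctx *example_ctx_get(struct example_ctx *ctx)
{
	kref_get(&ctx->ref);			/* e.g. held for as long as the context is pinned */
	return ctx;
}

static inline void example_ctx_put(struct example_ctx *ctx)
{
	kref_put(&ctx->ref, example_ctx_destroy);	/* the last put frees */
}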
i915_ppgtt_put(ctx->ppgtt);
rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
- it->ops->destroy(it);
+ intel_context_put(it);
kfree(ctx->name);
put_pid(ctx->pid);
list_add(&ce->active_link, &ctx->active_engines);
mutex_unlock(&ctx->mutex);
+ intel_context_get(ce);
smp_mb__before_atomic(); /* flush pin before it is visible */
}
return;
/* We may be called from inside intel_context_pin() to evict another */
+ intel_context_get(ce);
mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
if (likely(atomic_dec_and_test(&ce->pin_count))) {
mutex_unlock(&ce->gem_context->mutex);
i915_gem_context_put(ce->gem_context);
+ intel_context_put(ce);
}
mutex_unlock(&ce->pin_mutex);
+ intel_context_put(ce);
}
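Why intel_context_unpin() above takes a reference on entry: the intel_context_put() inside the if-block drops the reference taken in intel_context_pin() when the context first became pinned, so without a temporary reference of its own the final release could otherwise run while ce->pin_mutex is still held. Annotated sketch of the flow (the calls are those in the hunk above; the comments are editorial):

/*
 *	intel_context_get(ce);			// temporary: ce must outlive its own pin_mutex
 *	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
 *	if (atomic_dec_and_test(&ce->pin_count)) {
 *		...
 *		intel_context_put(ce);		// drops the reference taken in intel_context_pin()
 *	}
 *	mutex_unlock(&ce->pin_mutex);
 *	intel_context_put(ce);			// possibly the final put; runs with pin_mutex released
 */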
void
intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ kref_init(&ce->ref);
+
ce->gem_context = ctx;
ce->engine = engine;
ce->ops = engine->cops;
void intel_context_unpin(struct intel_context *ce);
+static inline struct intel_context *intel_context_get(struct intel_context *ce)
+{
+ kref_get(&ce->ref);
+ return ce;
+}
+
+static inline void intel_context_put(struct intel_context *ce)
+{
+ kref_put(&ce->ref, ce->ops->destroy);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
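Taken together with the earlier hunks, the reference flow introduced by the patch is roughly the following (error paths elided; sketched here for reference, not part of the patch):

/*
 * intel_context_init()             kref_init(&ce->ref)         creation reference
 * intel_context_pin()   (0 -> 1)   intel_context_get(ce)       held while pinned
 * intel_context_unpin() (1 -> 0)   intel_context_put(ce)       drops the pin reference
 * i915_gem_context_free()          intel_context_put(it)       drops the creation reference
 * last intel_context_put()         ce->ops->destroy(&ce->ref)  frees the intel_context
 */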
#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__
+#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
struct intel_context_ops {
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
- void (*destroy)(struct intel_context *ce);
+
+ void (*destroy)(struct kref *kref);
};
struct intel_context {
+ struct kref ref;
+
struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
struct intel_engine_cs *active;
i915_gem_object_put(ce->state->obj);
}
-static void execlists_context_destroy(struct intel_context *ce)
+static void execlists_context_destroy(struct kref *kref)
{
+ struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->state)
i915_gem_object_put(ce->state->obj);
}
-static void ring_context_destroy(struct intel_context *ce)
+static void ring_context_destroy(struct kref *ref)
{
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
GEM_BUG_ON(intel_context_is_pinned(ce));
}
-static void mock_context_destroy(struct intel_context *ce)
+static void mock_context_destroy(struct kref *ref)
{
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->ring)
mock_ring_free(ce->ring);
+
+ intel_context_free(ce);
}
static int mock_context_pin(struct intel_context *ce)
static const struct intel_context_ops mock_context_ops = {
.pin = mock_context_pin,
.unpin = mock_context_unpin,
+
.destroy = mock_context_destroy,
};