patch-5.15.79-rt54.patch (platform/kernel/linux-rpi.git)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index f83a73a..601274b 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -16,6 +16,7 @@
 #include "intel_engine_types.h"
 #include "intel_ring_types.h"
 #include "intel_timeline_types.h"
+#include "i915_trace.h"
 
 #define CE_TRACE(ce, fmt, ...) do {                                    \
        const struct intel_context *ce__ = (ce);                        \
@@ -30,6 +31,9 @@ void intel_context_init(struct intel_context *ce,
                        struct intel_engine_cs *engine);
 void intel_context_fini(struct intel_context *ce);
 
+void i915_context_module_exit(void);
+int i915_context_module_init(void);
+
 struct intel_context *
 intel_context_create(struct intel_engine_cs *engine);
 
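
The declarations above give the context code its own module-scope constructor and destructor (upstream uses these to manage the slab cache for struct intel_context). A plausible caller shape, purely illustrative and not taken from this patch, is a table of init/exit pairs run in order on module load and unwound in reverse on failure:

    #include <stdio.h>

    /* Hypothetical model of module-scope init/exit pairing; a real
     * caller would list i915_context_module_init()/_exit() in such a
     * table. All names here are illustrative. */
    struct init_pair {
            int (*init)(void);
            void (*exit)(void);
    };

    static int context_init(void) { puts("context init"); return 0; }
    static void context_exit(void) { puts("context exit"); }

    static const struct init_pair init_pairs[] = {
            { context_init, context_exit },
    };

    static int module_init_all(void)
    {
            size_t i;

            for (i = 0; i < sizeof(init_pairs) / sizeof(init_pairs[0]); i++) {
                    int err = init_pairs[i].init();

                    if (err) {
                            /* Unwind what already succeeded, in reverse. */
                            while (i--)
                                    init_pairs[i].exit();
                            return err;
                    }
            }
            return 0;
    }

    int main(void)
    {
            return module_init_all();
    }
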
@@ -69,6 +73,13 @@ intel_context_is_pinned(struct intel_context *ce)
        return atomic_read(&ce->pin_count);
 }
 
+static inline void intel_context_cancel_request(struct intel_context *ce,
+                                               struct i915_request *rq)
+{
+       GEM_BUG_ON(!ce->ops->cancel_request);
+       return ce->ops->cancel_request(ce, rq);
+}
+
 /**
  * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
  * @ce - the context
@@ -113,7 +124,32 @@ static inline void __intel_context_pin(struct intel_context *ce)
        atomic_inc(&ce->pin_count);
 }
 
-void intel_context_unpin(struct intel_context *ce);
+void __intel_context_do_unpin(struct intel_context *ce, int sub);
+
+static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
+{
+       __intel_context_do_unpin(ce, 2);
+}
+
+static inline void intel_context_unpin(struct intel_context *ce)
+{
+       if (!ce->ops->sched_disable) {
+               __intel_context_do_unpin(ce, 1);
+       } else {
+               /*
+                * Move ownership of this pin to the scheduling disable,
+                * which is an async operation. When that operation
+                * completes, intel_context_sched_disable_unpin() above is
+                * called, potentially unpinning the context.
+                */
+               while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
+                       if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
+                               ce->ops->sched_disable(ce);
+                               break;
+                       }
+               }
+       }
+}
 
 void intel_context_enter_engine(struct intel_context *ce);
 void intel_context_exit_engine(struct intel_context *ce);
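
The rewritten intel_context_unpin() above hinges on a lock-free handoff: while other pins remain, the unpin is a plain decrement, but the final pin is converted 1 -> 2 and both references are handed to the asynchronous sched_disable path, which eventually drops them through intel_context_sched_disable_unpin() (the sub == 2 case of __intel_context_do_unpin()). Below is a minimal userspace model of that loop using C11 atomics; names mirror the patch but the code is illustrative, not i915 code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int pin_count;

    /* Model of the kernel's atomic_add_unless(v, a, u): add a to *v
     * unless *v == u; returns true if the add happened. */
    static bool add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            while (c != u) {
                    /* On failure, c is reloaded with the current value. */
                    if (atomic_compare_exchange_weak(v, &c, c + a))
                            return true;
            }
            return false;
    }

    /* Stand-in for ce->ops->sched_disable(): asynchronous in the
     * driver, finishing by dropping the two references it now owns
     * via intel_context_sched_disable_unpin(). */
    static void sched_disable(void)
    {
            printf("last pin handed off, pin_count=%d\n",
                   atomic_load(&pin_count));
    }

    static void unpin(void)
    {
            /* Same shape as the patched intel_context_unpin(). */
            while (!add_unless(&pin_count, -1, 1)) {
                    int expected = 1;

                    /* 1 -> 2: the caller's final pin becomes two
                     * references owned by the async disable path. */
                    if (atomic_compare_exchange_strong(&pin_count,
                                                       &expected, 2)) {
                            sched_disable();
                            break;
                    }
                    /* Lost a race with another pin/unpin; retry. */
            }
    }

    int main(void)
    {
            atomic_store(&pin_count, 2);
            unpin();        /* 2 -> 1: plain decrement */
            unpin();        /* 1 -> 2: ownership moves to sched_disable() */
            return 0;
    }

The retry loop matters: between the failed add_unless() and the cmpxchg, another thread may pin or unpin the context, so neither step can assume the count it last observed.
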
@@ -127,7 +163,8 @@ static inline void intel_context_enter(struct intel_context *ce)
 
 static inline void intel_context_mark_active(struct intel_context *ce)
 {
-       lockdep_assert_held(&ce->timeline->mutex);
+       lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
+                      test_bit(CONTEXT_IS_PARKED, &ce->flags));
        ++ce->active_count;
 }
 
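
The hunk above relaxes intel_context_mark_active(): lockdep_assert_held() demands the timeline mutex unconditionally, while lockdep_assert() takes an arbitrary expression, so a parked context (CONTEXT_IS_PARKED, introduced elsewhere in this RT patch) is exempt from the mutex requirement. A compact userspace model of the relaxed invariant, with illustrative names:

    #include <assert.h>
    #include <stdbool.h>

    struct ctx {
            bool timeline_mutex_held;   /* stands in for lockdep_is_held() */
            bool parked;                /* stands in for CONTEXT_IS_PARKED */
            int active_count;
    };

    static void mark_active(struct ctx *c)
    {
            /* Old rule: assert(c->timeline_mutex_held);
             * new rule: the parking path is also legal. */
            assert(c->timeline_mutex_held || c->parked);
            ++c->active_count;
    }

    int main(void)
    {
            struct ctx c = { .parked = true };

            mark_active(&c);    /* permitted without the mutex */
            return 0;
    }
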
@@ -175,10 +212,8 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 
 struct i915_request *intel_context_create_request(struct intel_context *ce);
 
-static inline struct intel_ring *__intel_context_ring_size(u64 sz)
-{
-       return u64_to_ptr(struct intel_ring, sz);
-}
+struct i915_request *
+intel_context_find_active_request(struct intel_context *ce);
 
 static inline bool intel_context_is_barrier(const struct intel_context *ce)
 {
@@ -220,6 +255,18 @@ static inline bool intel_context_set_banned(struct intel_context *ce)
        return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
 }
 
+static inline bool intel_context_ban(struct intel_context *ce,
+                                    struct i915_request *rq)
+{
+       bool ret = intel_context_set_banned(ce);
+
+       trace_intel_context_ban(ce);
+       if (ce->ops->ban)
+               ce->ops->ban(ce, rq);
+
+       return ret;
+}
+
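
intel_context_ban() above layers a backend hook on intel_context_set_banned(): test_and_set_bit() makes banning idempotent (the return value reports whether the context was already banned), and ce->ops->ban(), when implemented, lets the submission backend act on the guilty request. A small userspace model of that idempotence, with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define BANNED_BIT 1u

    struct ctx_ops {
            void (*ban)(void);          /* backend hook, may be NULL */
    };

    struct ctx {
            atomic_uint flags;
            const struct ctx_ops *ops;
    };

    /* Model of test_and_set_bit(): set the bit, report its old value. */
    static bool set_banned(struct ctx *c)
    {
            return atomic_fetch_or(&c->flags, BANNED_BIT) & BANNED_BIT;
    }

    static bool ban(struct ctx *c)
    {
            bool was_banned = set_banned(c);

            if (c->ops && c->ops->ban)
                    c->ops->ban();      /* e.g. cancel the guilty request */
            return was_banned;
    }

    static void backend_ban(void) { puts("backend notified"); }

    int main(void)
    {
            static const struct ctx_ops ops = { .ban = backend_ban };
            struct ctx c = { .ops = &ops };

            printf("first ban, already banned: %d\n", ban(&c));   /* 0 */
            printf("second ban, already banned: %d\n", ban(&c));  /* 1 */
            return 0;
    }
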
 static inline bool
 intel_context_force_single_submission(const struct intel_context *ce)
 {