/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)

struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

void i915_context_module_exit(void);
int i915_context_module_init(void);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

static inline void intel_context_cancel_request(struct intel_context *ce,
						struct i915_request *rq)
{
	GEM_BUG_ON(!ce->ops->cancel_request);
	return ce->ops->cancel_request(ce, rq);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}

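/*
 * Usage sketch (illustrative only, not part of the original header): a
 * caller needing a stable view of the pinned status brackets the query
 * with the helpers above; the lock is interruptible, so the error must
 * be checked:
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *	pinned = intel_context_is_pinned(ce);
 *	intel_context_unlock_pinned(ce);
 */
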
int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin_ww(ce, ww);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	/* Only valid while the caller already holds a pin on @ce */
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void __intel_context_do_unpin(struct intel_context *ce, int sub);

static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{
	__intel_context_do_unpin(ce, 2);
}

static inline void intel_context_unpin(struct intel_context *ce)
{
	if (!ce->ops->sched_disable) {
		__intel_context_do_unpin(ce, 1);
	} else {
		/*
		 * Move ownership of this pin to the scheduling disable which is
		 * an async operation. When that operation completes the above
		 * intel_context_sched_disable_unpin is called potentially
		 * unpinning the context.
		 */
		while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
			if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
				ce->ops->sched_disable(ce);
				break;
			}
		}
	}
}

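/*
 * Usage sketch (illustrative only): every successful intel_context_pin()
 * must be balanced by an intel_context_unpin() once the HW state may be
 * released again:
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *	... submit work that relies on the pinned state ...
 *	intel_context_unpin(ce);
 */
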
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}

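/*
 * Usage sketch (illustrative only): the enter/exit pair counts a context's
 * busy phases, must nest under the timeline mutex, and fires
 * ce->ops->enter()/exit() only on the 0 <-> 1 transitions:
 *
 *	mutex_lock(&ce->timeline->mutex);
 *	intel_context_enter(ce);
 *	... the context is now tracked as active ...
 *	intel_context_exit(ce);
 *	mutex_unlock(&ce->timeline->mutex);
 */
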
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}

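/*
 * Usage sketch (illustrative only): references follow the usual kref
 * pattern, with ce->ops->destroy() as the release callback, so a borrower
 * that may outlive its caller takes its own reference:
 *
 *	ce = intel_context_get(ce);
 *	... use ce independently of the caller's reference ...
 *	intel_context_put(ce);
 */
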
static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}

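/*
 * Usage sketch (illustrative only): the lock helper returns either the
 * locked timeline or an ERR_PTR() if interrupted, so callers pair it as:
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *	... emit requests along the timeline ...
 *	intel_context_timeline_unlock(tl);
 */
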
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

struct i915_request *
intel_context_find_active_request(struct intel_context *ce);

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_has_inflight(const struct intel_context *ce)
{
	return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_ban(struct intel_context *ce,
				     struct i915_request *rq)
{
	bool ret = intel_context_set_banned(ce);

	trace_intel_context_ban(ce);
	if (ce->ops->ban)
		ce->ops->ban(ce, rq);

	return ret;
}

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
	const u32 period = ce->engine->gt->clock_period_ns;

	return READ_ONCE(ce->runtime.total) * period;
}

static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
	const u32 period = ce->engine->gt->clock_period_ns;

	return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}

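/*
 * Worked example (illustrative only; the clock rate is an assumption, not
 * taken from this header): with a 19.2 MHz command streamer timestamp
 * clock, clock_period_ns = 10^9 / 19200000 ~= 52, so a runtime.total of
 * 1000000 ticks reads back as roughly 52 ms.
 */
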
#endif /* __INTEL_CONTEXT_H__ */