/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)

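/*
 * Minimal usage sketch for CE_TRACE (illustrative; the message is a
 * placeholder, not a caller from this file): the macro prefixes an
 * engine-scoped trace with the context's fence id, so callers supply
 * only the event-specific format and arguments:
 *
 *	CE_TRACE(ce, "unpin\n");
 */
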
struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

void i915_context_module_exit(void);
int i915_context_module_init(void);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

static inline void intel_context_cancel_request(struct intel_context *ce,
						struct i915_request *rq)
{
	GEM_BUG_ON(!ce->ops->cancel_request);
	return ce->ops->cancel_request(ce, rq);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}

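/*
 * Usage sketch for the pin-status lock (illustrative, not a caller from
 * the tree): hold the lock across any inspection that must not race
 * with the context being bound or unbound:
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *	if (intel_context_is_pinned(ce))
 *		... inspect the pinned HW state of ce ...
 *	intel_context_unlock_pinned(ce);
 */
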
int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin_ww(ce, ww);
}

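/*
 * Pinning sketch (illustrative, not a caller from the tree):
 * intel_context_pin() only takes the slow __intel_context_do_pin() path
 * for the first user; every successful pin must be balanced by
 * intel_context_unpin():
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *	... the context, its ring and page tables are now resident ...
 *	intel_context_unpin(ce);
 */
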
static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void __intel_context_do_unpin(struct intel_context *ce, int sub);

static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{
	__intel_context_do_unpin(ce, 2);
}

static inline void intel_context_unpin(struct intel_context *ce)
{
	if (!ce->ops->sched_disable) {
		__intel_context_do_unpin(ce, 1);
	} else {
		/*
		 * Move ownership of this pin to the scheduling disable, which
		 * is an asynchronous operation. When that operation completes,
		 * intel_context_sched_disable_unpin() above is called,
		 * potentially unpinning the context.
		 */
		while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
			if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
				ce->ops->sched_disable(ce);
				break;
			}
		}
	}
}

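/*
 * Reading the loop above: a pin_count of 1 denotes the final pin. The
 * cmpxchg(1 -> 2) transfers that last reference to the asynchronous
 * sched_disable operation; when it completes,
 * intel_context_sched_disable_unpin() drops both references in one go,
 * which is why it unpins with a sub of 2.
 */
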
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}

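/*
 * Bracketing sketch for enter/exit (illustrative, not a caller from the
 * tree): both helpers require the timeline mutex, and only the 0 <-> 1
 * transitions of active_count invoke the backend's enter/exit ops:
 *
 *	mutex_lock(&ce->timeline->mutex);
 *	intel_context_enter(ce);
 *	... emit requests ...
 *	intel_context_exit(ce);
 *	mutex_unlock(&ce->timeline->mutex);
 */
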
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}

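/*
 * Reference counting sketch (illustrative): the get/put pair wraps
 * kref_get()/kref_put() with the backend's ops->destroy as the release
 * callback, so the final put frees the context:
 *
 *	struct intel_context *ce = intel_context_get(other_ce);
 *	... use ce independently of the original reference ...
 *	intel_context_put(ce);
 *
 * Here other_ce stands for any context the caller already holds.
 */
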
static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}

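/*
 * Usage sketch for the timeline lock helpers (illustrative, not a caller
 * from the tree): the lock function returns either the locked timeline
 * or an ERR_PTR if the wait was interrupted, so check before unlocking:
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *	... emit requests along the timeline ...
 *	intel_context_timeline_unlock(tl);
 */
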
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

struct i915_request *
intel_context_find_active_request(struct intel_context *ce);

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_has_inflight(const struct intel_context *ce)
{
	return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_ban(struct intel_context *ce,
				     struct i915_request *rq)
{
	bool ret = intel_context_set_banned(ce);

	trace_intel_context_ban(ce);
	if (ce->ops->ban)
		ce->ops->ban(ce, rq);

	return ret;
}

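/*
 * Note on intel_context_ban() above: intel_context_set_banned() returns
 * the previous flag state, so the ban helper reports true only when the
 * context had already been banned before this call, letting callers
 * distinguish the first ban from repeats.
 */
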
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
	const u32 period = ce->engine->gt->clock_period_ns;

	return READ_ONCE(ce->runtime.total) * period;
}

static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
	const u32 period = ce->engine->gt->clock_period_ns;

	return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}

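/*
 * Note on the unit conversion above: both runtime counters accumulate GT
 * clock ticks, so multiplying by gt->clock_period_ns (the tick length in
 * nanoseconds) yields a runtime in nanoseconds.
 */
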
#endif /* __INTEL_CONTEXT_H__ */