// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_trace.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct kmem_cache *slab_ce;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(slab_ce, GFP_KERNEL);
}

static void rcu_context_free(struct rcu_head *rcu)
{
	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

	trace_intel_context_free(ce);
	kmem_cache_free(slab_ce, ce);
}

void intel_context_free(struct intel_context *ce)
{
	call_rcu(&ce->rcu, rcu_context_free);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	trace_intel_context_create(ce);
	return ce;
}

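/*
 * Lazily allocate the backend state for this context via ce->ops->alloc().
 * Serialised by ce->pin_mutex and done at most once (CONTEXT_ALLOC_BIT);
 * banned contexts are refused with -EIO.
 */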
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

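/*
 * First-pin bookkeeping for ce->active. Barrier and GuC-submitted contexts
 * only take the acquire reference; everything else also preallocates the
 * engine barrier tracking nodes, dropping the reference again on failure.
 */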
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

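/*
 * Pin the context state into the GGTT, above i915_ggtt_pin_bias(), and mark
 * it unshrinkable and dirty so the backing store persists while pinned.
 */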
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

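/* Pin the ring and hold its vma active until __ring_retire(). */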
static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		goto err_pin;

	return 0;

err_pin:
	intel_ring_unpin(ring);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}

static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}

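/*
 * Pin a context for use under an already acquired ww context. The backing
 * objects (HWSP, ring and context image) are locked and temporarily pinned
 * up front so that __intel_context_active() never has to pin them itself,
 * then the first pin_count transition performs the backend pin under
 * ce->pin_mutex.
 */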
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevents a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_ctx_unpin;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_post_unpin;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_release;

	intel_engine_pm_might_get(ce->engine);

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	trace_intel_context_do_pin(ce);

err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_release:
	i915_active_release(&ce->active);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we can unlock all the global state locked above
	 * since it's pinned and doesn't need fencing, and will
	 * thus remain resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}

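/*
 * Convenience wrapper that supplies its own ww context and retries the pin
 * after backing off on -EDEADLK. Callers normally reach this through the
 * intel_context_pin()/intel_context_unpin() helpers in intel_context.h.
 */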
int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

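/*
 * Drop 'sub' pin references. The final unpin unmaps the context via
 * ce->ops->unpin()/post_unpin() and releases the active tracking, holding a
 * temporary reference so the context cannot be freed mid-teardown.
 */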
void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
	if (!atomic_sub_and_test(sub, &ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra one now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	trace_intel_context_do_unpin(ce);
	intel_context_put(ce);
}

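/*
 * i915_active callbacks installed by intel_context_init():
 * __intel_context_active takes extra pins on the already activated
 * ring/timeline/state for the busy period, and __intel_context_retire
 * unwinds them once the context idles.
 */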
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}

	return 0;
}

static int __i915_sw_fence_call
sw_fence_dummy_notify(struct i915_sw_fence *sf,
		      enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = NULL;
	ce->ring_size = SZ_4K;

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	/* NB ce->signal_link/lock is used under RCU */
	spin_lock_init(&ce->signal_lock);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	spin_lock_init(&ce->guc_state.lock);
	INIT_LIST_HEAD(&ce->guc_state.fences);
	INIT_LIST_HEAD(&ce->guc_state.requests);

	ce->guc_id.id = GUC_INVALID_LRC_ID;
	INIT_LIST_HEAD(&ce->guc_id.link);

	INIT_LIST_HEAD(&ce->destroyed_link);

	INIT_LIST_HEAD(&ce->parallel.child_list);

	/*
	 * Initialize the fence as already signaled, since it is expected to be
	 * complete unless there is a pending schedule disable outstanding.
	 */
	i915_sw_fence_init(&ce->guc_state.blocked,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&ce->guc_state.blocked);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire, 0);
}

void intel_context_fini(struct intel_context *ce)
{
	struct intel_context *child, *next;

	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	/* Need to put the creation ref for the children */
	if (intel_context_is_parent(ce))
		for_each_child_safe(ce, child, next)
			intel_context_put(child);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
	i915_sw_fence_fini(&ce->guc_state.blocked);
}

void i915_context_module_exit(void)
{
	kmem_cache_destroy(slab_ce);
}

int __init i915_context_module_init(void)
{
	slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!slab_ce)
		return -ENOMEM;

	return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
					  struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by setting the ce activity
	 * tracker.
	 *
	 * But we only need to take one pin on account of it. Or in other
	 * words, transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

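/*
 * Pin the context just long enough to create a request on it, retrying the
 * pin on ww contention. As with i915_request_create(), a successful return
 * leaves the timeline mutex held by the request, with lockdep coaxed into
 * accepting the inverted lock order used by the selftests.
 */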
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as outer lock.
	 * Hack around this to shut up lockdep in selftests..
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}

struct i915_request *intel_context_find_active_request(struct intel_context *ce)
{
	struct intel_context *parent = intel_context_to_parent(ce);
	struct i915_request *rq, *active = NULL;
	unsigned long flags;

	GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));

	/*
	 * We search the parent list to find an active request on the submitted
	 * context. The parent list contains the requests for all the contexts
	 * in the relationship so we have to compare each request's context.
	 */
	spin_lock_irqsave(&parent->guc_state.lock, flags);
	list_for_each_entry_reverse(rq, &parent->guc_state.requests,
				    sched.link) {
		if (rq->context != ce)
			continue;
		if (i915_request_completed(rq))
			break;

		active = rq;
	}
	spin_unlock_irqrestore(&parent->guc_state.lock, flags);

	return active;
}

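/*
 * Link a child context into the parent's parallel-submission list and assign
 * it the next child index. Only valid for unpinned, GuC-submitted contexts
 * that are not already part of a parent/child relationship.
 */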
void intel_context_bind_parent_child(struct intel_context *parent,
				     struct intel_context *child)
{
	/*
	 * It is the caller's responsibility to validate that this function is
	 * used correctly, but we use GEM_BUG_ON here to ensure that they do.
	 */
	GEM_BUG_ON(!intel_engine_uses_guc(parent->engine));
	GEM_BUG_ON(intel_context_is_pinned(parent));
	GEM_BUG_ON(intel_context_is_child(parent));
	GEM_BUG_ON(intel_context_is_pinned(child));
	GEM_BUG_ON(intel_context_is_child(child));
	GEM_BUG_ON(intel_context_is_parent(child));

	parent->parallel.child_index = parent->parallel.number_children++;
	list_add_tail(&child->parallel.child_link,
		      &parent->parallel.child_list);
	child->parallel.parent = parent;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif