/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

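/*
 * Test fixture: an i915_active with a kref so the tests can inspect it
 * after release, and a flag recording whether the retirement callback
 * has fired.
 */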
struct live_active {
	struct i915_active base;
	struct kref ref;
	bool retired;
};

static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}

static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}

static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}

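/*
 * Callbacks wired up via i915_active_init(): __live_active is invoked when
 * the i915_active first becomes busy and takes an extra reference, which
 * __live_retire drops again (after marking the fixture retired) once all
 * tracked requests have completed.
 */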
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}

static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}

static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
	struct live_active *active;

	active = kzalloc(sizeof(*active), GFP_KERNEL);
	if (!active)
		return NULL;

	kref_init(&active->ref);
	i915_active_init(&active->base, __live_active, __live_retire);

	return active;
}

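/*
 * Build a busy i915_active: block submission with an unsignaled heap fence,
 * queue one kernel request per engine against the i915_active, and verify
 * that nothing retires while those requests are still pending.
 */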
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	struct live_active *active;
	unsigned int count = 0;
	int err = 0;

	active = __live_alloc(i915);
	if (!active)
		return ERR_PTR(-ENOMEM);

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit) {
		kfree(active);
		return ERR_PTR(-ENOMEM);
	}

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_add_request(&active->base, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

	i915_active_release(&active->base);
	if (READ_ONCE(active->retired) && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       atomic_read(&active->base.count), count);
		err = -EINVAL;
	}

out:
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
	}

	return active;
}

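/*
 * The subtests below rely on __live_active_setup() committing the submit
 * fence before returning, so the queued requests are free to execute; each
 * test then checks that the retirement callback fires via a different path.
 */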
static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests retire upon waiting */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(__func__);

		pr_err("i915_active not retired after waiting!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests are indirectly retired */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	/* waits for & retires all requests */
	if (igt_flush_test(i915))
		err = -EIO;

	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(__func__);

		pr_err("i915_active not retired after flushing!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	return err;
}

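/*
 * A barrier is a preallocated placeholder node, nominally one per engine,
 * that is completed when the engine next idles; the i915_active can thus
 * retire without tracking any explicit request of our own.
 */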
static int live_active_barrier(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when the barriers retire */

	active = __live_alloc(i915);
	if (!active)
		return -ENOMEM;

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		err = i915_active_acquire_preallocate_barrier(&active->base,
							      engine);
		if (err)
			break;

		i915_active_acquire_barrier(&active->base);
	}

	i915_active_release(&active->base);
	if (err)
		goto out;

	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	if (!READ_ONCE(active->retired)) {
		pr_err("i915_active not retired after flushing barriers!\n");
		err = -EINVAL;
	}

out:
	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
		SUBTEST(live_active_barrier),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}

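/*
 * What follows are debug helpers for inspecting and draining an
 * i915_active, compiled in (via the selftest #include of this file)
 * only when CONFIG_DRM_I915_SELFTEST is enabled.
 */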
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (!is_barrier(&it->base))
		return NULL;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return NULL;

	return engine;
}

void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
	drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
	drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
	drm_printf(m, "\tpreallocated barriers? %s\n",
		   yesno(!llist_empty(&ref->preallocated_barriers)));

	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			struct intel_engine_cs *engine;

			engine = node_to_barrier(it);
			if (engine) {
				drm_printf(m, "\tbarrier: %s\n", engine->name);
				continue;
			}

			if (i915_active_fence_isset(&it->base)) {
				drm_printf(m,
					   "\ttimeline: %llx\n", it->timeline);
				continue;
			}
		}

		i915_active_release(ref);
	}
}

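/* Taking and immediately dropping the lock waits out any critical section */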
static void spin_unlock_wait(spinlock_t *lock)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);
}

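/*
 * Forcibly clear one tracking slot: steal the fence pointer, unlink our
 * callback from the fence's cb_list and drop the tracking count. The
 * fence must already be signaled by this point.
 */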
static void active_flush(struct i915_active *ref,
			 struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = xchg(__active_fence_slot(active), NULL);
	if (!fence)
		return;

	spin_lock_irq(fence->lock);
	__list_del_entry(&active->cb.node);
	spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
	atomic_dec(&ref->count);

	GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
}

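/*
 * Drain an i915_active on behalf of a caller that no longer holds its
 * locks: flush any in-flight fence callbacks, wait for a concurrent
 * retirement to release tree_lock, then flush the deferred retire worker.
 */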
void i915_active_unlock_wait(struct i915_active *ref)
{
	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		/* Wait for all active callbacks */
		rcu_read_lock();
		active_flush(ref, &ref->excl);
		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
			active_flush(ref, &it->base);
		rcu_read_unlock();

		i915_active_release(ref);
	}

	/* And wait for the retire callback */
	spin_unlock_wait(&ref->tree_lock);

	/* ... which may have been on a thread instead */
	flush_work(&ref->work);
}