// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "gt/intel_migrate.h"
#include "gt/intel_gpu_commands.h"
#include "gem/i915_gem_ttm_move.h"

#include "i915_deps.h"

#include "selftests/igt_spinner.h"
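
/*
 * Fill the object with a known linear pattern through a CPU map, or
 * verify a previously written pattern, depending on @fill. The caller
 * must hold the object lock.
 */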
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
				 bool fill)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int i, count = obj->base.size / sizeof(u32);
	enum i915_map_type map_type =
		i915_coherent_map_type(i915, obj, false);
	u32 *cur;
	int err = 0;

	assert_object_held(obj);
	cur = i915_gem_object_pin_map(obj, map_type);
	if (IS_ERR(cur))
		return PTR_ERR(cur);

	if (fill)
		for (i = 0; i < count; ++i)
			*cur++ = i;
	else
		for (i = 0; i < count; ++i)
			if (*cur++ != i) {
				pr_err("Object content mismatch at location %d of %d\n",
				       i, count);
				err = -EINVAL;
				break;
			}

	i915_gem_object_unpin_map(obj);

	return err;
}
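
/*
 * Create an object in @src, fill it from the CPU, migrate it to @dst,
 * and check that the contents survive the move and that the object
 * reports it cannot migrate back to @src while its pages are pinned.
 */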
static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
			      enum intel_region_id dst)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_memory_region *src_mr = i915->mm.regions[src];
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	/* Switch object backing-store on create */
	obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, true);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, dst);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
		if (err)
			continue;

		/* With the pages pinned, migration back must be refused. */
		if (i915_gem_object_can_migrate(obj, src))
			err = -EINVAL;

		i915_gem_object_unpin_pages(obj);
		err = i915_gem_object_wait_migration(obj, true);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, false);
	}
	i915_gem_object_put(obj);

	return err;
}
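
/*
 * Exercise backing-store switching at create time in both directions,
 * plus the degenerate case where source and destination are the same
 * region.
 */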
static int igt_smem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_SMEM);
}

static int igt_lmem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM_0);
}

static int igt_same_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_LMEM_0);
}
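
/*
 * Bounce @obj once between lmem and smem under the ww lock, verifying
 * the backing store after the move. If @vma is supplied, pin and unpin
 * it first so that the subsequent migration has a bound vma to unbind.
 */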
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
				  struct drm_i915_gem_object *obj,
				  struct i915_vma *vma)
{
	int err;

	err = i915_gem_object_lock(obj, ww);
	if (err)
		return err;

	if (vma) {
		err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
				      0UL | PIN_OFFSET_FIXED |
				      PIN_USER);
		if (err) {
			if (err != -EINTR && err != -ERESTARTSYS &&
			    err != -EDEADLK)
				pr_err("Failed to pin vma.\n");
			return err;
		}

		i915_vma_unpin(vma);
	}

	/*
	 * Migration will implicitly unbind (asynchronously) any bound
	 * vmas.
	 */
	if (i915_gem_object_is_lmem(obj)) {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
		if (err) {
			pr_err("Object failed migration to smem\n");
			return err;
		}

		if (i915_gem_object_is_lmem(obj)) {
			pr_err("object still backed by lmem\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_has_struct_page(obj)) {
			pr_err("object not backed by struct page\n");
			err = -EINVAL;
		}
	} else {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0);
		if (err) {
			pr_err("Object failed migration to lmem\n");
			return err;
		}

		if (i915_gem_object_has_struct_page(obj)) {
			pr_err("object still backed by struct page\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_is_lmem(obj)) {
			pr_err("object not backed by lmem\n");
			err = -EINVAL;
		}
	}

	return err;
}
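
/*
 * Core of the migrate tests: GPU-clear a 2MiB lmem object, bounce it
 * between lmem and smem a number of times, then sync and verify. When
 * @vm, @deps, @spin and @spin_fence are supplied by igt_async_migrate(),
 * the bounces are scheduled behind a live spinner to prove that the
 * implicit unbinds are asynchronous.
 */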
static int __igt_lmem_pages_migrate(struct intel_gt *gt,
				    struct i915_address_space *vm,
				    struct i915_deps *deps,
				    struct igt_spinner *spin,
				    struct dma_fence *spin_fence)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma = NULL;
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;
	int i;

	/* From LMEM to shmem and back again */

	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (vm) {
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}
	}

	/* Initial GPU fill, sync, CPU initialization. */
	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			continue;

		err = intel_migrate_clear(&gt->migrate, &ww, deps,
					  obj->mm.pages->sgl, obj->cache_level,
					  i915_gem_object_is_lmem(obj),
					  0xdeadbeaf, &rq);
		if (rq) {
			err = dma_resv_reserve_fences(obj->base.resv, 1);
			if (!err)
				dma_resv_add_excl_fence(obj->base.resv,
							&rq->fence);
			i915_gem_object_set_moving_fence(obj, &rq->fence);
			i915_request_put(rq);
		}
		if (err)
			continue;

		if (!vma) {
			err = igt_fill_check_buffer(obj, true);
			if (err)
				continue;
		}
	}
	if (err)
		goto out_put;

	/*
	 * Migrate to and from smem without explicitly syncing.
	 * Finalize with data in smem for fast readout.
	 */
	for (i = 1; i <= 5; ++i) {
		for_i915_gem_ww(&ww, err, true)
			err = lmem_pages_migrate_one(&ww, obj, vma);
		if (err)
			goto out_put;
	}

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;

	if (spin) {
		if (dma_fence_is_signaled(spin_fence)) {
			pr_err("Spinner was terminated by hangcheck.\n");
			err = -EBUSY;
			goto out_unlock;
		}
		igt_spinner_end(spin);
	}

	/* Finally sync migration and check content. */
	err = i915_gem_object_wait_migration(obj, true);
	if (err)
		goto out_unlock;

	if (vma) {
		err = i915_vma_wait_for_bind(vma);
		if (err)
			goto out_unlock;
	} else {
		err = igt_fill_check_buffer(obj, false);
	}

out_unlock:
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}
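
/*
 * Run the migrate bounce test under all combinations of simulated GPU
 * copy failure and allocation failure, so that the failsafe migration
 * error paths are exercised as well.
 */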
static int igt_lmem_pages_failsafe_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
			pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
				fail_gpu, fail_alloc);
			i915_ttm_migrate_set_failure_modes(fail_gpu,
							   fail_alloc);
			ret = __igt_lmem_pages_migrate(gt, NULL, NULL, NULL, NULL);
			if (ret)
				goto out_err;
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	return ret;
}

/*
 * This subtest verifies that unbinding at migration is indeed performed
 * asynchronously. We launch a spinner and a number of migrations that
 * depend on that spinner having terminated. Before each migration we
 * bind a vma, which should then be async unbound by the migration
 * operation. If we are able to schedule migrations without blocking
 * while the spinner is still running, those unbinds are indeed async
 * and non-blocking.
 *
 * Note that each async bind operation is awaiting the previous migration
 * due to the moving fence resulting from the migration.
 */
static int igt_async_migrate(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct i915_ppgtt *ppgtt;
	struct igt_spinner spin;
	int err;

	ppgtt = i915_ppgtt_create(gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (igt_spinner_init(&spin, gt)) {
		err = -ENOMEM;
		goto out_spin;
	}

	for_each_engine(engine, gt, id) {
		struct ttm_operation_ctx ctx = {
			.interruptible = true
		};
		struct dma_fence *spin_fence;
		struct intel_context *ce;
		struct i915_request *rq;
		struct i915_deps deps;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out_ce;
		}

		/*
		 * Use MI_NOOP, making the spinner non-preemptible. If there
		 * is a code path where we fail async operation due to the
		 * running spinner, we will block and fail to end the
		 * spinner, resulting in a deadlock. But with a non-
		 * preemptible spinner, hangcheck will terminate the spinner
		 * for us, and we will later detect that and fail the test.
		 */
		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		i915_deps_init(&deps, GFP_KERNEL);
		err = i915_deps_add_dependency(&deps, &rq->fence, &ctx);
		spin_fence = dma_fence_get(&rq->fence);
		i915_request_add(rq);
		if (err) {
			i915_deps_fini(&deps);
			dma_fence_put(spin_fence);
			goto out_ce;
		}

		err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin,
					       spin_fence);
		i915_deps_fini(&deps);
		dma_fence_put(spin_fence);
		if (err)
			goto out_ce;
	}

out_ce:
	igt_spinner_fini(&spin);
out_spin:
	i915_vm_put(&ppgtt->vm);

	return err;
}

/*
 * Setting ASYNC_FAIL_ALLOC to 2 will simulate memory allocation failure
 * while arming the migration error check and will block async migration.
 * This will cause us to deadlock, and hangcheck will then terminate the
 * spinner, causing the test to fail.
 */
#define ASYNC_FAIL_ALLOC 1
static int igt_lmem_async_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < ASYNC_FAIL_ALLOC; ++fail_alloc) {
			pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
				fail_gpu, fail_alloc);
			i915_ttm_migrate_set_failure_modes(fail_gpu,
							   fail_alloc);
			ret = igt_async_migrate(gt);
			if (ret)
				goto out_err;
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	return ret;
}

int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_smem_create_migrate),
		SUBTEST(igt_lmem_create_migrate),
		SUBTEST(igt_same_create_migrate),
		SUBTEST(igt_lmem_pages_failsafe_migrate),
		SUBTEST(igt_lmem_async_migrate),
	};

	/* These tests are only relevant on devices with local memory. */
	if (!HAS_LMEM(i915))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}
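
/*
 * Usage note (an assumption, not part of the test code above): with
 * CONFIG_DRM_I915_SELFTEST enabled, these live selftests can typically
 * be triggered at module load, e.g. via the i915.live_selftests=-1
 * module parameter, optionally narrowed with i915.st_filter; see
 * i915_selftest.c for the exact parameter handling.
 */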