// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"

/**
 * DOC: Selftest failure modes for failsafe migration:
 *
 * For fail_gpu_migration, the gpu blit scheduled is always a clear blit
 * rather than a copy blit, and then we force the failure paths as if
 * the blit fence returned an error.
 *
 * For fail_work_allocation we fail the kmalloc of the async worker and
 * instead sync the gpu blit. If the blit then fails, or fail_gpu_migration
 * is set to true, a memcpy operation is performed synchronously.
 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static bool fail_gpu_migration;
static bool fail_work_allocation;

void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
					bool work_allocation)
{
	fail_gpu_migration = gpu_migration;
	fail_work_allocation = work_allocation;
}
#endif

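/*
 * Illustrative sketch only (hypothetical call sequence, not lifted from the
 * actual selftests): a selftest would typically flip these modes around a
 * forced migration to exercise the failsafe memcpy path, e.g.:
 *
 *	i915_ttm_migrate_set_failure_modes(true, false);
 *	err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
 *	...
 *	i915_ttm_migrate_set_failure_modes(false, false);
 */
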
/* Return the i915 cache level to use for @res, given its TTM caching mode. */
static enum i915_cache_level
i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
		     struct ttm_tt *ttm)
{
	return ((HAS_LLC(i915) || HAS_SNOOP(i915)) &&
		!i915_ttm_gtt_binds_lmem(res) &&
		ttm->caching == ttm_cached) ? I915_CACHE_LLC :
		I915_CACHE_NONE;
}

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);

	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}

/**
 * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a
 * TTM move
 * @obj: The gem object
 */
void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
		obj->write_domain = I915_GEM_DOMAIN_WC;
		obj->read_domains = I915_GEM_DOMAIN_WC;
	} else {
		obj->write_domain = I915_GEM_DOMAIN_CPU;
		obj->read_domains = I915_GEM_DOMAIN_CPU;
	}
}

/**
 * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move
 * @obj: The gem object
 *
 * Adjusts the GEM object's region, mem_flags and cache coherency after a
 * TTM move.
 */
void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	unsigned int cache_level;
	unsigned int i;

	/*
	 * If object was moved to an allowable region, update the object
	 * region to consider it migrated. Note that if it's currently not
	 * in an allowable region, it's evicted and we don't update the
	 * object region.
	 */
	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
		for (i = 0; i < obj->mm.n_placements; ++i) {
			struct intel_memory_region *mr = obj->mm.placements[i];

			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
			    mr != obj->mm.region) {
				i915_gem_object_release_memory_region(obj);
				i915_gem_object_init_memory_region(obj, mr);
				break;
			}
		}
	}

	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);

	obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
		I915_BO_FLAG_STRUCT_PAGE;

	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
					   bo->ttm);
	i915_gem_object_set_cache_coherency(obj, cache_level);
}

/**
 * i915_ttm_move_notify - Prepare an object for move
 * @bo: The ttm buffer object.
 *
 * This function prepares an object for move by removing all GPU bindings,
 * removing all CPU mappings and finally releasing the pages sg-table.
 *
 * Return: 0 if successful, negative error code on error.
 */
int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	/*
	 * Note: The async unbinding here will actually transform the
	 * blocking wait for unbind into a wait before finally submitting
	 * evict / migration blit and thus stall the migration timeline
	 * which may not be good for overall throughput. We should make
	 * sure we await the unbind fences *after* the migration blit
	 * instead of *before* as we currently do.
	 */
	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE |
				     I915_GEM_OBJECT_UNBIND_ASYNC);
	if (ret)
		return ret;

	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	return 0;
}

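/*
 * i915_ttm_accel_move() below schedules a clear or copy blit on the GT
 * migration context, with @deps as dependencies, and returns the request
 * fence of the blit, or an ERR_PTR if no blit could be scheduled.
 */
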
static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
					     bool clear,
					     struct ttm_resource *dst_mem,
					     struct ttm_tt *dst_ttm,
					     struct sg_table *dst_st,
					     const struct i915_deps *deps)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_request *rq;
	struct ttm_tt *src_ttm = bo->ttm;
	enum i915_cache_level src_level, dst_level;
	int ret;

	if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915)))
		return ERR_PTR(-EINVAL);

	/* With fail_gpu_migration, we always perform a GPU clear. */
	if (I915_SELFTEST_ONLY(fail_gpu_migration))
		clear = true;

	dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
	if (clear) {
		if (bo->type == ttm_bo_type_kernel &&
		    !I915_SELFTEST_ONLY(fail_gpu_migration))
			return ERR_PTR(-EINVAL);

		intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
		ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, deps,
						  dst_st->sgl, dst_level,
						  i915_ttm_gtt_binds_lmem(dst_mem),
						  0, &rq);
	} else {
		struct i915_refct_sgt *src_rsgt =
			i915_ttm_resource_get_st(obj, bo->resource);

		if (IS_ERR(src_rsgt))
			return ERR_CAST(src_rsgt);

		src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
		intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
		ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
						 deps, src_rsgt->table.sgl,
						 src_level,
						 i915_ttm_gtt_binds_lmem(bo->resource),
						 dst_st->sgl, dst_level,
						 i915_ttm_gtt_binds_lmem(dst_mem),
						 &rq);

		i915_refct_sgt_put(src_rsgt);
	}

	intel_engine_pm_put(to_gt(i915)->migrate.context->engine);

	if (ret && rq) {
		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);
	}

	return ret ? ERR_PTR(ret) : &rq->fence;
}

/**
 * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality.
 * @_dst_iter: Storage space for the destination kmap iterator.
 * @_src_iter: Storage space for the source kmap iterator.
 * @dst_iter: Pointer to the destination kmap iterator.
 * @src_iter: Pointer to the source kmap iterator.
 * @num_pages: Number of pages to copy or clear.
 * @clear: Whether to clear instead of copy.
 * @src_rsgt: Refcounted scatter-gather list of source memory.
 * @dst_rsgt: Refcounted scatter-gather list of destination memory.
 */
struct i915_ttm_memcpy_arg {
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_iomap io;
	} _dst_iter,
	_src_iter;
	struct ttm_kmap_iter *dst_iter;
	struct ttm_kmap_iter *src_iter;
	unsigned long num_pages;
	bool clear;
	struct i915_refct_sgt *src_rsgt;
	struct i915_refct_sgt *dst_rsgt;
};

/**
 * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence.
 * @fence: The dma-fence.
 * @work: The work struct used for the memcpy work.
 * @lock: The fence lock. Not used to protect anything else ATM.
 * @irq_work: Low latency worker to signal the fence since it can't be done
 * from the callback for lockdep reasons.
 * @cb: Callback for the accelerated migration fence.
 * @arg: The argument for the memcpy functionality.
 */
struct i915_ttm_memcpy_work {
	struct dma_fence fence;
	struct work_struct work;
	/* The fence lock */
	spinlock_t lock;
	struct irq_work irq_work;
	struct dma_fence_cb cb;
	struct i915_ttm_memcpy_arg arg;
};

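/*
 * Flow of the failsafe fallback: i915_ttm_memcpy_work_arm() attaches
 * __memcpy_cb() to the GPU blit fence. If the blit completes without error,
 * the irq_work path merely signals @fence and releases the memcpy argument.
 * If the blit failed, or a selftest forced a failure, the work item performs
 * the CPU memcpy before signalling @fence, so waiters never observe a move
 * that silently failed.
 */
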
static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg)
{
	ttm_move_memcpy(arg->clear, arg->num_pages,
			arg->dst_iter, arg->src_iter);
}

static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
				 struct ttm_buffer_object *bo, bool clear,
				 struct ttm_resource *dst_mem,
				 struct ttm_tt *dst_ttm,
				 struct i915_refct_sgt *dst_rsgt)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct intel_memory_region *dst_reg, *src_reg;

	dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
	src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
	GEM_BUG_ON(!dst_reg || !src_reg);

	arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ?
		ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) :
		ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap,
					 &dst_rsgt->table, dst_reg->region.start);

	arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ?
		ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) :
		ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap,
					 &obj->ttm.cached_io_rsgt->table,
					 src_reg->region.start);
	arg->clear = clear;
	arg->num_pages = bo->base.size >> PAGE_SHIFT;

	arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt);
	arg->src_rsgt = clear ? NULL :
		i915_ttm_resource_get_st(obj, bo->resource);
}

static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg)
{
	i915_refct_sgt_put(arg->src_rsgt);
	i915_refct_sgt_put(arg->dst_rsgt);
}

static void __memcpy_work(struct work_struct *work)
{
	struct i915_ttm_memcpy_work *copy_work =
		container_of(work, typeof(*copy_work), work);
	struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
	bool cookie = dma_fence_begin_signalling();

	i915_ttm_move_memcpy(arg);
	dma_fence_end_signalling(cookie);

	dma_fence_signal(&copy_work->fence);

	i915_ttm_memcpy_release(arg);
	dma_fence_put(&copy_work->fence);
}

static void __memcpy_irq_work(struct irq_work *irq_work)
{
	struct i915_ttm_memcpy_work *copy_work =
		container_of(irq_work, typeof(*copy_work), irq_work);
	struct i915_ttm_memcpy_arg *arg = &copy_work->arg;

	dma_fence_signal(&copy_work->fence);
	i915_ttm_memcpy_release(arg);
	dma_fence_put(&copy_work->fence);
}

static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_ttm_memcpy_work *copy_work =
		container_of(cb, typeof(*copy_work), cb);

	if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
		INIT_WORK(&copy_work->work, __memcpy_work);
		queue_work(system_unbound_wq, &copy_work->work);
	} else {
		init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
		irq_work_queue(&copy_work->irq_work);
	}
}

static const char *get_driver_name(struct dma_fence *fence)
{
	return "i915_ttm_memcpy_work";
}

static const char *get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static const struct dma_fence_ops dma_fence_memcpy_ops = {
	.get_driver_name = get_driver_name,
	.get_timeline_name = get_timeline_name,
};

static struct dma_fence *
i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work,
			 struct dma_fence *dep)
{
	int ret;

	spin_lock_init(&work->lock);
	dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0);
	dma_fence_get(&work->fence);

	ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb);
	if (ret) {
		if (ret != -ENOENT)
			dma_fence_wait(dep, false);

		return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL :
			       dep->error);
	}

	return &work->fence;
}

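/*
 * __i915_ttm_move() first tries an accelerated (blit) move when allowed.
 * For moves to lmem it then arms the memcpy work above as an error intercept
 * on the blit fence. If arming fails, or no blit could be scheduled, it
 * falls back to a synchronous CPU memcpy after syncing against @move_deps.
 */
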
static struct dma_fence *
__i915_ttm_move(struct ttm_buffer_object *bo,
		const struct ttm_operation_ctx *ctx, bool clear,
		struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm,
		struct i915_refct_sgt *dst_rsgt, bool allow_accel,
		const struct i915_deps *move_deps)
{
	struct i915_ttm_memcpy_work *copy_work = NULL;
	struct i915_ttm_memcpy_arg _arg, *arg = &_arg;
	struct dma_fence *fence = ERR_PTR(-EINVAL);

	if (allow_accel) {
		fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm,
					    &dst_rsgt->table, move_deps);

		/*
		 * We only need to intercept the error when moving to lmem.
		 * When moving to system, TTM or shmem will provide us with
		 * cleared pages.
		 */
		if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) &&
		    !I915_SELFTEST_ONLY(fail_gpu_migration ||
					fail_work_allocation))
			goto out;
	}

	/* If we've scheduled gpu migration, try to arm the error intercept. */
	if (!IS_ERR(fence)) {
		struct dma_fence *dep = fence;

		if (!I915_SELFTEST_ONLY(fail_work_allocation))
			copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL);

		if (copy_work) {
			arg = &copy_work->arg;
			i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
					     dst_rsgt);
			fence = i915_ttm_memcpy_work_arm(copy_work, dep);
		} else {
			dma_fence_wait(dep, false);
			fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ?
					-EINVAL : fence->error);
		}
		dma_fence_put(dep);

		if (!IS_ERR(fence))
			goto out;
	} else if (move_deps) {
		int err = PTR_ERR(fence);

		if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
			return fence;

		err = i915_deps_sync(move_deps, ctx);
		if (err)
			return ERR_PTR(err);
	}

	/* Error intercept failed or no accelerated migration to start with */
	if (!copy_work)
		i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
				     dst_rsgt);
	i915_ttm_move_memcpy(arg);
	i915_ttm_memcpy_release(arg);
	kfree(copy_work);

	return NULL;
out:
	if (!fence && copy_work) {
		i915_ttm_memcpy_release(arg);
		kfree(copy_work);
	}

	return fence;
}

static int
prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
	  struct i915_deps *deps)
{
	int ret;

	ret = i915_deps_add_dependency(deps, bo->moving, ctx);
	if (!ret)
		ret = i915_deps_add_resv(deps, bo->base.resv, ctx);

	return ret;
}

/**
 * i915_ttm_move - The TTM move callback used by i915.
 * @bo: The buffer object.
 * @evict: Whether this is an eviction.
 * @ctx: The struct ttm_operation_ctx describing, e.g., whether waits
 * should be interruptible.
 * @dst_mem: The destination ttm resource.
 * @hop: If we need multihop, what temporary memory type to move to.
 *
 * Return: 0 if successful, negative error code otherwise.
 */
int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
		  struct ttm_operation_ctx *ctx,
		  struct ttm_resource *dst_mem,
		  struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct dma_fence *migration_fence = NULL;
	struct ttm_tt *ttm = bo->ttm;
	struct i915_refct_sgt *dst_rsgt;
	bool clear;
	int ret;

	if (GEM_WARN_ON(!obj)) {
		ttm_bo_move_null(bo, dst_mem);
		return 0;
	}

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_rsgt))
		return PTR_ERR(dst_rsgt);

	clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) {
		struct i915_deps deps;

		i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
		ret = prev_deps(bo, ctx, &deps);
		if (ret) {
			i915_refct_sgt_put(dst_rsgt);
			return ret;
		}

		migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, ttm,
						  dst_rsgt, true, &deps);
		i915_deps_fini(&deps);
	}

	/* We can possibly get an -ERESTARTSYS here */
	if (IS_ERR(migration_fence)) {
		i915_refct_sgt_put(dst_rsgt);
		return PTR_ERR(migration_fence);
	}

	if (migration_fence) {
		ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict,
						true, dst_mem);
		if (ret) {
			dma_fence_wait(migration_fence, false);
			ttm_bo_move_sync_cleanup(bo, dst_mem);
		}
		dma_fence_put(migration_fence);
	} else {
		ttm_bo_move_sync_cleanup(bo, dst_mem);
	}

	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_free_cached_io_rsgt(obj);

	if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) {
		obj->ttm.cached_io_rsgt = dst_rsgt;
		obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	} else {
		i915_refct_sgt_put(dst_rsgt);
	}

	i915_ttm_adjust_lru(obj);
	i915_ttm_adjust_gem_after_move(obj);
	return 0;
}

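/*
 * Note: i915_ttm_move() is not called directly by i915 but is installed as
 * the TTM move callback. Illustrative sketch only (the real function table
 * lives in i915_gem_ttm.c):
 *
 *	static struct ttm_device_funcs i915_ttm_bo_driver = {
 *		...
 *		.move = i915_ttm_move,
 *		...
 *	};
 */
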
/**
 * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
 * another
 * @dst: The destination object
 * @src: The source object
 * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
 * @intr: Whether to perform waits interruptibly.
 *
 * Note: The caller is responsible for assuring that the underlying
 * TTM objects are populated if needed and locked.
 *
 * Return: Zero on success. Negative error code on error. If @intr == true,
 * then it may return -ERESTARTSYS or -EINTR.
 */
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
			  struct drm_i915_gem_object *src,
			  bool allow_accel, bool intr)
{
	struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
	struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
	struct ttm_operation_ctx ctx = {
		.interruptible = intr,
	};
	struct i915_refct_sgt *dst_rsgt;
	struct dma_fence *copy_fence;
	struct i915_deps deps;
	int ret;

	assert_object_held(dst);
	assert_object_held(src);
	i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

	ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
	if (ret)
		return ret;

	ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx);
	if (ret)
		return ret;

	ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx);
	if (ret)
		return ret;

	dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource);
	copy_fence = __i915_ttm_move(src_bo, &ctx, false, dst_bo->resource,
				     dst_bo->ttm, dst_rsgt, allow_accel,
				     &deps);

	i915_deps_fini(&deps);
	i915_refct_sgt_put(dst_rsgt);
	if (IS_ERR_OR_NULL(copy_fence))
		return PTR_ERR_OR_ZERO(copy_fence);

	dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
	dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);

	dma_fence_put(copy_fence);

	return 0;
}
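
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * both objects must be locked, e.g. under a dma-resv ww transaction, and
 * have their backing store populated before calling:
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(src, &ww);
 *		if (!err)
 *			err = i915_gem_object_lock(dst, &ww);
 *		if (!err)
 *			err = i915_gem_obj_copy_ttm(dst, src, true, true);
 *	}
 */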