drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>

#include "i915_deps.h"
#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"

/**
 * DOC: Selftest failure modes for failsafe migration
 *
 * For fail_gpu_migration, the scheduled gpu blit is always a clear blit
 * rather than a copy blit, and we then force the failure paths as if
 * the blit fence had returned an error.
 *
 * For fail_work_allocation we fail the kmalloc of the async worker and
 * instead wait for the gpu blit synchronously. If the blit then fails,
 * or fail_gpu_migration is also set, a synchronous memcpy is performed.
 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static bool fail_gpu_migration;
static bool fail_work_allocation;

void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
                                        bool work_allocation)
{
        fail_gpu_migration = gpu_migration;
        fail_work_allocation = work_allocation;
}
#endif
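
/*
 * A minimal usage sketch for the failure modes above, as a live selftest
 * might drive them; the migration trigger and the verification step are
 * placeholders, not helpers defined in this file:
 *
 *        i915_ttm_migrate_set_failure_modes(true, false);
 *        ... migrate a test object between lmem and smem ...
 *        ... check that the memcpy fallback preserved its contents ...
 *        i915_ttm_migrate_set_failure_modes(false, false);
 */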

static enum i915_cache_level
i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
                     struct ttm_tt *ttm)
{
        return ((HAS_LLC(i915) || HAS_SNOOP(i915)) &&
                !i915_ttm_gtt_binds_lmem(res) &&
                ttm->caching == ttm_cached) ? I915_CACHE_LLC :
                I915_CACHE_NONE;
}
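
/*
 * A worked example of the rule above: on an LLC (or snooping) part, a
 * ttm_cached, system-memory backed resource resolves to I915_CACHE_LLC,
 * while any lmem-bound resource, or a non-cached ttm_tt, resolves to
 * I915_CACHE_NONE.
 */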

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
        struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

        /* There's some room for optimization here... */
        GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
                   ttm_mem_type < I915_PL_LMEM0);
        if (ttm_mem_type == I915_PL_SYSTEM)
                return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
                                                  0);

        return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
                                          ttm_mem_type - I915_PL_LMEM0);
}

/**
 * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a
 * TTM move
 * @obj: The gem object
 */
void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

        if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
                obj->write_domain = I915_GEM_DOMAIN_WC;
                obj->read_domains = I915_GEM_DOMAIN_WC;
        } else {
                obj->write_domain = I915_GEM_DOMAIN_CPU;
                obj->read_domains = I915_GEM_DOMAIN_CPU;
        }
}

/**
 * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move
 * @obj: The gem object
 *
 * Adjusts the GEM object's region, mem_flags and cache coherency after a
 * TTM move.
 */
void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        unsigned int cache_level;
        unsigned int i;

        /*
         * If the object was moved to an allowable region, update the object
         * region to consider it migrated. Note that if it's currently not
         * in an allowable region, it's evicted and we don't update the
         * object region.
         */
        if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
                for (i = 0; i < obj->mm.n_placements; ++i) {
                        struct intel_memory_region *mr = obj->mm.placements[i];

                        if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
                            mr != obj->mm.region) {
                                i915_gem_object_release_memory_region(obj);
                                i915_gem_object_init_memory_region(obj, mr);
                                break;
                        }
                }
        }

        obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);

        obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
                I915_BO_FLAG_STRUCT_PAGE;

        cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
                                           bo->ttm);
        i915_gem_object_set_cache_coherency(obj, cache_level);
}

/**
 * i915_ttm_move_notify - Prepare an object for move
 * @bo: The ttm buffer object.
 *
 * This function prepares an object for move by removing all GPU bindings,
 * removing all CPU mappings and finally releasing the pages sg-table.
 *
 * Return: 0 if successful, negative error code on error.
 */
int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        int ret;

        /*
         * Note: The async unbinding here will actually transform the
         * blocking wait for unbind into a wait before finally submitting
         * the evict / migration blit and thus stall the migration timeline,
         * which may not be good for overall throughput. We should make
         * sure we await the unbind fences *after* the migration blit
         * instead of *before* as we currently do.
         */
        ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE |
                                     I915_GEM_OBJECT_UNBIND_ASYNC);
        if (ret)
                return ret;

        ret = __i915_gem_object_put_pages(obj);
        if (ret)
                return ret;

        return 0;
}

static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
                                             bool clear,
                                             struct ttm_resource *dst_mem,
                                             struct ttm_tt *dst_ttm,
                                             struct sg_table *dst_st,
                                             const struct i915_deps *deps)
{
        struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
                                                     bdev);
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct i915_request *rq;
        struct ttm_tt *src_ttm = bo->ttm;
        enum i915_cache_level src_level, dst_level;
        int ret;

        if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915)))
                return ERR_PTR(-EINVAL);

        /* With fail_gpu_migration, we always perform a GPU clear. */
        if (I915_SELFTEST_ONLY(fail_gpu_migration))
                clear = true;

        dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
        if (clear) {
                if (bo->type == ttm_bo_type_kernel &&
                    !I915_SELFTEST_ONLY(fail_gpu_migration))
                        return ERR_PTR(-EINVAL);

                intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
                ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, deps,
                                                  dst_st->sgl, dst_level,
                                                  i915_ttm_gtt_binds_lmem(dst_mem),
                                                  0, &rq);
        } else {
                struct i915_refct_sgt *src_rsgt =
                        i915_ttm_resource_get_st(obj, bo->resource);

                if (IS_ERR(src_rsgt))
                        return ERR_CAST(src_rsgt);

                src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
                intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
                ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
                                                 deps, src_rsgt->table.sgl,
                                                 src_level,
                                                 i915_ttm_gtt_binds_lmem(bo->resource),
                                                 dst_st->sgl, dst_level,
                                                 i915_ttm_gtt_binds_lmem(dst_mem),
                                                 &rq);

                i915_refct_sgt_put(src_rsgt);
        }

        intel_engine_pm_put(to_gt(i915)->migrate.context->engine);

        if (ret && rq) {
                i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
                i915_request_put(rq);
        }

        return ret ? ERR_PTR(ret) : &rq->fence;
}

/**
 * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality.
 * @_dst_iter: Storage space for the destination kmap iterator.
 * @_src_iter: Storage space for the source kmap iterator.
 * @dst_iter: Pointer to the destination kmap iterator.
 * @src_iter: Pointer to the source kmap iterator.
 * @num_pages: Number of pages to copy or clear.
 * @clear: Whether to clear instead of copy.
 * @src_rsgt: Refcounted scatter-gather list of source memory.
 * @dst_rsgt: Refcounted scatter-gather list of destination memory.
 */
struct i915_ttm_memcpy_arg {
        union {
                struct ttm_kmap_iter_tt tt;
                struct ttm_kmap_iter_iomap io;
        } _dst_iter,
        _src_iter;
        struct ttm_kmap_iter *dst_iter;
        struct ttm_kmap_iter *src_iter;
        unsigned long num_pages;
        bool clear;
        struct i915_refct_sgt *src_rsgt;
        struct i915_refct_sgt *dst_rsgt;
};

/**
 * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence.
 * @fence: The dma-fence.
 * @work: The work struct used for the memcpy work.
 * @lock: The fence lock. Not used to protect anything else ATM.
 * @irq_work: Low latency worker to signal the fence since it can't be done
 * from the callback for lockdep reasons.
 * @cb: Callback for the accelerated migration fence.
 * @arg: The argument for the memcpy functionality.
 */
struct i915_ttm_memcpy_work {
        struct dma_fence fence;
        struct work_struct work;
        /* The fence lock */
        spinlock_t lock;
        struct irq_work irq_work;
        struct dma_fence_cb cb;
        struct i915_ttm_memcpy_arg arg;
};

static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg)
{
        ttm_move_memcpy(arg->clear, arg->num_pages,
                        arg->dst_iter, arg->src_iter);
}

static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
                                 struct ttm_buffer_object *bo, bool clear,
                                 struct ttm_resource *dst_mem,
                                 struct ttm_tt *dst_ttm,
                                 struct i915_refct_sgt *dst_rsgt)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct intel_memory_region *dst_reg, *src_reg;

        dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
        src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
        GEM_BUG_ON(!dst_reg || !src_reg);

        arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ?
                ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) :
                ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap,
                                         &dst_rsgt->table, dst_reg->region.start);

        arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ?
                ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) :
                ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap,
                                         &obj->ttm.cached_io_rsgt->table,
                                         src_reg->region.start);
        arg->clear = clear;
        arg->num_pages = bo->base.size >> PAGE_SHIFT;

        arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt);
        arg->src_rsgt = clear ? NULL :
                i915_ttm_resource_get_st(obj, bo->resource);
}

static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg)
{
        i915_refct_sgt_put(arg->src_rsgt);
        i915_refct_sgt_put(arg->dst_rsgt);
}

static void __memcpy_work(struct work_struct *work)
{
        struct i915_ttm_memcpy_work *copy_work =
                container_of(work, typeof(*copy_work), work);
        struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
        bool cookie = dma_fence_begin_signalling();

        i915_ttm_move_memcpy(arg);
        dma_fence_end_signalling(cookie);

        dma_fence_signal(&copy_work->fence);

        i915_ttm_memcpy_release(arg);
        dma_fence_put(&copy_work->fence);
}

static void __memcpy_irq_work(struct irq_work *irq_work)
{
        struct i915_ttm_memcpy_work *copy_work =
                container_of(irq_work, typeof(*copy_work), irq_work);
        struct i915_ttm_memcpy_arg *arg = &copy_work->arg;

        dma_fence_signal(&copy_work->fence);
        i915_ttm_memcpy_release(arg);
        dma_fence_put(&copy_work->fence);
}

static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct i915_ttm_memcpy_work *copy_work =
                container_of(cb, typeof(*copy_work), cb);

        if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
                INIT_WORK(&copy_work->work, __memcpy_work);
                queue_work(system_unbound_wq, &copy_work->work);
        } else {
                init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
                irq_work_queue(&copy_work->irq_work);
        }
}

static const char *get_driver_name(struct dma_fence *fence)
{
        return "i915_ttm_memcpy_work";
}

static const char *get_timeline_name(struct dma_fence *fence)
{
        return "unbound";
}

static const struct dma_fence_ops dma_fence_memcpy_ops = {
        .get_driver_name = get_driver_name,
        .get_timeline_name = get_timeline_name,
};

static struct dma_fence *
i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work,
                         struct dma_fence *dep)
{
        int ret;

        spin_lock_init(&work->lock);
        dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0);
        dma_fence_get(&work->fence);
        ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb);
        if (ret) {
                if (ret != -ENOENT)
                        dma_fence_wait(dep, false);

                return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL :
                               dep->error);
        }

        return &work->fence;
}
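
/*
 * Lifecycle sketch for the error intercept (a restatement of the code
 * above, using only names from this file):
 *
 *        fence = i915_ttm_memcpy_work_arm(work, blit_fence);
 *
 * If the blit succeeds, __memcpy_cb() signals the work's fence from
 * irq_work and no memcpy runs. If the blit fails (or fail_gpu_migration
 * is set), __memcpy_cb() queues __memcpy_work(), which performs the
 * memcpy under a dma-fence signalling annotation and then signals the
 * fence.
 */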

static struct dma_fence *
__i915_ttm_move(struct ttm_buffer_object *bo,
                const struct ttm_operation_ctx *ctx, bool clear,
                struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm,
                struct i915_refct_sgt *dst_rsgt, bool allow_accel,
                const struct i915_deps *move_deps)
{
        struct i915_ttm_memcpy_work *copy_work = NULL;
        struct i915_ttm_memcpy_arg _arg, *arg = &_arg;
        struct dma_fence *fence = ERR_PTR(-EINVAL);

        if (allow_accel) {
                fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm,
                                            &dst_rsgt->table, move_deps);

                /*
                 * We only need to intercept the error when moving to lmem.
                 * When moving to system, TTM or shmem will provide us with
                 * cleared pages.
                 */
                if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) &&
                    !I915_SELFTEST_ONLY(fail_gpu_migration ||
                                        fail_work_allocation))
                        goto out;
        }

        /* If we've scheduled gpu migration, try to arm the error intercept. */
        if (!IS_ERR(fence)) {
                struct dma_fence *dep = fence;

                if (!I915_SELFTEST_ONLY(fail_work_allocation))
                        copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL);

                if (copy_work) {
                        arg = &copy_work->arg;
                        i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
                                             dst_rsgt);
                        fence = i915_ttm_memcpy_work_arm(copy_work, dep);
                } else {
                        dma_fence_wait(dep, false);
                        fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ?
                                        -EINVAL : fence->error);
                }
                dma_fence_put(dep);

                if (!IS_ERR(fence))
                        goto out;
        } else {
                int err = PTR_ERR(fence);

                if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
                        return fence;

                if (move_deps) {
                        err = i915_deps_sync(move_deps, ctx);
                        if (err)
                                return ERR_PTR(err);
                }
        }

        /* Error intercept failed or no accelerated migration to start with */
        if (!copy_work)
                i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
                                     dst_rsgt);
        i915_ttm_move_memcpy(arg);
        i915_ttm_memcpy_release(arg);
        kfree(copy_work);

        return NULL;
out:
        if (!fence && copy_work) {
                i915_ttm_memcpy_release(arg);
                kfree(copy_work);
        }

        return fence;
}
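
/*
 * To summarize the failsafe strategy above (a restatement, not new
 * behaviour): first try the accelerated blit. If it was scheduled, arm
 * the memcpy error intercept so a failing blit degrades to an async
 * memcpy. If the blit could not be scheduled at all (and the error is
 * not -EINTR / -ERESTARTSYS / -EAGAIN), sync the dependencies and fall
 * back to an immediate synchronous memcpy.
 */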

static int
prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
          struct i915_deps *deps)
{
        int ret;

        ret = i915_deps_add_dependency(deps, bo->moving, ctx);
        if (!ret)
                ret = i915_deps_add_resv(deps, bo->base.resv, ctx);

        return ret;
}

/**
 * i915_ttm_move - The TTM move callback used by i915.
 * @bo: The buffer object.
 * @evict: Whether this is an eviction.
 * @ctx: Pointer to a struct ttm_operation_ctx indicating how the waits
 * should be performed.
 * @dst_mem: The destination ttm resource.
 * @hop: If we need multihop, what temporary memory type to move to.
 *
 * Return: 0 if successful, negative error code otherwise.
 */
int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
                  struct ttm_operation_ctx *ctx,
                  struct ttm_resource *dst_mem,
                  struct ttm_place *hop)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct ttm_resource_manager *dst_man =
                ttm_manager_type(bo->bdev, dst_mem->mem_type);
        struct dma_fence *migration_fence = NULL;
        struct ttm_tt *ttm = bo->ttm;
        struct i915_refct_sgt *dst_rsgt;
        bool clear;
        int ret;

        if (GEM_WARN_ON(!obj)) {
                ttm_bo_move_null(bo, dst_mem);
                return 0;
        }

        ret = i915_ttm_move_notify(bo);
        if (ret)
                return ret;

        if (obj->mm.madv != I915_MADV_WILLNEED) {
                i915_ttm_purge(obj);
                ttm_resource_free(bo, &dst_mem);
                return 0;
        }

        /* Populate ttm with pages if needed. Typically system memory. */
        if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
                ret = ttm_tt_populate(bo->bdev, ttm, ctx);
                if (ret)
                        return ret;
        }

        dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem);
        if (IS_ERR(dst_rsgt))
                return PTR_ERR(dst_rsgt);

        clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
        if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) {
                struct i915_deps deps;

                i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
                ret = prev_deps(bo, ctx, &deps);
                if (ret) {
                        i915_refct_sgt_put(dst_rsgt);
                        return ret;
                }

                migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, ttm,
                                                  dst_rsgt, true, &deps);
                i915_deps_fini(&deps);
        }

        /* We can possibly get an -ERESTARTSYS here */
        if (IS_ERR(migration_fence)) {
                i915_refct_sgt_put(dst_rsgt);
                return PTR_ERR(migration_fence);
        }

        if (migration_fence) {
                ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict,
                                                true, dst_mem);
                if (ret) {
                        dma_fence_wait(migration_fence, false);
                        ttm_bo_move_sync_cleanup(bo, dst_mem);
                }
                dma_fence_put(migration_fence);
        } else {
                ttm_bo_move_sync_cleanup(bo, dst_mem);
        }

        i915_ttm_adjust_domains_after_move(obj);
        i915_ttm_free_cached_io_rsgt(obj);

        if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) {
                obj->ttm.cached_io_rsgt = dst_rsgt;
                obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl;
                obj->ttm.get_io_page.sg_idx = 0;
        } else {
                i915_refct_sgt_put(dst_rsgt);
        }

        i915_ttm_adjust_lru(obj);
        i915_ttm_adjust_gem_after_move(obj);
        return 0;
}
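
/*
 * A sketch of how this callback is hooked up; the ops structure lives in
 * i915_gem_ttm.c and is reproduced here from memory as an illustration
 * only:
 *
 *        static struct ttm_device_funcs i915_ttm_bo_driver = {
 *                ...
 *                .move = i915_ttm_move,
 *                ...
 *        };
 */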

/**
 * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
 * another
 * @dst: The destination object
 * @src: The source object
 * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
 * @intr: Whether to perform waits interruptibly.
 *
 * Note: The caller is responsible for ensuring that the underlying
 * TTM objects are populated if needed and locked.
 *
 * Return: Zero on success. Negative error code on error. If @intr == true,
 * then it may return -ERESTARTSYS or -EINTR.
 */
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
                          struct drm_i915_gem_object *src,
                          bool allow_accel, bool intr)
{
        struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
        struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
        struct ttm_operation_ctx ctx = {
                .interruptible = intr,
        };
        struct i915_refct_sgt *dst_rsgt;
        struct dma_fence *copy_fence;
        struct i915_deps deps;
        int ret;

        assert_object_held(dst);
        assert_object_held(src);
        i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

        ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
        if (ret)
                return ret;

        ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx);
        if (ret)
                return ret;

        ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx);
        if (ret)
                return ret;

        dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource);
        copy_fence = __i915_ttm_move(src_bo, &ctx, false, dst_bo->resource,
                                     dst_bo->ttm, dst_rsgt, allow_accel,
                                     &deps);

        i915_deps_fini(&deps);
        i915_refct_sgt_put(dst_rsgt);
        if (IS_ERR_OR_NULL(copy_fence))
                return PTR_ERR_OR_ZERO(copy_fence);

        dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
        dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);

        dma_fence_put(copy_fence);

        return 0;
}
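
/*
 * A minimal usage sketch for i915_gem_obj_copy_ttm() (error handling and
 * ww-context handling elided; per the kernel-doc above, populating the
 * underlying TTM objects is the caller's responsibility and is assumed
 * done):
 *
 *        i915_gem_object_lock(src, NULL);
 *        i915_gem_object_lock(dst, NULL);
 *        err = i915_gem_obj_copy_ttm(dst, src, true, true);
 *        i915_gem_object_unlock(dst);
 *        i915_gem_object_unlock(src);
 */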