drivers/gpu/drm/i915/gem/i915_gem_ttm.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_mman.h"

#include "gt/intel_migrate.h"
#include "gt/intel_engine_pm.h"

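/*
 * i915 placements map onto TTM memory types: local memory regions start
 * at the first driver-private type (TTM_PL_PRIV), while stolen memory
 * and the GGTT reuse the generic VRAM and TT types.
 */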
#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
#define I915_PL_STOLEN TTM_PL_VRAM
#define I915_PL_GGTT TTM_PL_TT

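/*
 * Eviction priorities: TTM walks its LRU lists starting at priority 0,
 * so objects on lower-numbered lists are considered for eviction first.
 */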
#define I915_TTM_PRIO_PURGE     0
#define I915_TTM_PRIO_NO_PAGES  1
#define I915_TTM_PRIO_HAS_PAGES 2

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_st: The cached scatter-gather table.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
        struct ttm_tt ttm;
        struct device *dev;
        struct sg_table *cached_st;
};

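/*
 * Plain system memory is the initial and fallback placement. An fpfn /
 * lpfn of zero means the allocation is not restricted to a page range.
 */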
static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = I915_PL_SYSTEM,
        .flags = 0,
};

static struct ttm_placement i915_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags,
};

static int i915_ttm_err_to_gem(int err)
{
        /* Fastpath */
        if (likely(!err))
                return 0;

        switch (err) {
        case -EBUSY:
                /*
                 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
                 * restart the operation, since we don't record the contending
                 * lock. We use -EAGAIN to restart.
                 */
                return -EAGAIN;
        case -ENOSPC:
                /*
                 * The memory type / region is full and we can't evict.
                 * Except possibly system, which returns -ENOMEM instead.
                 */
                return -ENXIO;
        default:
                break;
        }

        return err;
}

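/*
 * For now, anything not placed in system memory is treated as iomem by
 * both the GPU and the CPU. The two helpers are kept separate since the
 * CPU side will change once cached GGTT-bound ttm_tts are supported.
 */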
static bool gpu_binds_iomem(struct ttm_resource *mem)
{
        return mem->mem_type != TTM_PL_SYSTEM;
}

static bool cpu_maps_iomem(struct ttm_resource *mem)
{
        /* Once / if we support GGTT, this is also false for cached ttm_tts */
        return mem->mem_type != TTM_PL_SYSTEM;
}

static enum i915_cache_level
i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
                     struct ttm_tt *ttm)
{
        return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
                ttm->caching == ttm_cached) ? I915_CACHE_LLC :
                I915_CACHE_NONE;
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);

static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
        /*
         * Objects that are only allowed in system memory get cached
         * CPU mappings. All other objects get WC mappings for now,
         * even while placed in system memory.
         */
        if (obj->mm.region->type == INTEL_MEMORY_SYSTEM &&
            obj->mm.n_placements <= 1)
                return ttm_cached;

        return ttm_write_combined;
}

static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
                           struct ttm_place *place,
                           unsigned int flags)
{
        memset(place, 0, sizeof(*place));
        place->mem_type = intel_region_to_ttm_type(mr);

        if (flags & I915_BO_ALLOC_CONTIGUOUS)
                place->flags = TTM_PL_FLAG_CONTIGUOUS;
}

static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
                            struct ttm_place *requested,
                            struct ttm_place *busy,
                            struct ttm_placement *placement)
{
        unsigned int num_allowed = obj->mm.n_placements;
        unsigned int flags = obj->flags;
        unsigned int i;

        placement->num_placement = 1;
        i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
                                   obj->mm.region, requested, flags);

        /* Cache this on object? */
        placement->num_busy_placement = num_allowed;
        for (i = 0; i < placement->num_busy_placement; ++i)
                i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);

        if (num_allowed == 0) {
                *busy = *requested;
                placement->num_busy_placement = 1;
        }

        placement->placement = requested;
        placement->busy_placement = busy;
}

static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
                                         uint32_t page_flags)
{
        struct ttm_resource_manager *man =
                ttm_manager_type(bo->bdev, bo->resource->mem_type);
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct i915_ttm_tt *i915_tt;
        int ret;

        i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
        if (!i915_tt)
                return NULL;

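        /*
         * If a CPU clear was requested and the pages stay in system
         * memory, let the TTM pool hand out zeroed pages instead of
         * clearing later.
         */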
        if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
            man->use_tt)
                page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;

        ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
                          i915_ttm_select_tt_caching(obj));
        if (ret) {
                kfree(i915_tt);
                return NULL;
        }

        i915_tt->dev = obj->base.dev->dev;

        return &i915_tt->ttm;
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

        if (i915_tt->cached_st) {
                dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
                                  DMA_BIDIRECTIONAL, 0);
                sg_free_table(i915_tt->cached_st);
                kfree(i915_tt->cached_st);
                i915_tt->cached_st = NULL;
        }
        ttm_pool_free(&bdev->pool, ttm);
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

        ttm_tt_destroy_common(bdev, ttm);
        ttm_tt_fini(ttm);
        kfree(i915_tt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
                                       const struct ttm_place *place)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

        /* Will do for now. Our pinned objects are still on TTM's LRU lists */
        return i915_gem_object_evictable(obj);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
                                 struct ttm_placement *placement)
{
        *placement = i915_sys_placement;
}

static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        int ret;

        ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
        if (ret)
                return ret;

        ret = __i915_gem_object_put_pages(obj);
        if (ret)
                return ret;

        return 0;
}

static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        if (!obj->ttm.cached_io_st)
                return;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
                radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
        rcu_read_unlock();

        sg_free_table(obj->ttm.cached_io_st);
        kfree(obj->ttm.cached_io_st);
        obj->ttm.cached_io_st = NULL;
}

static void
i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

        if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
                obj->write_domain = I915_GEM_DOMAIN_WC;
                obj->read_domains = I915_GEM_DOMAIN_WC;
        } else {
                obj->write_domain = I915_GEM_DOMAIN_CPU;
                obj->read_domains = I915_GEM_DOMAIN_CPU;
        }
}

static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        unsigned int cache_level;
        unsigned int i;

        /*
         * If object was moved to an allowable region, update the object
         * region to consider it migrated. Note that if it's currently not
         * in an allowable region, it's evicted and we don't update the
         * object region.
         */
        if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
                for (i = 0; i < obj->mm.n_placements; ++i) {
                        struct intel_memory_region *mr = obj->mm.placements[i];

                        if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
                            mr != obj->mm.region) {
                                i915_gem_object_release_memory_region(obj);
                                i915_gem_object_init_memory_region(obj, mr);
                                break;
                        }
                }
        }

        obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);

        obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
                I915_BO_FLAG_STRUCT_PAGE;

        cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
                                           bo->ttm);
        i915_gem_object_set_cache_coherency(obj, cache_level);
}

static void i915_ttm_purge(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        struct ttm_placement place = {};
        int ret;

        if (obj->mm.madv == __I915_MADV_PURGED)
                return;

        /* TTM's purge interface. Note that we might be reentering. */
        ret = ttm_bo_validate(bo, &place, &ctx);
        if (!ret) {
                obj->write_domain = 0;
                obj->read_domains = 0;
                i915_ttm_adjust_gem_after_move(obj);
                i915_ttm_free_cached_io_st(obj);
                obj->mm.madv = __I915_MADV_PURGED;
        }
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        int ret = i915_ttm_move_notify(bo);

        GEM_WARN_ON(ret);
        GEM_WARN_ON(obj->ttm.cached_io_st);
        if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
                i915_ttm_purge(obj);
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

        if (likely(obj)) {
                /* This releases all gem object bindings to the backend. */
                i915_ttm_free_cached_io_st(obj);
                __i915_gem_free_object(obj);
        }
}

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
        struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

        /* There's some room for optimization here... */
        GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
                   ttm_mem_type < I915_PL_LMEM0);
        if (ttm_mem_type == I915_PL_SYSTEM)
                return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
                                                  0);

        return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
                                          ttm_mem_type - I915_PL_LMEM0);
}

static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
        struct sg_table *st;
        int ret;

        if (i915_tt->cached_st)
                return i915_tt->cached_st;

        st = kzalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table_from_pages_segment(st,
                        ttm->pages, ttm->num_pages,
                        0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
                        i915_sg_segment_size(), GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ERR_PTR(ret);
        }

        ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
        if (ret) {
                sg_free_table(st);
                kfree(st);
                return ERR_PTR(ret);
        }

        i915_tt->cached_st = st;
        return st;
}

static struct sg_table *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                         struct ttm_resource *res)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

        if (!gpu_binds_iomem(res))
                return i915_ttm_tt_get_st(bo->ttm);

        /*
         * If CPU mapping differs, we need to add the ttm_tt pages to
         * the resulting st. Might make sense for GGTT.
         */
        GEM_WARN_ON(!cpu_maps_iomem(res));
        return intel_region_ttm_resource_to_st(obj->mm.region, res);
}

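/*
 * Clear or copy the object's contents on the GPU using the GT migration
 * context. On failure the caller falls back to a CPU memcpy move.
 */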
static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
                               struct ttm_resource *dst_mem,
                               struct sg_table *dst_st)
{
        struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
                                                     bdev);
        struct ttm_resource_manager *src_man =
                ttm_manager_type(bo->bdev, bo->resource->mem_type);
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct sg_table *src_st;
        struct i915_request *rq;
        struct ttm_tt *ttm = bo->ttm;
        enum i915_cache_level src_level, dst_level;
        int ret;

        if (!i915->gt.migrate.context)
                return -EINVAL;

        dst_level = i915_ttm_cache_level(i915, dst_mem, ttm);
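        /*
         * An unpopulated source has no contents to copy; at most the
         * destination needs to be cleared on the GPU.
         */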
        if (!ttm || !ttm_tt_is_populated(ttm)) {
                if (bo->type == ttm_bo_type_kernel)
                        return -EINVAL;

                if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
                        return 0;

                intel_engine_pm_get(i915->gt.migrate.context->engine);
                ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
                                                  dst_st->sgl, dst_level,
                                                  gpu_binds_iomem(dst_mem),
                                                  0, &rq);

                if (!ret && rq) {
                        i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
                        i915_request_put(rq);
                }
                intel_engine_pm_put(i915->gt.migrate.context->engine);
        } else {
                src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) :
                        obj->ttm.cached_io_st;

                src_level = i915_ttm_cache_level(i915, bo->resource, ttm);
                intel_engine_pm_get(i915->gt.migrate.context->engine);
                ret = intel_context_migrate_copy(i915->gt.migrate.context,
                                                 NULL, src_st->sgl, src_level,
                                                 gpu_binds_iomem(bo->resource),
                                                 dst_st->sgl, dst_level,
                                                 gpu_binds_iomem(dst_mem),
                                                 &rq);
                if (!ret && rq) {
                        i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
                        i915_request_put(rq);
                }
                intel_engine_pm_put(i915->gt.migrate.context->engine);
        }

        return ret;
}

static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
                         struct ttm_operation_ctx *ctx,
                         struct ttm_resource *dst_mem,
                         struct ttm_place *hop)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct ttm_resource_manager *dst_man =
                ttm_manager_type(bo->bdev, dst_mem->mem_type);
        struct intel_memory_region *dst_reg, *src_reg;
        union {
                struct ttm_kmap_iter_tt tt;
                struct ttm_kmap_iter_iomap io;
        } _dst_iter, _src_iter;
        struct ttm_kmap_iter *dst_iter, *src_iter;
        struct sg_table *dst_st;
        int ret;

        dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
        src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
        GEM_BUG_ON(!dst_reg || !src_reg);

        /* Sync for now. We could do the actual copy async. */
        ret = ttm_bo_wait_ctx(bo, ctx);
        if (ret)
                return ret;

        ret = i915_ttm_move_notify(bo);
        if (ret)
                return ret;

        if (obj->mm.madv != I915_MADV_WILLNEED) {
                i915_ttm_purge(obj);
                ttm_resource_free(bo, &dst_mem);
                return 0;
        }

        /* Populate ttm with pages if needed. Typically system memory. */
        if (bo->ttm && (dst_man->use_tt ||
                        (bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
                ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
                if (ret)
                        return ret;
        }

        dst_st = i915_ttm_resource_get_st(obj, dst_mem);
        if (IS_ERR(dst_st))
                return PTR_ERR(dst_st);

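        /* Try a GPU-accelerated move first; fall back to a CPU memcpy. */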
        ret = i915_ttm_accel_move(bo, dst_mem, dst_st);
        if (ret) {
                /* If we start mapping GGTT, we can no longer use man::use_tt here. */
                dst_iter = !cpu_maps_iomem(dst_mem) ?
                        ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
                        ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
                                                 dst_st, dst_reg->region.start);

                src_iter = !cpu_maps_iomem(bo->resource) ?
                        ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
                        ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
                                                 obj->ttm.cached_io_st,
                                                 src_reg->region.start);

                ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
        }
        /* Below dst_mem becomes bo->resource. */
        ttm_bo_move_sync_cleanup(bo, dst_mem);
        i915_ttm_adjust_domains_after_move(obj);
        i915_ttm_free_cached_io_st(obj);

        if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
                obj->ttm.cached_io_st = dst_st;
                obj->ttm.get_io_page.sg_pos = dst_st->sgl;
                obj->ttm.get_io_page.sg_idx = 0;
        }

        i915_ttm_adjust_gem_after_move(obj);
        return 0;
}

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
        if (!cpu_maps_iomem(mem))
                return 0;

        mem->bus.caching = ttm_write_combined;
        mem->bus.is_iomem = true;

        return 0;
}

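/*
 * Look up the pfn backing @page_offset for a CPU fault into an
 * iomem-placed object, using the cached io scatter-list.
 */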
static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                         unsigned long page_offset)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
        struct scatterlist *sg;
        unsigned int ofs;

        GEM_WARN_ON(bo->ttm);

        sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);

        return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

static struct ttm_device_funcs i915_ttm_bo_driver = {
        .ttm_tt_create = i915_ttm_tt_create,
        .ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
        .ttm_tt_destroy = i915_ttm_tt_destroy,
        .eviction_valuable = i915_ttm_eviction_valuable,
        .evict_flags = i915_ttm_evict_flags,
        .move = i915_ttm_move,
        .swap_notify = i915_ttm_swap_notify,
        .delete_mem_notify = i915_ttm_delete_mem_notify,
        .io_mem_reserve = i915_ttm_io_mem_reserve,
        .io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
        return &i915_ttm_bo_driver;
}

static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
                                struct ttm_placement *placement)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        struct sg_table *st;
        int real_num_busy;
        int ret;

        /* First try only the requested placement. No eviction. */
        real_num_busy = fetch_and_zero(&placement->num_busy_placement);
        ret = ttm_bo_validate(bo, placement, &ctx);
        if (ret) {
                ret = i915_ttm_err_to_gem(ret);
                /*
                 * Anything that wants to restart the operation gets to
                 * do that.
                 */
                if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
                    ret == -EAGAIN)
                        return ret;

                /*
                 * If the initial attempt fails, allow all accepted placements,
                 * evicting if necessary.
                 */
                placement->num_busy_placement = real_num_busy;
                ret = ttm_bo_validate(bo, placement, &ctx);
                if (ret)
                        return i915_ttm_err_to_gem(ret);
        }

        i915_ttm_adjust_lru(obj);
        if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
                ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
                if (ret)
                        return ret;

                i915_ttm_adjust_domains_after_move(obj);
                i915_ttm_adjust_gem_after_move(obj);
        }

        if (!i915_gem_object_has_pages(obj)) {
                /* Object either has a page vector or is an iomem object */
                st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
                if (IS_ERR(st))
                        return PTR_ERR(st);

                __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
        }

        return ret;
}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
        struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
        struct ttm_placement placement;

        GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

        /* Move to the requested placement. */
        i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

        return __i915_ttm_get_pages(obj, &placement);
}

/**
 * DOC: Migration vs eviction
 *
 * GEM migration may not be the same as TTM migration / eviction. If
 * the TTM core decides to evict an object it may be evicted to a
 * TTM memory type that is not in the object's allowable GEM regions, or
 * in fact theoretically to a TTM memory type that doesn't correspond to
 * a GEM memory region. In that case the object's GEM region is not
 * updated, and the data is migrated back to the GEM region at
 * get_pages time. TTM may however set up CPU ptes to the object even
 * when it is evicted.
 * GEM forced migration, using the i915_ttm_migrate() op, is allowed even
 * to regions that are not in the object's list of allowable placements.
 */
static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
                            struct intel_memory_region *mr)
{
        struct ttm_place requested;
        struct ttm_placement placement;
        int ret;

        i915_ttm_place_from_region(mr, &requested, obj->flags);
        placement.num_placement = 1;
        placement.num_busy_placement = 1;
        placement.placement = &requested;
        placement.busy_placement = &requested;

        ret = __i915_ttm_get_pages(obj, &placement);
        if (ret)
                return ret;

        /*
         * Reinitialize the region bindings. This is primarily
         * required for objects where the new region is not in
         * its allowable placements.
         */
        if (obj->mm.region != mr) {
                i915_gem_object_release_memory_region(obj);
                i915_gem_object_init_memory_region(obj, mr);
        }

        return 0;
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *st)
{
        /*
         * We're currently not called from a shrinker, so put_pages()
         * typically means the object is about to be destroyed, or that
         * we're called from move_notify(). So just avoid doing much for
         * now. If the object is not destroyed next, the TTM eviction
         * logic and shrinkers will move it out if needed.
         */

        i915_ttm_adjust_lru(obj);
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

        /*
         * Don't manipulate the TTM LRUs while in TTM bo destruction.
         * We're called through i915_ttm_delete_mem_notify().
         */
        if (!kref_read(&bo->kref))
                return;

        /*
         * Put on the correct LRU list depending on the MADV status
         */
        spin_lock(&bo->bdev->lru_lock);
        if (obj->mm.madv != I915_MADV_WILLNEED) {
                bo->priority = I915_TTM_PRIO_PURGE;
        } else if (!i915_gem_object_has_pages(obj)) {
                if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
                        bo->priority = I915_TTM_PRIO_HAS_PAGES;
        } else {
                if (bo->priority > I915_TTM_PRIO_NO_PAGES)
                        bo->priority = I915_TTM_PRIO_NO_PAGES;
        }

        ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
        spin_unlock(&bo->bdev->lru_lock);
}

/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on TTM's delayed destroy handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling
 * is complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 *    which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 *    it's not idle, and using the TTM destroyed list handling could help
 *    us benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
        if (obj->ttm.created) {
                ttm_bo_put(i915_gem_to_ttm(obj));
        } else {
                __i915_gem_free_object(obj);
                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
        }
}

static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
        struct vm_area_struct *area = vmf->vma;
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(area->vm_private_data);

        /* Sanity check that we allow writing into this object */
        if (unlikely(i915_gem_object_is_readonly(obj) &&
                     area->vm_flags & VM_WRITE))
                return VM_FAULT_SIGBUS;

        return ttm_bo_vm_fault(vmf);
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
              void *buf, int len, int write)
{
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(area->vm_private_data);

        if (i915_gem_object_is_readonly(obj) && write)
                return -EACCES;

        return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(vma->vm_private_data);

        GEM_BUG_ON(!obj);
        i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(vma->vm_private_data);

        GEM_BUG_ON(!obj);
        i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
        .fault = vm_fault_ttm,
        .access = vm_access_ttm,
        .open = ttm_vm_open,
        .close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
        /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
        GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

        return drm_vma_node_offset_addr(&obj->base.vma_node);
}

static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
        .name = "i915_gem_object_ttm",

        .get_pages = i915_ttm_get_pages,
        .put_pages = i915_ttm_put_pages,
        .truncate = i915_ttm_purge,
        .adjust_lru = i915_ttm_adjust_lru,
        .delayed_free = i915_ttm_delayed_free,
        .migrate = i915_ttm_migrate,
        .mmap_offset = i915_ttm_mmap_offset,
        .mmap_ops = &vm_ops_ttm,
};

void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

        i915_gem_object_release_memory_region(obj);
        mutex_destroy(&obj->ttm.get_io_page.lock);
        if (obj->ttm.created)
                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

/**
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @size: Object size in bytes.
 * @page_size: The requested page size in bytes for this object, or 0
 * for the default.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
                               struct drm_i915_gem_object *obj,
                               resource_size_t size,
                               resource_size_t page_size,
                               unsigned int flags)
{
        static struct lock_class_key lock_class;
        struct drm_i915_private *i915 = mem->i915;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        enum ttm_bo_type bo_type;
        int ret;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
        i915_gem_object_init_memory_region(obj, mem);
        i915_gem_object_make_unshrinkable(obj);
        INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->ttm.get_io_page.lock);
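        /* Objects allocated for userspace get a device bo type so they
         * can be mmapped; everything else stays kernel-internal.
         */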
        bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
                ttm_bo_type_kernel;

        obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);

        /* Forcing the page size is kernel internal only */
        GEM_BUG_ON(page_size && obj->mm.n_placements);

        /*
         * If this function fails, it will call the destructor, but
         * our caller still owns the object. So no freeing in the
         * destructor until obj->ttm.created is true.
         * Similarly, in delayed_destroy, we can't call ttm_bo_put()
         * until successful initialization.
         */
        ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
                                   bo_type, &i915_sys_placement,
                                   page_size >> PAGE_SHIFT,
                                   &ctx, NULL, NULL, i915_ttm_bo_destroy);
        if (ret)
                return i915_ttm_err_to_gem(ret);

        obj->ttm.created = true;
        i915_ttm_adjust_domains_after_move(obj);
        i915_ttm_adjust_gem_after_move(obj);
        i915_gem_object_unlock(obj);

        return 0;
}

static const struct intel_memory_region_ops ttm_system_region_ops = {
        .init_object = __i915_gem_ttm_object_init,
};

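/*
 * Set up the TTM-backed system memory region, sized to cover all of
 * system RAM.
 */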
struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
                          u16 type, u16 instance)
{
        struct intel_memory_region *mr;

        mr = intel_memory_region_create(i915, 0,
                                        totalram_pages() << PAGE_SHIFT,
                                        PAGE_SIZE, 0,
                                        type, instance,
                                        &ttm_system_region_ops);
        if (IS_ERR(mr))
                return mr;

        intel_memory_region_set_name(mr, "system-ttm");
        return mr;
}