drivers/gpu/drm/i915/gem/i915_gem_pages.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

#include "gt/intel_gt.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        unsigned long supported = INTEL_INFO(i915)->page_sizes;
        bool shrinkable;
        int i;

        assert_object_held_shared(obj);

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_DONTNEED;

        /* Make the pages coherent with the GPU (flushing any swapin). */
        if (obj->cache_dirty) {
                obj->write_domain = 0;
                if (i915_gem_object_has_struct_page(obj))
                        drm_clflush_sg(pages);
                obj->cache_dirty = false;
        }

        obj->mm.get_page.sg_pos = pages->sgl;
        obj->mm.get_page.sg_idx = 0;
        obj->mm.get_dma_page.sg_pos = pages->sgl;
        obj->mm.get_dma_page.sg_idx = 0;

        obj->mm.pages = pages;

        GEM_BUG_ON(!sg_page_sizes);
        obj->mm.page_sizes.phys = sg_page_sizes;

        /*
         * Calculate the supported page-sizes which fit into the given
         * sg_page_sizes. This will give us the page-sizes which we may be able
         * to use opportunistically when later inserting into the GTT. For
         * example if phys=2G, then in theory we should be able to use 1G, 2M,
         * 64K or 4K pages, although in practice this will depend on a number of
         * other factors.
         */
        obj->mm.page_sizes.sg = 0;
        for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
                if (obj->mm.page_sizes.phys & ~0u << i)
                        obj->mm.page_sizes.sg |= BIT(i);
        }
        GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

        shrinkable = i915_gem_object_is_shrinkable(obj);

        if (i915_gem_object_is_tiled(obj) &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                i915_gem_object_set_tiling_quirk(obj);
                GEM_BUG_ON(!list_empty(&obj->mm.link));
                atomic_inc(&obj->mm.shrink_pin);
                shrinkable = false;
        }

        if (shrinkable) {
                struct list_head *list;
                unsigned long flags;

                assert_object_held(obj);
                spin_lock_irqsave(&i915->mm.obj_lock, flags);

                i915->mm.shrink_count++;
                i915->mm.shrink_memory += obj->base.size;

                if (obj->mm.madv != I915_MADV_WILLNEED)
                        list = &i915->mm.purge_list;
                else
                        list = &i915->mm.shrink_list;
                list_add_tail(&obj->mm.link, list);

                atomic_set(&obj->mm.shrink_pin, 0);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
}
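
/*
 * Worked example for the mask computed above (illustrative, not driver
 * code): on a platform whose supported page sizes are 4K | 64K | 2M, an
 * object backed by a mix of 2M and 64K chunks has
 * phys == I915_GTT_PAGE_SIZE_2M | I915_GTT_PAGE_SIZE_64K. For each
 * supported bit i, BIT(i) is set in page_sizes.sg whenever phys contains
 * any size >= BIT(i), so sg == 2M | 64K | 4K: a larger chunk can always
 * be mapped opportunistically with any smaller supported page size.
 */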

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        int err;

        assert_object_held_shared(obj);

        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                drm_dbg(&i915->drm,
                        "Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }

        err = obj->ops->get_pages(obj);
        GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

        return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        int err;

        assert_object_held(obj);

        assert_object_held_shared(obj);

        if (unlikely(!i915_gem_object_has_pages(obj))) {
                GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        return err;

                /* Publish the new pages before they are seen as pinned */
                smp_mb__before_atomic();
        }
        atomic_inc(&obj->mm.pages_pin_count);

        return 0;
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);

        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}
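
/*
 * Illustrative sketch (not part of the driver): the expected pairing for
 * the helpers above. The hypothetical caller below pins the pages around
 * any use of the backing store and drops the pin with
 * i915_gem_object_unpin_pages() when done.
 */
#if 0
static int example_with_pinned_pages(struct drm_i915_gem_object *obj)
{
        int err;

        err = i915_gem_object_pin_pages_unlocked(obj);
        if (err)
                return err;

        /* obj->mm.pages is valid here and cannot be reaped by the shrinker */

        i915_gem_object_unpin_pages(obj);
        return 0;
}
#endif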

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
        drm_gem_free_mmap_offset(&obj->base);
        if (obj->ops->truncate)
                obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
        assert_object_held_shared(obj);
        GEM_BUG_ON(i915_gem_object_has_pages(obj));

        if (obj->ops->writeback)
                obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_page.radix, iter.index);
        radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
        rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
        /* Direct page_address() maps need no teardown; only vunmap vmaps */
        if (is_vmalloc_addr(ptr))
                vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        assert_object_held_shared(obj);

        pages = fetch_and_zero(&obj->mm.pages);
        if (IS_ERR_OR_NULL(pages))
                return pages;

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_WILLNEED;

        i915_gem_object_make_unshrinkable(obj);

        if (obj->mm.mapping) {
                unmap_object(obj, page_mask_bits(obj->mm.mapping));
                obj->mm.mapping = NULL;
        }

        __i915_gem_object_reset_page_iter(obj);
        obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

        if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
                intel_wakeref_t wakeref;

                with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
                        intel_gt_invalidate_tlbs(&i915->gt);
        }

        return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;

        /* May be called by shrinker from within get_pages() (on another bo) */
        assert_object_held_shared(obj);

        i915_gem_object_release_mmap_offset(obj);

        /*
         * ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early.
         */
        pages = __i915_gem_object_unset_pages(obj);

        /*
         * XXX Temporary hijinx to avoid updating all backends to handle
         * NULL pages. In the future, when we have more asynchronous
         * get_pages backends we should be better able to handle the
         * cancellation of the async task in a more uniform manner.
         */
        if (!IS_ERR_OR_NULL(pages))
                obj->ops->put_pages(obj, pages);

        return 0;
}
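
/*
 * As the -EBUSY check above enforces, the backing store can only be
 * released once every pages_pin_count reference has been dropped, e.g.
 * via i915_gem_object_unpin_pages(); teardown paths are expected to
 * unpin before calling this.
 */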

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
                                      enum i915_map_type type)
{
        unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
        struct page *stack[32], **pages = stack, *page;
        struct sgt_iter iter;
        pgprot_t pgprot;
        void *vaddr;

        switch (type) {
        default:
                MISSING_CASE(type);
                fallthrough;    /* to use PAGE_KERNEL anyway */
        case I915_MAP_WB:
                /*
                 * On 32b, highmem uses a finite set of indirect PTE (i.e.
                 * vmap) to provide virtual mappings of the high pages.
                 * As these are finite, map_new_virtual() must wait for some
                 * other kmap() to finish when it runs out. If we map a large
                 * number of objects, there is no method for it to tell us
                 * to release the mappings, and we deadlock.
                 *
                 * However, if we make an explicit vmap of the page, that
                 * uses a larger vmalloc arena, and also has the ability
                 * to tell us to release unwanted mappings. Most importantly,
                 * it will fail and propagate an error instead of waiting
                 * forever.
                 *
                 * So if the page is beyond the 32b boundary, make an explicit
                 * vmap.
                 */
                if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
                        return page_address(sg_page(obj->mm.pages->sgl));
                pgprot = PAGE_KERNEL;
                break;
        case I915_MAP_WC:
                pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
                break;
        }

        if (n_pages > ARRAY_SIZE(stack)) {
                /* Too big for stack -- allocate temporary array instead */
                pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_page(page, iter, obj->mm.pages)
                pages[i++] = page;
        vaddr = vmap(pages, n_pages, 0, pgprot);
        if (pages != stack)
                kvfree(pages);

        return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
                                     enum i915_map_type type)
{
        resource_size_t iomap = obj->mm.region->iomap.base -
                obj->mm.region->region.start;
        unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
        unsigned long stack[32], *pfns = stack, i;
        struct sgt_iter iter;
        dma_addr_t addr;
        void *vaddr;

        GEM_BUG_ON(type != I915_MAP_WC);

        if (n_pfn > ARRAY_SIZE(stack)) {
                /* Too big for stack -- allocate temporary array instead */
                pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
                if (!pfns)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_daddr(addr, iter, obj->mm.pages)
                pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
        vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
        if (pfns != stack)
                kvfree(pfns);

        return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                              enum i915_map_type type)
{
        enum i915_map_type has_type;
        bool pinned;
        void *ptr;
        int err;

        if (!i915_gem_object_has_struct_page(obj) &&
            !i915_gem_object_has_iomem(obj))
                return ERR_PTR(-ENXIO);

        assert_object_held(obj);

        pinned = !(type & I915_MAP_OVERRIDE);
        type &= ~I915_MAP_OVERRIDE;

        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
                if (unlikely(!i915_gem_object_has_pages(obj))) {
                        GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                        err = ____i915_gem_object_get_pages(obj);
                        if (err)
                                return ERR_PTR(err);

                        smp_mb__before_atomic();
                }
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        /*
         * For discrete our CPU mappings need to be consistent in order to
         * function correctly on !x86. When mapping things through TTM, we use
         * the same rules to determine the caching type.
         *
         * The caching rules, starting from DG1:
         *
         *      - If the object can be placed in device local-memory, then the
         *        pages should be allocated and mapped as write-combined only.
         *
         *      - Everything else is always allocated and mapped as write-back,
         *        with the guarantee that everything is also coherent with the
         *        GPU.
         *
         * Internal users of lmem are already expected to get this right, so no
         * fudging needed there.
         */
        if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
                if (type != I915_MAP_WC && !obj->mm.n_placements) {
                        ptr = ERR_PTR(-ENODEV);
                        goto err_unpin;
                }

                type = I915_MAP_WC;
        } else if (IS_DGFX(to_i915(obj->base.dev))) {
                type = I915_MAP_WB;
        }

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
                if (pinned) {
                        ptr = ERR_PTR(-EBUSY);
                        goto err_unpin;
                }

                unmap_object(obj, ptr);

                ptr = obj->mm.mapping = NULL;
        }

        if (!ptr) {
                if (GEM_WARN_ON(type == I915_MAP_WC &&
                                !static_cpu_has(X86_FEATURE_PAT)))
                        ptr = ERR_PTR(-ENODEV);
                else if (i915_gem_object_has_struct_page(obj))
                        ptr = i915_gem_object_map_page(obj, type);
                else
                        ptr = i915_gem_object_map_pfn(obj, type);
                if (IS_ERR(ptr))
                        goto err_unpin;

                obj->mm.mapping = page_pack_bits(ptr, type);
        }

        return ptr;

err_unpin:
        atomic_dec(&obj->mm.pages_pin_count);
        return ptr;
}

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
                                       enum i915_map_type type)
{
        void *ret;

        i915_gem_object_lock(obj, NULL);
        ret = i915_gem_object_pin_map(obj, type);
        i915_gem_object_unlock(obj);

        return ret;
}
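
/*
 * Illustrative sketch (not part of the driver): filling an object through
 * a CPU mapping. The helper is hypothetical and only shows how
 * i915_gem_object_pin_map_unlocked(), __i915_gem_object_flush_map() and
 * i915_gem_object_unpin_map() pair up.
 */
#if 0
static int example_fill_first_dword(struct drm_i915_gem_object *obj, u32 value)
{
        u32 *vaddr;

        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        vaddr[0] = value;
        /* The map pin taken above satisfies the pinned-pages assert below */
        __i915_gem_object_flush_map(obj, 0, sizeof(*vaddr));

        i915_gem_object_unpin_map(obj);
        return 0;
}
#endif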

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size)
{
        enum i915_map_type has_type;
        void *ptr;

        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
        GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
                                     offset, size, obj->base.size));

        wmb(); /* let all previous writes be visible to coherent partners */
        obj->mm.dirty = true;

        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
                return;

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (has_type == I915_MAP_WC)
                return;

        drm_clflush_virt_range(ptr + offset, size);
        if (size == obj->base.size) {
                obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
                obj->cache_dirty = false;
        }
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!obj->mm.mapping);

        /*
         * We allow removing the mapping from underneath pinned pages!
         *
         * Furthermore, since this is an unsafe operation reserved only
         * for construction time manipulation, we ignore locking prudence.
         */
        unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

        i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
                         unsigned int *offset,
                         bool dma)
{
        struct scatterlist *sg;
        unsigned int idx, count;

        might_sleep();
        GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
        if (!i915_gem_object_has_pinned_pages(obj))
                assert_object_held(obj);

        /* As we iterate forward through the sg, we record each entry in a
         * radixtree for quick repeated (backwards) lookups. If we have seen
         * this index previously, we will have an entry for it.
         *
         * Initial lookup is O(N), but this is amortized to O(1) for
         * sequential page access (where each new request is consecutive
         * to the previous one). Repeated lookups are O(lg(obj->base.size)),
         * i.e. O(1) with a large constant!
         */
        if (n < READ_ONCE(iter->sg_idx))
                goto lookup;

        mutex_lock(&iter->lock);

        /* We prefer to reuse the last sg so that repeated lookup of this
         * (or the subsequent) sg are fast - comparing against the last
         * sg is faster than going through the radixtree.
         */

        sg = iter->sg_pos;
        idx = iter->sg_idx;
        count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

        while (idx + count <= n) {
                void *entry;
                unsigned long i;
                int ret;

                /* If we cannot allocate and insert this entry, or the
                 * individual pages from this range, cancel updating the
                 * sg_idx so that on this lookup we are forced to linearly
                 * scan onwards, but on future lookups we will try the
                 * insertion again (in which case we need to be careful of
                 * the error return reporting that we have already inserted
                 * this index).
                 */
                ret = radix_tree_insert(&iter->radix, idx, sg);
                if (ret && ret != -EEXIST)
                        goto scan;

                entry = xa_mk_value(idx);
                for (i = 1; i < count; i++) {
                        ret = radix_tree_insert(&iter->radix, idx + i, entry);
                        if (ret && ret != -EEXIST)
                                goto scan;
                }

                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

scan:
        iter->sg_pos = sg;
        iter->sg_idx = idx;

        mutex_unlock(&iter->lock);

        if (unlikely(n < idx)) /* insertion completed by another thread */
                goto lookup;

        /* In case we failed to insert the entry into the radixtree, we need
         * to look beyond the current sg.
         */
        while (idx + count <= n) {
                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

        *offset = n - idx;
        return sg;

lookup:
        rcu_read_lock();

        sg = radix_tree_lookup(&iter->radix, n);
        GEM_BUG_ON(!sg);

        /* If this index is in the middle of a multi-page sg entry,
         * the radix tree will contain a value entry that points
         * to the start of that range. We will return the pointer to
         * the base page and the offset of this page within the
         * sg entry's range.
         */
        *offset = 0;
        if (unlikely(xa_is_value(sg))) {
                unsigned long base = xa_to_value(sg);

                sg = radix_tree_lookup(&iter->radix, base);
                GEM_BUG_ON(!sg);

                *offset = n - base;
        }

        rcu_read_unlock();

        return sg;
}
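
/*
 * Worked example for the radix cache above (illustrative): if the second
 * sg entry covers pages [4, 8), the walk stores the sg pointer at index 4
 * and xa_mk_value(4) at indices 5-7. A later lookup of page 6 hits the
 * value entry, re-looks up index 4 to fetch the real sg, and returns
 * *offset == 2 within that entry.
 */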

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
        struct scatterlist *sg;
        unsigned int offset;

        GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

        sg = i915_gem_object_get_sg(obj, n, &offset);
        return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n)
{
        struct page *page;

        page = i915_gem_object_get_page(obj, n);
        /*
         * If the object is already tracked as dirty, every page will be
         * flagged when the pages are released; only dirty this page
         * explicitly otherwise.
         */
        if (!obj->mm.dirty)
                set_page_dirty(page);

        return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len)
{
        struct scatterlist *sg;
        unsigned int offset;

        sg = i915_gem_object_get_sg_dma(obj, n, &offset);

        if (len)
                *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

        return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n)
{
        return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
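
/*
 * Illustrative sketch (not part of the driver): walking every page's DMA
 * address, e.g. when programming page-table entries. The helper and the
 * loop body are hypothetical.
 */
#if 0
static void example_walk_dma_addresses(struct drm_i915_gem_object *obj)
{
        unsigned long n, count = obj->base.size >> PAGE_SHIFT;

        for (n = 0; n < count; n++) {
                dma_addr_t addr = i915_gem_object_get_dma_address(obj, n);

                (void)addr; /* a real caller would program addr into a PTE */
        }
}
#endif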