drm/ttm: stop setting page->index for the ttm_tt
platform/kernel/linux-starfive.git: drivers/gpu/drm/ttm/ttm_bo_vm.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

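/*
 * Helper for the fault path: if the BO still carries a pipelined-move fence
 * (bo->moving), wait for it before PTEs are populated. May drop the mmap_lock
 * and the reservation and ask the VM to retry the fault instead of stalling.
 */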
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                struct vm_fault *vmf)
{
        vm_fault_t ret = 0;
        int err = 0;

        if (likely(!bo->moving))
                goto out_unlock;

        /*
         * Quick non-stalling check for idle.
         */
        if (dma_fence_is_signaled(bo->moving))
                goto out_clear;

        /*
         * If possible, avoid waiting for GPU with mmap_lock
         * held.  We only do this if the fault allows retry and this
         * is the first attempt.
         */
        if (fault_flag_allow_retry_first(vmf->flags)) {
                ret = VM_FAULT_RETRY;
                if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_unlock;

                ttm_bo_get(bo);
                mmap_read_unlock(vmf->vma->vm_mm);
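                /*
                 * The wait result is deliberately ignored: VM_FAULT_RETRY is
                 * already set and the fault will be retried. Drop the
                 * reservation before returning.
                 */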
                (void) dma_fence_wait(bo->moving, true);
                dma_resv_unlock(bo->base.resv);
                ttm_bo_put(bo);
                goto out_unlock;
        }

        /*
         * Ordinary wait.
         */
        err = dma_fence_wait(bo->moving, true);
        if (unlikely(err != 0)) {
                ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
                goto out_unlock;
        }

out_clear:
        dma_fence_put(bo->moving);
        bo->moving = NULL;

out_unlock:
        return ret;
}

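/*
 * Resolve the PFN backing @page_offset of an io-mapped BO. Use the driver's
 * io_mem_pfn() callback when provided, otherwise assume the BO is mapped
 * linearly starting at bus.offset.
 */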
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                                       unsigned long page_offset)
{
        struct ttm_device *bdev = bo->bdev;

        if (bdev->funcs->io_mem_pfn)
                return bdev->funcs->io_mem_pfn(bo, page_offset);

        return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
}

/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be
 * dropped during long waits, and the callback to be restarted after the wait.
 * This allows other threads using the same virtual memory space concurrent
 * access to map() and unmap() completely unrelated buffer objects. TTM buffer
 * object reservations sometimes wait for GPU and should therefore be
 * considered long waits. This function reserves the buffer object
 * interruptibly, taking this into account. Starvation is avoided by the vm
 * system not allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *    0 on success and the bo was reserved.
 *    VM_FAULT_RETRY if blocking wait.
 *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
                             struct vm_fault *vmf)
{
        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_lock and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
        if (unlikely(!dma_resv_trylock(bo->base.resv))) {
                /*
                 * If the fault allows retry and this is the first
                 * fault attempt, we try to release the mmap_lock
                 * before waiting
                 */
                if (fault_flag_allow_retry_first(vmf->flags)) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                ttm_bo_get(bo);
                                mmap_read_unlock(vmf->vma->vm_mm);
                                if (!dma_resv_lock_interruptible(bo->base.resv,
                                                                 NULL))
                                        dma_resv_unlock(bo->base.resv);
                                ttm_bo_put(bo);
                        }

                        return VM_FAULT_RETRY;
                }

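                /*
                 * No retry possible (or not the first attempt): block for the
                 * reservation here. If interrupted by a signal, return
                 * VM_FAULT_NOPAGE so the access is retried once the signal
                 * has been handled.
                 */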
                if (dma_resv_lock_interruptible(bo->base.resv, NULL))
                        return VM_FAULT_NOPAGE;
        }

        /*
         * Refuse to fault imported pages. This should be handled
         * (if at all) by redirecting mmap to the exporter.
         */
        if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
                dma_resv_unlock(bo->base.resv);
                return VM_FAULT_SIGBUS;
        }

        return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/**
 * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
 * @vmf: Fault data
 * @bo: The buffer object
 * @page_offset: Page offset from bo start
 * @fault_page_size: The size of the fault in pages.
 * @pgprot: The page protections.
 *
 * Does additional checking whether it's possible to insert a PUD or PMD
 * pfn and performs the insertion.
 *
 * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
 * a huge fault was not possible, or on insertion error.
 */
static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
                                        struct ttm_buffer_object *bo,
                                        pgoff_t page_offset,
                                        pgoff_t fault_page_size,
                                        pgprot_t pgprot)
{
        pgoff_t i;
        vm_fault_t ret;
        unsigned long pfn;
        pfn_t pfnt;
        struct ttm_tt *ttm = bo->ttm;
        bool write = vmf->flags & FAULT_FLAG_WRITE;

        /* Fault should not cross bo boundary. */
        page_offset &= ~(fault_page_size - 1);
        if (page_offset + fault_page_size > bo->resource->num_pages)
                goto out_fallback;

        if (bo->resource->bus.is_iomem)
                pfn = ttm_bo_io_mem_pfn(bo, page_offset);
        else
                pfn = page_to_pfn(ttm->pages[page_offset]);

        /* pfn must be fault_page_size aligned. */
        if ((pfn & (fault_page_size - 1)) != 0)
                goto out_fallback;

        /* Check that memory is contiguous. */
        if (!bo->resource->bus.is_iomem) {
                for (i = 1; i < fault_page_size; ++i) {
                        if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
                                goto out_fallback;
                }
        } else if (bo->bdev->funcs->io_mem_pfn) {
                for (i = 1; i < fault_page_size; ++i) {
                        if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
                                goto out_fallback;
                }
        }

        pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
        if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
                ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
        else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
                ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
#endif
        else
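                /* Intentional assignment, not a comparison: warn once and fall back. */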
                WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);

        if (ret != VM_FAULT_NOPAGE)
                goto out_fallback;

        return VM_FAULT_NOPAGE;
out_fallback:
        count_vm_event(THP_FAULT_FALLBACK);
        return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
                                        struct ttm_buffer_object *bo,
                                        pgoff_t page_offset,
                                        pgoff_t fault_page_size,
                                        pgprot_t pgprot)
{
        return VM_FAULT_FALLBACK;
}
#endif

/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvise settings and the size of the GPU object
 * backed by the memory.
 * @fault_page_size: The size of the fault in pages.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                                    pgprot_t prot,
                                    pgoff_t num_prefault,
                                    pgoff_t fault_page_size)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        struct ttm_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int err;
        pgoff_t i;
        vm_fault_t ret = VM_FAULT_NOPAGE;
        unsigned long address = vmf->address;

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
        ret = ttm_bo_vm_fault_idle(bo, vmf);
        if (unlikely(ret != 0))
                return ret;

        err = ttm_mem_io_reserve(bdev, bo->resource);
        if (unlikely(err != 0))
                return VM_FAULT_SIGBUS;

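        /*
         * vm_pgoff is expressed in the drm_vma_manager offset space;
         * subtracting the BO's vma_node start yields page indices relative
         * to the start of the BO.
         */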
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
                vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
        page_last = vma_pages(vma) + vma->vm_pgoff -
                drm_vma_node_start(&bo->base.vma_node);

        if (unlikely(page_offset >= bo->resource->num_pages))
                return VM_FAULT_SIGBUS;

        prot = ttm_io_prot(bo, bo->resource, prot);
        if (!bo->resource->bus.is_iomem) {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
                        .no_wait_gpu = false,
                        .force_alloc = true
                };

                ttm = bo->ttm;
                if (ttm_tt_populate(bdev, bo->ttm, &ctx))
                        return VM_FAULT_OOM;
        } else {
                /* Iomem should not be marked encrypted */
                prot = pgprot_decrypted(prot);
        }

        /* We don't prefault on huge faults. Yet. */
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
                return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
                                             fault_page_size, prot);

        /*
         * Speculatively prefault a number of pages. Only error on
         * first page.
         */
        for (i = 0; i < num_prefault; ++i) {
                if (bo->resource->bus.is_iomem) {
                        pfn = ttm_bo_io_mem_pfn(bo, page_offset);
                } else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                return VM_FAULT_OOM;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        pfn = page_to_pfn(page);
                }

                /*
                 * Note that the value of @prot at this point may differ from
                 * the value of @vma->vm_page_prot in the caching- and
                 * encryption bits. This is because the exact location of the
                 * data may not be known at mmap() time and may also change
                 * at arbitrary times while the data is mmap'ed.
                 * See vmf_insert_mixed_prot() for a discussion.
                 */
                ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

                /* Never error on prefaulted PTEs */
                if (unlikely((ret & VM_FAULT_ERROR))) {
                        if (i == 0)
                                return VM_FAULT_NOPAGE;
                        else
                                break;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
        return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
{
        struct page *dummy_page = (struct page *)res;

        __free_page(dummy_page);
}

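/*
 * Map the whole VMA to a single zeroed dummy page. Used by ttm_bo_vm_fault()
 * when drm_dev_enter() fails, i.e. after the device has been unplugged, so
 * that user-space accesses keep working instead of receiving SIGBUS.
 */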
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        struct drm_device *ddev = bo->base.dev;
        vm_fault_t ret = VM_FAULT_NOPAGE;
        unsigned long address;
        unsigned long pfn;
        struct page *page;

        /* Allocate a new dummy page and map the whole VA range of this VMA to it */
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;

        /* Set the page to be freed using a drmm release action */
        if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
                return VM_FAULT_OOM;

        pfn = page_to_pfn(page);

        /* Prefault the entire VMA range right away to avoid further faults */
        for (address = vma->vm_start; address < vma->vm_end;
             address += PAGE_SIZE)
                ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_dummy_page);

vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        pgprot_t prot;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        struct drm_device *ddev = bo->base.dev;
        vm_fault_t ret;
        int idx;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        prot = vma->vm_page_prot;
        if (drm_dev_enter(ddev, &idx)) {
                ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
                drm_dev_exit(idx);
        } else {
                ret = ttm_bo_vm_dummy_page(vmf, prot);
        }
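
        /*
         * On VM_FAULT_RETRY without FAULT_FLAG_RETRY_NOWAIT the helpers have
         * already dropped the reservation (and the mmap_lock), so don't
         * unlock it again here.
         */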
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

        dma_resv_unlock(bo->base.resv);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);

void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo = vma->vm_private_data;

        WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

        ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo = vma->vm_private_data;

        ttm_bo_put(bo);
        vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                                 unsigned long offset,
                                 uint8_t *buf, int len, int write)
{
        unsigned long page = offset >> PAGE_SHIFT;
        unsigned long bytes_left = len;
        int ret;

        /* Copy a page at a time, that way no extra virtual address
         * mapping is needed
         */
        offset -= page << PAGE_SHIFT;
        do {
                unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
                struct ttm_bo_kmap_obj map;
                void *ptr;
                bool is_iomem;

                ret = ttm_bo_kmap(bo, page, 1, &map);
                if (ret)
                        return ret;

                ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
                WARN_ON_ONCE(is_iomem);
                if (write)
                        memcpy(ptr, buf, bytes);
                else
                        memcpy(buf, ptr, bytes);
                ttm_bo_kunmap(&map);

                page++;
                buf += bytes;
                bytes_left -= bytes;
                offset = 0;
        } while (bytes_left);

        return len;
}

int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                     void *buf, int len, int write)
{
        struct ttm_buffer_object *bo = vma->vm_private_data;
        unsigned long offset = (addr) - vma->vm_start +
                ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
                 << PAGE_SHIFT);
        int ret;

        if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
                return -EIO;

        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (ret)
                return ret;

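        /*
         * System and TT placements are backed by regular pages and can be
         * accessed through a kernel mapping; other placements (e.g. VRAM)
         * need the driver's access_memory() callback.
         */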
        switch (bo->resource->mem_type) {
        case TTM_PL_SYSTEM:
                fallthrough;
        case TTM_PL_TT:
                ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
                break;
        default:
                if (bo->bdev->funcs->access_memory)
                        ret = bo->bdev->funcs->access_memory(
                                bo, offset, buf, len, write);
                else
                        ret = -EIO;
        }

        ttm_bo_unreserve(bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access,
};

int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        /* Enforce no COW since it would result in really strange behavior. */
        if (is_cow_mapping(vma->vm_flags))
                return -EINVAL;

        ttm_bo_get(bo);

        /*
         * Drivers may want to override the vm_ops field. Otherwise we
         * use TTM's default callbacks.
         */
        if (!vma->vm_ops)
                vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = bo;

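        /*
         * PTEs are inserted as raw PFNs (VM_PFNMAP), so there are no struct
         * pages for the core VM to manage. VM_IO marks the area as an IO
         * mapping, VM_DONTEXPAND prevents mremap() from growing it, and
         * VM_DONTDUMP excludes it from core dumps.
         */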
        vma->vm_flags |= VM_PFNMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);