nouveau: fix migrate_to_ram() for faulting page
[platform/kernel/linux-starfive.git] drivers/gpu/drm/nouveau/nouveau_dmem.c
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <nvhw/class/cla0b5.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>
#include <linux/memremap.h>
#include <linux/migrate.h>

/*
 * FIXME: this is ugly. Right now we are using TTM to allocate VRAM and we pin
 * it in VRAM while it is in use. We likely want to overhaul memory management
 * for nouveau to be more page-like (not necessarily with the system page size
 * but a bigger page size) at the lowest level, and have some shim layer on top
 * that would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

enum nouveau_aper {
        NOUVEAU_APER_VIRT,
        NOUVEAU_APER_VRAM,
        NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      enum nouveau_aper, u64 dst_addr,
                                      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
                                      enum nouveau_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
        unsigned long callocated;
        struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
        nouveau_clear_page_t clear_func;
        struct nouveau_channel *chan;
};

struct nouveau_dmem {
        struct nouveau_drm *drm;
        struct nouveau_dmem_migrate migrate;
        struct list_head chunks;
        struct mutex mutex;
        struct page *free_pages;
        spinlock_t lock;
};

static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
        return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

        return chunk->drm;
}

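/*
 * Translate a device-private struct page into its physical VRAM address:
 * the page's offset within the chunk's pagemap range is added to the start
 * offset of the chunk's backing buffer object in VRAM.
 */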
unsigned long nouveau_dmem_page_addr(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
                                chunk->pagemap.range.start;

        return chunk->bo->offset + off;
}

static void nouveau_dmem_page_free(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;

        spin_lock(&dmem->lock);
        page->zone_device_data = dmem->free_pages;
        dmem->free_pages = page;

        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME: when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&dmem->lock);
}

static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
        if (fence) {
                nouveau_fence_wait(*fence, true, false);
                nouveau_fence_unref(fence);
        } else {
                /*
                 * FIXME: wait for the channel to be idle before finalizing
                 * the hmem object.
                 */
        }
}

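/*
 * Copy a single page from VRAM back to system memory using the migration
 * channel's copy function. The destination page is locked and DMA-mapped
 * here; the caller is responsible for unmapping the DMA address once the
 * copy has been fenced.
 */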
static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
                                struct page *dpage, dma_addr_t *dma_addr)
{
        struct device *dev = drm->dev->dev;

        lock_page(dpage);

        *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr))
                return -EIO;

        if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
                                         NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
                dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
                return -EIO;
        }

        return 0;
}

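/*
 * CPU page-fault handler for device-private pages: migrate the faulting
 * page back to system RAM. The GPU mappings covering the range are
 * invalidated under the SVMM lock before the copy engine moves the data.
 */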
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct nouveau_drm *drm = page_to_drm(vmf->page);
        struct nouveau_dmem *dmem = drm->dmem;
        struct nouveau_fence *fence;
        struct nouveau_svmm *svmm;
        struct page *spage, *dpage;
        unsigned long src = 0, dst = 0;
        dma_addr_t dma_addr = 0;
        vm_fault_t ret = 0;
        struct migrate_vma args = {
                .vma            = vmf->vma,
                .start          = vmf->address,
                .end            = vmf->address + PAGE_SIZE,
                .src            = &src,
                .dst            = &dst,
                .pgmap_owner    = drm->dev,
                .fault_page     = vmf->page,
                .flags          = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
        };

        /*
         * FIXME: what we really want is to find some heuristic to migrate
         * more than just one page on a CPU fault. When such a fault happens
         * it is very likely that the surrounding pages will CPU fault too.
         */
        if (migrate_vma_setup(&args) < 0)
                return VM_FAULT_SIGBUS;
        if (!args.cpages)
                return 0;

        spage = migrate_pfn_to_page(src);
        if (!spage || !(src & MIGRATE_PFN_MIGRATE))
                goto done;

        dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
        if (!dpage)
                goto done;

        dst = migrate_pfn(page_to_pfn(dpage));

        svmm = spage->zone_device_data;
        mutex_lock(&svmm->mutex);
        nouveau_svmm_invalidate(svmm, args.start, args.end);
        ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
        mutex_unlock(&svmm->mutex);
        if (ret) {
                ret = VM_FAULT_SIGBUS;
                goto done;
        }

        nouveau_fence_new(dmem->migrate.chan, false, &fence);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
        dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
        migrate_vma_finalize(&args);
        return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
        .page_free              = nouveau_dmem_page_free,
        .migrate_to_ram         = nouveau_dmem_migrate_to_ram,
};

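/*
 * Allocate a new DMEM chunk: reserve a free physical address range for the
 * device-private pages, back it with a pinned DMEM_CHUNK_SIZE VRAM buffer
 * object, hook the range up via memremap_pages(), and put all but the last
 * page on the free list. The remaining page is returned to the caller.
 */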
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
        struct nouveau_dmem_chunk *chunk;
        struct resource *res;
        struct page *page;
        void *ptr;
        unsigned long i, pfn_first;
        int ret;

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        /* Allocate unused physical address space for device private pages. */
        res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
                                      "nouveau_dmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out_free;
        }

        chunk->drm = drm;
        chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
        chunk->pagemap.range.start = res->start;
        chunk->pagemap.range.end = res->end;
        chunk->pagemap.nr_range = 1;
        chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
        chunk->pagemap.owner = drm->dev;

        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out_release;

        ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
        if (ret)
                goto out_bo_free;

        ptr = memremap_pages(&chunk->pagemap, numa_node_id());
        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
                goto out_bo_unpin;
        }

        mutex_lock(&drm->dmem->mutex);
        list_add(&chunk->list, &drm->dmem->chunks);
        mutex_unlock(&drm->dmem->mutex);

        pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
        page = pfn_to_page(pfn_first);
        spin_lock(&drm->dmem->lock);
        for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
                page->zone_device_data = drm->dmem->free_pages;
                drm->dmem->free_pages = page;
        }
        *ppage = page;
        chunk->callocated++;
        spin_unlock(&drm->dmem->lock);

        NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
                DMEM_CHUNK_SIZE >> 20);

        return 0;

out_bo_unpin:
        nouveau_bo_unpin(chunk->bo);
out_bo_free:
        nouveau_bo_ref(NULL, &chunk->bo);
out_release:
        release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
        kfree(chunk);
out:
        return ret;
}

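/*
 * Grab a device-private page from the free list, allocating a fresh chunk
 * if the list is empty, and initialize it for ZONE_DEVICE use.
 */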
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        struct page *page = NULL;
        int ret;

        spin_lock(&drm->dmem->lock);
        if (drm->dmem->free_pages) {
                page = drm->dmem->free_pages;
                drm->dmem->free_pages = page->zone_device_data;
                chunk = nouveau_page_to_chunk(page);
                chunk->callocated++;
                spin_unlock(&drm->dmem->lock);
        } else {
                spin_unlock(&drm->dmem->lock);
                ret = nouveau_dmem_chunk_alloc(drm, &page);
                if (ret)
                        return NULL;
        }

        zone_device_page_init(page);
        return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
        unlock_page(page);
        put_page(page);
}

void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list) {
                ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list)
                nouveau_bo_unpin(chunk->bo);
        mutex_unlock(&drm->dmem->mutex);
}

/*
 * Evict all pages mapping a chunk.
 */
static void
nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
{
        unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
        unsigned long *src_pfns, *dst_pfns;
        dma_addr_t *dma_addrs;
        struct nouveau_fence *fence;

        src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
        dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
        dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);

        migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
                        npages);

        for (i = 0; i < npages; i++) {
                if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
                        struct page *dpage;

                        /*
                         * __GFP_NOFAIL because the GPU is going away and there
                         * is nothing sensible we can do if we can't copy the
                         * data back.
                         */
                        dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
                        dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
                        nouveau_dmem_copy_one(chunk->drm,
                                        migrate_pfn_to_page(src_pfns[i]), dpage,
                                        &dma_addrs[i]);
                }
        }

        nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
        migrate_device_pages(src_pfns, dst_pfns, npages);
        nouveau_dmem_fence_done(&fence);
        migrate_device_finalize(src_pfns, dst_pfns, npages);
        kfree(src_pfns);
        kfree(dst_pfns);
        for (i = 0; i < npages; i++)
                dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
        kfree(dma_addrs);
}

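/*
 * Tear down DMEM: evict every chunk's pages back to system memory, unpin
 * and release the backing buffer objects, and unmap the device-private
 * address ranges.
 */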
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
                nouveau_dmem_evict_chunk(chunk);
                nouveau_bo_unpin(chunk->bo);
                nouveau_bo_ref(NULL, &chunk->bo);
                WARN_ON(chunk->callocated);
                list_del(&chunk->list);
                memunmap_pages(&chunk->pagemap);
                release_mem_region(chunk->pagemap.range.start,
                                   range_len(&chunk->pagemap.range));
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}

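/*
 * Copy-engine backed page copy (see nvhw/class/cla0b5.h): program the
 * source and destination apertures (local framebuffer or coherent system
 * memory), then launch a non-pipelined multi-line copy of npages lines of
 * PAGE_SIZE bytes each.
 */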
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    enum nouveau_aper dst_aper, u64 dst_addr,
                    enum nouveau_aper src_aper, u64 src_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = 0;
        int ret;

        ret = PUSH_WAIT(push, 13);
        if (ret)
                return ret;

        if (src_aper != NOUVEAU_APER_VIRT) {
                switch (src_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }

                launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
        }

        if (dst_aper != NOUVEAU_APER_VIRT) {
                switch (dst_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }

                launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
        }

        PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
                  NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),

                                OFFSET_IN_LOWER, lower_32_bits(src_addr),

                                OFFSET_OUT_UPPER,
                  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

                                OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
                                PITCH_IN, PAGE_SIZE,
                                PITCH_OUT, PAGE_SIZE,
                                LINE_LENGTH_IN, PAGE_SIZE,
                                LINE_COUNT, npages);

        PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
                  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
                  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
}

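/*
 * Copy-engine backed clear: the remap constants write zeroes to `length`
 * bytes at dst_addr. Each element is two 4-byte components (CONST_A and
 * CONST_B), i.e. 8 bytes, hence the `length >> 3` line length.
 */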
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
                     enum nouveau_aper dst_aper, u64 dst_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = 0;
        int ret;

        ret = PUSH_WAIT(push, 12);
        if (ret)
                return ret;

        switch (dst_aper) {
        case NOUVEAU_APER_VRAM:
                PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                          NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
                break;
        case NOUVEAU_APER_HOST:
                PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                          NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                break;
        default:
                return -EINVAL;
        }

        launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);

        PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
                                SET_REMAP_CONST_B, 0,

                                SET_REMAP_COMPONENTS,
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));

        PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
                  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

                                OFFSET_OUT_LOWER, lower_32_bits(dst_addr));

        PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);

        PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
                  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
                  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
}

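/*
 * Pick the copy/clear implementation based on the copy-engine class used by
 * TTM's channel. Only the Pascal, Volta and Turing copy classes are handled;
 * anything else disables DMEM.
 */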
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
        switch (drm->ttm.copy.oclass) {
        case PASCAL_DMA_COPY_A:
        case PASCAL_DMA_COPY_B:
        case  VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
                drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
                break;
        }
        return -ENODEV;
}

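/*
 * Set up the DMEM state at driver load time. This is a no-op on pre-Pascal
 * hardware or when no suitable copy engine is available.
 */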
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
        int ret;

        /* This only makes sense on Pascal or newer. */
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                return;

        drm->dmem->drm = drm;
        mutex_init(&drm->dmem->mutex);
        INIT_LIST_HEAD(&drm->dmem->chunks);
        spin_lock_init(&drm->dmem->lock);

        /* Initialize migration DMA helpers before registering memory. */
        ret = nouveau_dmem_migrate_init(drm);
        if (ret) {
                kfree(drm->dmem);
                drm->dmem = NULL;
        }
}

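/*
 * Migrate one system page into VRAM: allocate a device-private page, copy
 * the source data with the copy engine (or clear the destination when there
 * is no source page), and build the PFN map entry that will be installed in
 * the GPU page tables.
 */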
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, unsigned long src,
                dma_addr_t *dma_addr, u64 *pfn)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        unsigned long paddr;

        spage = migrate_pfn_to_page(src);
        if (!(src & MIGRATE_PFN_MIGRATE))
                goto out;

        dpage = nouveau_dmem_page_alloc_locked(drm);
        if (!dpage)
                goto out;

        paddr = nouveau_dmem_page_addr(dpage);
        if (spage) {
                *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
                if (drm->dmem->migrate.copy_func(drm, 1,
                        NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
                *dma_addr = DMA_MAPPING_ERROR;
                if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
                        NOUVEAU_APER_VRAM, paddr))
                        goto out_free_page;
        }

        dpage->zone_device_data = svmm;
        *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
                ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
        if (src & MIGRATE_PFN_WRITE)
                *pfn |= NVIF_VMM_PFNMAP_V0_W;
        return migrate_pfn(page_to_pfn(dpage));

out_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
out:
        *pfn = NVIF_VMM_PFNMAP_V0_NONE;
        return 0;
}

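/*
 * Migrate one batch of pages set up by migrate_vma_setup(): issue the
 * per-page copies, fence them, commit the migration and update the GPU
 * page tables before unmapping the DMA addresses.
 */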
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, struct migrate_vma *args,
                dma_addr_t *dma_addrs, u64 *pfns)
{
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;

        for (i = 0; addr < args->end; i++) {
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
                                args->src[i], dma_addrs + nr_dma, pfns + i);
                if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
                        nr_dma++;
                addr += PAGE_SIZE;
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
        nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

        while (nr_dma--) {
                dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
                                DMA_BIDIRECTIONAL);
        }
        migrate_vma_finalize(args);
}

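/*
 * Migrate an address range of a process into VRAM, in batches of at most
 * SG_MAX_SINGLE_ALLOC pages. Failing to migrate an individual page is not
 * fatal; such pages simply stay in system memory.
 */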
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                         struct nouveau_svmm *svmm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
        dma_addr_t *dma_addrs;
        struct migrate_vma args = {
                .vma            = vma,
                .start          = start,
                .pgmap_owner    = drm->dev,
                .flags          = MIGRATE_VMA_SELECT_SYSTEM,
        };
        unsigned long i;
        u64 *pfns;
        int ret = -ENOMEM;

        if (drm->dmem == NULL)
                return -ENODEV;

        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
        args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
        if (!args.dst)
                goto out_free_src;

        dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
        if (!dma_addrs)
                goto out_free_dst;

        pfns = nouveau_pfns_alloc(max);
        if (!pfns)
                goto out_free_dma;

        for (i = 0; i < npages; i += max) {
                if (args.start + (max << PAGE_SHIFT) > end)
                        args.end = end;
                else
                        args.end = args.start + (max << PAGE_SHIFT);

                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_pfns;

                if (args.cpages)
                        nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
                                                   pfns);
                args.start = args.end;
        }

        ret = 0;
out_free_pfns:
        nouveau_pfns_free(pfns);
out_free_dma:
        kfree(dma_addrs);
out_free_dst:
        kfree(args.dst);
out_free_src:
        kfree(args.src);
out:
        return ret;
}