/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"
#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <nvhw/class/cla0b5.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
/*
 * FIXME: this is ugly. Right now we are using TTM to allocate vram and we pin
 * it in vram while it is in use. We likely want to overhaul memory management
 * for nouveau to be more page-like (not necessarily with system page size but
 * a bigger page size) at the lowest level and have some shim layer on top that
 * would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

enum nouveau_aper {
	NOUVEAU_APER_VIRT,
	NOUVEAU_APER_VRAM,
	NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper, u64 dst_addr,
				      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
				    enum nouveau_aper, u64 dst_addr);
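
/*
 * VRAM is handed to the core mm in fixed-size chunks: DMEM_CHUNK_SIZE is
 * 2UL << 20 = 2 MiB, i.e. DMEM_CHUNK_NPAGES = 512 device-private pages with a
 * 4 KiB PAGE_SIZE. Each chunk is backed by a nouveau_bo pinned in VRAM and a
 * dev_pagemap of type MEMORY_DEVICE_PRIVATE; "callocated" counts how many of
 * its pages are currently handed out.
 */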
struct nouveau_dmem_chunk {
	struct list_head list;
	struct nouveau_bo *bo;
	struct nouveau_drm *drm;
	unsigned long callocated;
	struct dev_pagemap pagemap;
};
struct nouveau_dmem_migrate {
	nouveau_migrate_copy_t copy_func;
	nouveau_clear_page_t clear_func;
	struct nouveau_channel *chan;
};
struct nouveau_dmem {
	struct nouveau_drm *drm;
	struct nouveau_dmem_migrate migrate;
	struct list_head chunks;
	struct mutex mutex;
	struct page *free_pages;
	spinlock_t lock;
};
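
/*
 * Free device pages are kept on a singly linked list threaded through
 * page->zone_device_data and protected by dmem->lock; the chunk list is
 * protected by dmem->mutex.
 */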
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
	return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

	return chunk->drm;
}

unsigned long nouveau_dmem_page_addr(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
			    chunk->pagemap.range.start;

	return chunk->bo->offset + off;
}
static void nouveau_dmem_page_free(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	struct nouveau_dmem *dmem = chunk->drm->dmem;

	spin_lock(&dmem->lock);
	page->zone_device_data = dmem->free_pages;
	dmem->free_pages = page;

	WARN_ON(!chunk->callocated);
	chunk->callocated--;
	/*
	 * FIXME when chunk->callocated reaches 0 we should add the chunk to
	 * a reclaim list so that it can be freed in case of memory pressure.
	 */
	spin_unlock(&dmem->lock);
}
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
	if (fence) {
		nouveau_fence_wait(*fence, true, false);
		nouveau_fence_unref(fence);
	} else {
		/* FIXME: wait for the channel to be idle before finalizing. */
	}
}
static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
				 struct page *dpage, dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;

	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr))
		return -EIO;

	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
					 NOUVEAU_APER_VRAM,
					 nouveau_dmem_page_addr(spage))) {
		dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		return -EIO;
	}

	return 0;
}
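
/*
 * CPU fault handler for a device-private page: copy the faulting page from
 * VRAM into a freshly allocated system page and let the core mm replace the
 * device-private PTE. Only the single faulting page is migrated (see the
 * FIXME below about migrating a larger window).
 */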
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct nouveau_drm *drm = page_to_drm(vmf->page);
	struct nouveau_dmem *dmem = drm->dmem;
	struct nouveau_fence *fence;
	struct nouveau_svmm *svmm;
	struct page *spage, *dpage;
	unsigned long src = 0, dst = 0;
	dma_addr_t dma_addr = 0;
	vm_fault_t ret = 0;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= drm->dev,
		.fault_page	= vmf->page,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	/*
	 * FIXME what we really want is to find some heuristic to migrate more
	 * than just one page on CPU fault. When such a fault happens it is
	 * very likely that more surrounding pages will CPU fault too.
	 */
	if (migrate_vma_setup(&args) < 0)
		return VM_FAULT_SIGBUS;
	if (!args.cpages)
		return 0;

	spage = migrate_pfn_to_page(src);
	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
		goto done;

	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
	if (!dpage)
		goto done;

	dst = migrate_pfn(page_to_pfn(dpage));

	svmm = spage->zone_device_data;
	mutex_lock(&svmm->mutex);
	nouveau_svmm_invalidate(svmm, args.start, args.end);
	ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
	mutex_unlock(&svmm->mutex);
	if (ret) {
		ret = VM_FAULT_SIGBUS;
		goto done;
	}

	nouveau_fence_new(dmem->migrate.chan, false, &fence);
	migrate_vma_pages(&args);
	nouveau_dmem_fence_done(&fence);
	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
	migrate_vma_finalize(&args);
	return ret;
}
static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
	.page_free		= nouveau_dmem_page_free,
	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
};
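
/*
 * Carve out a new 2 MiB chunk of VRAM: reserve an unused physical address
 * range for the device-private struct pages, back it with a pinned VRAM
 * buffer object, register the pagemap with memremap_pages() and push all but
 * one of the new pages onto the free list (the last page is returned to the
 * caller).
 */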
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
	struct nouveau_dmem_chunk *chunk;
	struct resource *res;
	struct page *page;
	void *ptr;
	unsigned long i, pfn_first;
	int ret;

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	/* Allocate unused physical address space for device private pages. */
	res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
				      "nouveau_dmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out_free;
	}

	chunk->drm = drm;
	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
	chunk->pagemap.range.start = res->start;
	chunk->pagemap.range.end = res->end;
	chunk->pagemap.nr_range = 1;
	chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
	chunk->pagemap.owner = drm->dev;

	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
			     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
			     &chunk->bo);
	if (ret)
		goto out_release;

	ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
	if (ret)
		goto out_bo_free;

	ptr = memremap_pages(&chunk->pagemap, numa_node_id());
	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		goto out_bo_unpin;
	}

	mutex_lock(&drm->dmem->mutex);
	list_add(&chunk->list, &drm->dmem->chunks);
	mutex_unlock(&drm->dmem->mutex);

	pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
	page = pfn_to_page(pfn_first);
	spin_lock(&drm->dmem->lock);
	for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
		page->zone_device_data = drm->dmem->free_pages;
		drm->dmem->free_pages = page;
	}
	*ppage = page;
	chunk->callocated++;
	spin_unlock(&drm->dmem->lock);

	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
		DMEM_CHUNK_SIZE >> 20);

	return 0;

out_bo_unpin:
	nouveau_bo_unpin(chunk->bo);
out_bo_free:
	nouveau_bo_ref(NULL, &chunk->bo);
out_release:
	release_mem_region(chunk->pagemap.range.start,
			   range_len(&chunk->pagemap.range));
out_free:
	kfree(chunk);
out:
	return ret;
}
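
/*
 * Take one page off the free list, allocating a fresh chunk if the list is
 * empty, and initialise it as a zone-device page before handing it out.
 */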
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	struct page *page = NULL;
	int ret;

	spin_lock(&drm->dmem->lock);
	if (drm->dmem->free_pages) {
		page = drm->dmem->free_pages;
		drm->dmem->free_pages = page->zone_device_data;
		chunk = nouveau_page_to_chunk(page);
		chunk->callocated++;
		spin_unlock(&drm->dmem->lock);
	} else {
		spin_unlock(&drm->dmem->lock);
		ret = nouveau_dmem_chunk_alloc(drm, &page);
		if (ret)
			return NULL;
	}

	zone_device_page_init(page);
	return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
	unlock_page(page);
	put_page(page);
}
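
/*
 * Across a suspend/resume cycle the VRAM buffer objects backing the chunks
 * are unpinned and re-pinned; the device-private struct pages themselves stay
 * registered.
 */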
void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
		ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
		/* FIXME handle pin failure */
		WARN_ON(ret);
	}
	mutex_unlock(&drm->dmem->mutex);
}
void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list)
		nouveau_bo_unpin(chunk->bo);
	mutex_unlock(&drm->dmem->mutex);
}
/*
 * Evict all pages mapping a chunk.
 */
static void
nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
{
	unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
	unsigned long *src_pfns, *dst_pfns;
	dma_addr_t *dma_addrs;
	struct nouveau_fence *fence;

	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
	dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);

	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
			     npages);

	for (i = 0; i < npages; i++) {
		if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
			struct page *dpage;

			/*
			 * __GFP_NOFAIL because the GPU is going away and there
			 * is nothing sensible we can do if we can't copy the
			 * data back.
			 */
			dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
			dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
			nouveau_dmem_copy_one(chunk->drm,
					migrate_pfn_to_page(src_pfns[i]), dpage,
					&dma_addrs[i]);
		}
	}

	nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
	migrate_device_pages(src_pfns, dst_pfns, npages);
	nouveau_dmem_fence_done(&fence);
	migrate_device_finalize(src_pfns, dst_pfns, npages);
	kfree(src_pfns);
	kfree(dst_pfns);
	for (i = 0; i < npages; i++)
		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
	kfree(dma_addrs);
}
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk, *tmp;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);

	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
		nouveau_dmem_evict_chunk(chunk);
		nouveau_bo_unpin(chunk->bo);
		nouveau_bo_ref(NULL, &chunk->bo);
		WARN_ON(chunk->callocated);
		list_del(&chunk->list);
		memunmap_pages(&chunk->pagemap);
		release_mem_region(chunk->pagemap.range.start,
				   range_len(&chunk->pagemap.range));
		kfree(chunk);
	}

	mutex_unlock(&drm->dmem->mutex);
}
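
/*
 * The two helpers below program the copy engine through the NVA0B5 method
 * headers (hooked up for Pascal and newer engines in
 * nouveau_dmem_migrate_init()): one does a pitch-linear copy of npages
 * PAGE_SIZE-sized lines between apertures, the other fills a destination
 * range with zeroes via the engine's remap constants.
 */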
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
		    enum nouveau_aper dst_aper, u64 dst_addr,
		    enum nouveau_aper src_aper, u64 src_addr)
{
	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
	u32 launch_dma = 0;
	int ret;

	ret = PUSH_WAIT(push, 13);
	if (ret)
		return ret;

	if (src_aper != NOUVEAU_APER_VIRT) {
		switch (src_aper) {
		case NOUVEAU_APER_VRAM:
			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
			break;
		case NOUVEAU_APER_HOST:
			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
			break;
		default:
			return -EINVAL;
		}

		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
	}

	if (dst_aper != NOUVEAU_APER_VIRT) {
		switch (dst_aper) {
		case NOUVEAU_APER_VRAM:
			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
			break;
		case NOUVEAU_APER_HOST:
			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
			break;
		default:
			return -EINVAL;
		}

		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
	}

	PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
		  NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),

				OFFSET_IN_LOWER, lower_32_bits(src_addr),

				OFFSET_OUT_UPPER,
		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

				OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
				PITCH_IN, PAGE_SIZE,
				PITCH_OUT, PAGE_SIZE,
				LINE_LENGTH_IN, PAGE_SIZE,
				LINE_COUNT, npages);

	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
	return 0;
}
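
/*
 * For the clear path the engine writes the two 32-bit remap constants (both
 * zero) per element, i.e. 8 bytes at a time, which is why LINE_LENGTH_IN is
 * programmed as length >> 3.
 */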
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
		     enum nouveau_aper dst_aper, u64 dst_addr)
{
	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
	u32 launch_dma = 0;
	int ret;

	ret = PUSH_WAIT(push, 12);
	if (ret)
		return ret;

	switch (dst_aper) {
	case NOUVEAU_APER_VRAM:
		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
		break;
	case NOUVEAU_APER_HOST:
		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
		break;
	default:
		return -EINVAL;
	}

	launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);

	PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
				SET_REMAP_CONST_B, 0,

				SET_REMAP_COMPONENTS,
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));

	PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

				OFFSET_OUT_LOWER, lower_32_bits(dst_addr));

	PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);

	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
	return 0;
}
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
	switch (drm->ttm.copy.oclass) {
	case PASCAL_DMA_COPY_A:
	case PASCAL_DMA_COPY_B:
	case VOLTA_DMA_COPY_A:
	case TURING_DMA_COPY_A:
		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
		drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
		drm->dmem->migrate.chan = drm->ttm.chan;
		return 0;
	default:
		break;
	}
	return -ENODEV;
}
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
	int ret;

	/* This only makes sense on PASCAL or newer */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
		return;

	drm->dmem->drm = drm;
	mutex_init(&drm->dmem->mutex);
	INIT_LIST_HEAD(&drm->dmem->chunks);
	spin_lock_init(&drm->dmem->lock);

	/* Initialize migration dma helpers before registering memory */
	ret = nouveau_dmem_migrate_init(drm);
	if (ret) {
		kfree(drm->dmem);
		drm->dmem = NULL;
	}
}
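
/*
 * Copy (or clear) one page on its way into VRAM. If there is a backing system
 * page it is DMA-mapped and copied into the freshly allocated device page,
 * otherwise the device page is zero-filled. On success the matching
 * NVIF_VMM_PFNMAP entry for the GPU page tables is written to *pfn and the
 * new device pfn is returned in migrate_pfn() format.
 */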
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
		struct nouveau_svmm *svmm, unsigned long src,
		dma_addr_t *dma_addr, u64 *pfn)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage, *spage;
	unsigned long paddr;

	spage = migrate_pfn_to_page(src);
	if (!(src & MIGRATE_PFN_MIGRATE))
		goto out;

	dpage = nouveau_dmem_page_alloc_locked(drm);
	if (!dpage)
		goto out;

	paddr = nouveau_dmem_page_addr(dpage);
	if (spage) {
		*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *dma_addr))
			goto out_free_page;
		if (drm->dmem->migrate.copy_func(drm, 1,
			NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
			goto out_dma_unmap;
	} else {
		*dma_addr = DMA_MAPPING_ERROR;
		if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
			NOUVEAU_APER_VRAM, paddr))
			goto out_free_page;
	}

	dpage->zone_device_data = svmm;
	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
	if (src & MIGRATE_PFN_WRITE)
		*pfn |= NVIF_VMM_PFNMAP_V0_W;
	return migrate_pfn(page_to_pfn(dpage));

out_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
	nouveau_dmem_page_free_locked(drm, dpage);
out:
	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
	return 0;
}
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
		struct nouveau_svmm *svmm, struct migrate_vma *args,
		dma_addr_t *dma_addrs, u64 *pfns)
{
	struct nouveau_fence *fence;
	unsigned long addr = args->start, nr_dma = 0, i;

	for (i = 0; addr < args->end; i++) {
		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
				args->src[i], dma_addrs + nr_dma, pfns + i);
		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
			nr_dma++;
		addr += PAGE_SIZE;
	}

	nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
	migrate_vma_pages(args);
	nouveau_dmem_fence_done(&fence);
	nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

	while (nr_dma--) {
		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
	}
	migrate_vma_finalize(args);
}
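
/*
 * Migrate a whole VMA range into VRAM, working through it in batches of at
 * most SG_MAX_SINGLE_ALLOC pages, which bounds the size of the temporary
 * src/dst pfn and dma-address arrays.
 */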
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
			 struct nouveau_svmm *svmm,
			 struct vm_area_struct *vma,
			 unsigned long start,
			 unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
	dma_addr_t *dma_addrs;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.pgmap_owner	= drm->dev,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i;
	u64 *pfns;
	int ret = -ENOMEM;

	if (drm->dmem == NULL)
		return -ENODEV;

	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
	if (!args.src)
		goto out;
	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
	if (!args.dst)
		goto out_free_src;

	dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
	if (!dma_addrs)
		goto out_free_dst;

	pfns = nouveau_pfns_alloc(max);
	if (!pfns)
		goto out_free_dma;

	for (i = 0; i < npages; i += max) {
		if (args.start + (max << PAGE_SHIFT) > end)
			args.end = end;
		else
			args.end = args.start + (max << PAGE_SHIFT);

		ret = migrate_vma_setup(&args);
		if (ret)
			goto out_free_pfns;

		if (args.cpages)
			nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
						   pfns);
		args.start = args.end;
	}

	ret = 0;
out_free_pfns:
	nouveau_pfns_free(pfns);