/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
/*
 * NV10-NV40 tiling helpers
 */
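/* (Re)program one hardware tile region: drop the fence previously
 * attached to it, tear down any existing setup, and, when a non-zero
 * pitch is given, program the new layout into the fb tile registers.
 */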
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nvkm_fb *fb = device->fb;
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}
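/* TTM destroy callback: warn if the bo is still pinned or still attached
 * to a GEM object, release its tile region, then free the wrapper.
 */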
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}
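/* Round buffer size and alignment up to what the hardware requires:
 * pre-Tesla chips need tile-mode dependent padding, newer chips need
 * padding to the bo's (possibly large) page size.
 */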
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}
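/* Allocate and initialise a nouveau_bo and hand it to TTM.  Note that on
 * failure ttm_bo_init() invokes nouveau_bo_del_ttm() itself, so there is
 * no kfree() in the error path here.
 */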
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.vm)
		lpg_shift = drm->client.vm->mmu->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	if (!nvxx_device(&drm->device)->func->cpu_coherent)
		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}
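/* Expand a placement type mask into an array of ttm_place entries. */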
static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
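/* Pin a buffer into the requested memory type, optionally forcing a
 * contiguous VRAM allocation, and adjust the vram/gart available-memory
 * accounting to match.
 */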
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    memtype == TTM_PL_FLAG_VRAM && contig) {
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
			if (bo->mem.mem_type == TTM_PL_VRAM) {
				struct nvkm_mem *mem = bo->mem.mm_node;
				if (!list_is_singular(&mem->regions))
					evict = true;
			}
			nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
			force = true;
		}
	}

	if (nvbo->pin_refcnt) {
		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 1 << bo->mem.mem_type, memtype);
			ret = -EBUSY;
		}
		nvbo->pin_refcnt++;
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nvbo->pin_refcnt++;
	nouveau_bo_placement_set(nvbo, memtype, 0);

	/* drop pin_refcnt temporarily, so we don't trip the assertion
	 * in nouveau_bo_move() that makes sure we're not trying to
	 * move a pinned buffer
	 */
	nvbo->pin_refcnt--;
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;
	nvbo->pin_refcnt++;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
	ttm_bo_unreserve(bo);
	return ret;
}
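/* Drop a pin reference; when the last one goes away, allow the buffer
 * to be evicted again and return its size to the accounting.
 */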
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}
void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
					PAGE_SIZE, DMA_FROM_DEVICE);
}
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}
void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvxx_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (!drm->agp.bridge)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.bridge) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}
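/* Per-generation copy-engine backends used for buffer moves.  Each
 * *_bo_move_init() binds the object class to the channel, and each
 * *_bo_move_copy()/*_m2mf()/*_exec() emits the commands for one move.
 */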
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}
static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}
static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}
static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}
static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}
static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, chan->drm->ntfy.handle);
		OUT_RING  (chan, chan->vram.handle);
		OUT_RING  (chan, chan->vram.handle);
	}

	return ret;
}
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}
static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, chan->drm->ntfy.handle);
	}

	return ret;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
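/* Allocate and map temporary GPU virtual address ranges covering the
 * source and destination of an upcoming copy-engine move.
 */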
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nvkm_mem *old_node = bo->mem.mm_node;
	struct nvkm_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
			  NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
			  NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nvkm_vm_put(&old_node->vma[0]);
		return ret;
	}

	nvkm_vm_map(&old_node->vma[0], old_node);
	nvkm_vm_map(&old_node->vma[1], new_node);
	return 0;
}
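/* Perform a buffer move on the GPU, using whichever copy method was
 * selected at load time, and fence the transfer so TTM can defer the
 * cleanup of the old backing store until the copy has completed.
 */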
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}
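/* Probe the table below from newest to oldest object class and remember
 * the first copy method the hardware accepts; if none works, buffer
 * copies fall back to the CPU.
 */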
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(&chan->user,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
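/* Moves between VRAM and SYSTEM are staged through a temporary GART (TT)
 * placement: flipd copies out of VRAM before the final move to SYSTEM,
 * flips binds the pages into GART before copying into VRAM.
 */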
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
			      (new_mem->mem_type == TTM_PL_VRAM ||
			       nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
			nvkm_vm_map(vma, new_mem->mm_node);
		} else {
			nvkm_vm_unmap(vma);
		}
	}
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}
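/* Main TTM move callback: try the "fake" copy (no backing store), then a
 * hardware-assisted copy, and finally fall back to a CPU memcpy.
 */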
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);

out:
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nvkm_mem *node = mem->mm_node;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !drm->agp.cma;
		}
#endif
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = device->func->resource_addr(device, 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nvkm_bar *bar = nvxx_bar(&drm->device);
			int page_shift = 12;
			if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
				page_shift = node->page_shift;

			ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
					    &node->bar_vma);
			if (ret)
				return ret;

			nvkm_vm_map(&node->bar_vma, node);
			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	nvkm_vm_unmap(&node->bar_vma);
	nvkm_vm_put(&node->bar_vma);
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}
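/* Populate the backing pages for a ttm_tt and set up DMA mappings for
 * them, taking the AGP or swiotlb paths when those are in use.
 */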
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->device);
	dev = drm->dev;
	pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->device);
	dev = drm->dev;
	pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
	.lru_tail = &ttm_bo_default_lru_tail,
	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
struct nvkm_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
{
	struct nvkm_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
		   struct nvkm_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
			  NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		list_del(&vma->head);
	}
}