/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
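
/*
 * A "ghost" buffer object: it temporarily takes over the old backing
 * storage (and holds a reference to the original BO) while an accelerated
 * move is still in flight.
 */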
struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};
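
/*
 * Populate the backing pages of the TT and bind it to the new resource.
 * Nothing to do when the destination is system memory.
 */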
int ttm_bo_move_to_new_tt_mem(struct ttm_buffer_object *bo,
			      struct ttm_operation_ctx *ctx,
			      struct ttm_resource *new_mem)
{
	int ret;

	if (new_mem->mem_type == TTM_PL_SYSTEM)
		return 0;

	ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_tt_bind(bo, new_mem);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_to_new_tt_mem);
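
/*
 * Wait for the buffer to become idle, then unbind its TT and release the
 * current resource, leaving the BO in the TTM_PL_SYSTEM placement.
 */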
static int ttm_bo_move_to_system(struct ttm_buffer_object *bo,
				 struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type == TTM_PL_SYSTEM)
		return 0;

	ret = ttm_bo_wait_ctx(bo, ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			pr_err("Failed to expire sync object before unbinding TTM\n");
		return ret;
	}

	ttm_bo_tt_unbind(bo);
	ttm_resource_free(bo, &bo->mem);
	old_mem->mem_type = TTM_PL_SYSTEM;
	return 0;
}
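
/*
 * Move a buffer between TT-backed placements: drop it back to system
 * memory first, then populate and bind its TT into the new placement.
 */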
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem)
{
	int ret;

	ret = ttm_bo_move_to_system(bo, ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_move_to_new_tt_mem(bo, ctx, new_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_assign_mem(bo, new_mem);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
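
/*
 * Ask the driver to set up bus address information for @mem so that it can
 * be ioremapped or CPU-mapped. A no-op if the information is already there
 * or if the driver has no io_mem_reserve callback.
 */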
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->driver->io_mem_reserve)
		return 0;

	return bdev->driver->io_mem_reserve(bdev, mem);
}
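
/*
 * Release the bus address information set up by ttm_mem_io_reserve().
 */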
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}
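
/*
 * Map a whole resource for CPU access. Reuses a driver-provided bus
 * mapping when there is one; leaves *virtual NULL for resources that are
 * not io memory.
 */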
static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
				struct ttm_resource *mem,
				void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		if (mem->bus.caching == ttm_write_combined)
			addr = ioremap_wc(mem->bus.offset, bus_size);
		else
			addr = ioremap(mem->bus.offset, bus_size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}
static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
				 struct ttm_resource *mem,
				 void *virtual)
{
	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}
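
/*
 * Per-page copy helpers for ttm_bo_move_memcpy(): copy one page between
 * io memory and/or TT pages, mapping TT pages with the requested
 * protection while copying.
 */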
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	kunmap_atomic(dst);

	return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	kunmap_atomic(src);

	return 0;
}
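
/*
 * CPU-copy fallback move: map both the old and the new resource (when they
 * are io memory) and copy the buffer page by page, choosing the copy
 * direction so that overlapping moves within the same memory type stay
 * correct. Typically used when no accelerated copy engine is available.
 */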
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *old_mem = &bo->mem;
	struct ttm_resource old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;

	ttm_bo_assign_mem(bo, new_mem);

	if (!man->use_tt)
		ttm_bo_tt_destroy(bo);

out1:
	ttm_resource_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_resource_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_resource_free(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
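
/*
 * Destroy callback for ghost objects: drop the reference held on the
 * original BO and free the wrapper.
 */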
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}
/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	fbo->base.pin_count = 1;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}
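
/*
 * Compute the page protection to use when mapping a resource: adjust @tmp
 * for write-combined or uncached access depending on the caching mode of
 * the resource (or of the TT for TT-backed resources) and on what the
 * architecture supports.
 */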
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	/* Cached mappings need no adjustment */
	if (caching == ttm_cached)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
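
/*
 * Map part of a buffer object into kernel address space, using ioremap for
 * io memory and kmap/vmap for TT pages. Release the mapping again with
 * ttm_bo_kunmap().
 *
 * Sketch of typical (hypothetical) driver usage, assuming @bo is reserved
 * and at least one page in size:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret = ttm_bo_kmap(bo, 0, 1, &map);
 *
 *	if (!ret) {
 *		void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		...access virt, honoring is_iomem...
 *		ttm_bo_kunmap(&map);
 *	}
 */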
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
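
/*
 * Idle the buffer, destroy its TT unless the destination placement is
 * TT-backed, and free the old resource.
 */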
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;
	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->mem);
	return 0;
}
static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}
static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);

	/*
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->mem);

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);
}
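
/*
 * Finish bookkeeping for an accelerated move: attach the copy fence to the
 * BO and either hang the old backing storage on a ghost object (normal
 * moves), remember the fence for a pipelined eviction, or simply wait and
 * free the old node, before assigning the new resource to the BO.
 *
 * Sketch of typical driver usage from a move callback, assuming the driver
 * has already scheduled a copy that will signal @fence (driver_copy_buffer
 * is a hypothetical helper):
 *
 *	fence = driver_copy_buffer(bo, new_mem);
 *	return ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 */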
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
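
/*
 * Drop a buffer object's backing without waiting for it to become idle:
 * hand the current state over to a ghost object and leave the BO with an
 * empty TTM_PL_SYSTEM placement.
 */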
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);