// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
static const struct ttm_place vram_placement_flags = {
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};
static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};
struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};
static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};
struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};
static const struct ttm_place evictable_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};
struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}
static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}
/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}
/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}
/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset in pages at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
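
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): iterate over all DMA addresses of a struct vmw_sg_table. Per the
 * convention noted above, the iterator must be advanced with
 * vmw_piter_next() before its first dereference. The same pattern is used
 * by the region-counting loop in vmw_ttm_map_dma() below.
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		... use addr ...
 *	}
 */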
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		     DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}
/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}
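
/*
 * Illustrative pairing sketch (editorial addition, not original code): a
 * successful vmw_ttm_map_for_dma() must be balanced by
 * vmw_ttm_unmap_from_dma() before the CPU writes to the pages again, as
 * required by the DMA API note above.
 *
 *	ret = vmw_ttm_map_for_dma(vmw_tt);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... device-only access to the pages ...
 *	vmw_ttm_unmap_from_dma(vmw_tt);
 */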
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage
 * for the device mappings. If a mapping has already been performed,
 * indicated by the storage pointer being non-NULL, the function returns
 * success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = __sg_alloc_table_from_pages
			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
			 dma_get_max_seg_size(dev_priv->dev->dev),
			 GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}
/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
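
/*
 * Illustrative caller sketch (editorial addition, assumed usage): with @bo
 * reserved or pinned, walk its device addresses. The helper handle_addr()
 * is hypothetical and only stands in for whatever consumes the addresses.
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *		handle_addr(vmw_piter_dma_addr(&iter));
 */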
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}
static void vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
}
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}
static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}
static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}
static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction (unused here).
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}
/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}
struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;
	return ret;
}
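
/*
 * Illustrative caller sketch (editorial addition, not from this file):
 * create a small, populated and device-visible buffer object, then release
 * it when no longer needed. Error handling is simplified.
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE, &bo);
 *
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use bo ...
 *	ttm_bo_put(bo);
 */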