/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>

#include "ttm_module.h"
static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
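/* Global counts of pages allocated through TTM; compared against the
 * limits above so that ttm_tt_populate() can trigger swapout once the
 * system-wide budget is exceeded.
 */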
static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;
/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}
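/* Allocate the page and DMA-address arrays in a single allocation; the
 * DMA addresses are stored directly behind the page pointers.
 */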
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages,
				    sizeof(*ttm->pages) +
				    sizeof(*ttm->dma_address),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}
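/* SG-backed tts get their pages from an sg table, so only the
 * DMA-address array needs allocating here.
 */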
static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}
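/* Destruction is delegated to the driver, which knows the real size of
 * its ttm_tt subclass.
 */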
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
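/* Common field initialization shared by ttm_tt_init() and ttm_sg_tt_init(). */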
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching)
{
	ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
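/* Variant of ttm_tt_init() for tts that also need DMA addresses, either
 * combined with a page array or, for SG tts, on their own.
 */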
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
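/* Copy the contents of a swapped-out tt back from its shmem file into the
 * (already populated) page array, then drop the swap storage.
 */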
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}
/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}
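/* Point each backing page's ->mapping at the device's address_space;
 * skipped for SG tts, whose pages TTM does not own.
 */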
static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}
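/* Allocate backing pages for a tt, accounting them against the global
 * limits and swapping other buffers out if the budget is exceeded.
 * Previously swapped-out contents are read back in at the end.
 */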
int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm_tt_add_mapping(bdev, ttm);
	ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
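/* Undo ttm_tt_add_mapping() before the pages are returned to the pool. */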
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}
#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif

/*
 * ttm_tt_mgr_init - register with the MM shrinker
 *
 * Register with the MM shrinker for swapping out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}
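/* Map a single tt page into kernel space with the caching-appropriate
 * page protection, for use through the ttm_kmap_iter interface.
 */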
static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct dma_buf_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
							 iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct dma_buf_map *map)
{
	kunmap_local(map->vaddr);
}
static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};
/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);