/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#define pr_fmt(fmt) "[TTM DEVICE] " fmt

#include <linux/debugfs.h>
#include <linux/mm.h>

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_api.h>

#include "ttm_module.h"

/*
 * ttm_global_mutex - protecting the global state
 */
static DEFINE_MUTEX(ttm_global_mutex);
static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);
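
/* Root directory beneath which TTM's debugfs files are created. */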
struct dentry *ttm_debugfs_root;

static void ttm_global_release(void)
{
	struct ttm_global *glob = &ttm_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_glob_use_count > 0)
		goto out;

	ttm_pool_mgr_fini();
	debugfs_remove(ttm_debugfs_root);

	__free_page(glob->dummy_read_page);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}

static int ttm_global_init(void)
{
	struct ttm_global *glob = &ttm_glob;
	unsigned long num_pages, num_dma32;
	struct sysinfo si;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_glob_use_count > 1)
		goto out;

	si_meminfo(&si);

	ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
	if (IS_ERR(ttm_debugfs_root)) {
		ttm_debugfs_root = NULL;
	}

	/* Limit the number of pages in the pool to about 50% of the total
	 * system memory.
	 */
	num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
	num_pages /= 2;

	/* But for DMA32 we limit ourselves to only use 2GiB maximum. */
	num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
		>> PAGE_SHIFT;
	num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));
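
	/*
	 * Illustrative numbers (not from the original source), assuming
	 * 4 KiB pages: on a 16 GiB machine num_pages ends up as 2097152
	 * pages (8 GiB), while num_dma32 is capped at 524288 pages (2 GiB)
	 * no matter how much low memory is installed.
	 */
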
	ttm_pool_mgr_init(num_pages);
	ttm_tt_mgr_init(num_pages, num_dma32);

	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
				&glob->bo_count);
out:
	if (ret && ttm_debugfs_root)
		debugfs_remove(ttm_debugfs_root);
	if (ret)
		--ttm_glob_use_count;
	mutex_unlock(&ttm_global_mutex);
	return ret;
}

/*
 * A buffer object shrink method that tries to swap out the first suitable
 * buffer object from one of the registered devices. A device that made
 * progress is moved to the tail of the device list, so the next call
 * starts with a different device.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
	struct ttm_global *glob = &ttm_glob;
	struct ttm_device *bdev;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	list_for_each_entry(bdev, &glob->device_list, device_list) {
		ret = ttm_device_swapout(bdev, ctx, gfp_flags);
		if (ret > 0) {
			list_move_tail(&bdev->device_list, &glob->device_list);
			break;
		}
	}
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(ttm_global_swapout);
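
/**
 * ttm_device_swapout() - Try to swap out a buffer object from one device
 * @bdev: The device whose TT-backed resource managers are scanned.
 * @ctx: Operation context for the swapout.
 * @gfp_flags: Allocation flags to use while swapping the pages out.
 *
 * Walks the resource managers of @bdev in placement order and swaps out
 * the first buffer object that can be swapped.
 *
 * Return: the number of pages swapped out on success, 0 when no candidate
 * was found, or a negative error code on failure.
 */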
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource_manager *man;
	struct ttm_resource *res;
	unsigned i;
	int ret;

	spin_lock(&bdev->lru_lock);
	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		ttm_resource_manager_for_each_res(man, &cursor, res) {
			struct ttm_buffer_object *bo = res->bo;
			uint32_t num_pages;

			if (!bo || bo->resource != res)
				continue;

			num_pages = PFN_UP(bo->base.size);
			ret = ttm_bo_swapout(bo, ctx, gfp_flags);
			/* ttm_bo_swapout has dropped the lru_lock */
			if (!ret)
				return num_pages;
			if (ret != -EBUSY)
				return ret;
		}
	}
	spin_unlock(&bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
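
/*
 * Periodic work item: retries delayed buffer object destruction and
 * reschedules itself roughly every 10ms until the delayed-destroy list
 * is empty.
 */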
static void ttm_device_delayed_workqueue(struct work_struct *work)
{
	struct ttm_device *bdev =
		container_of(work, struct ttm_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_device_init
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for the buffer objects of this device.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device.
 *
 * Return: 0 on success, a negative error code on failure.
 */
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_global_init();
	if (ret)
		return ret;

	bdev->funcs = funcs;

	ttm_sys_man_init(bdev);
	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
	spin_lock_init(&bdev->lru_lock);
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->pinned);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_device_init);
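
/*
 * Illustrative sketch (driver names are hypothetical): a driver typically
 * embeds struct ttm_device in its own device structure and sets it up once
 * at probe time, with a matching ttm_device_fini() on teardown:
 *
 *	ret = ttm_device_init(&mydev->bdev, &mydrv_ttm_funcs, dev,
 *			      mapping, vma_manager, true, false);
 *	if (ret)
 *		return ret;
 */

/**
 * ttm_device_fini() - Tear down a struct ttm_device
 * @bdev: Device previously set up with ttm_device_init().
 *
 * Unregisters the device, flushes pending delayed work, drains the
 * delayed-destroy list, disables the system domain manager and releases
 * the page pool and the global TTM state.
 */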
void ttm_device_fini(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned i;

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&bdev->lru_lock);

	ttm_pool_fini(&bdev->pool);
	ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);
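
/*
 * Pop each resource off @list, take a reference on its buffer object so a
 * concurrent release cannot free it under us, and unpopulate the TT to
 * drop its DMA mappings. The LRU lock is released around the unpopulate.
 */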
static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
					      struct list_head *list)
{
	struct ttm_resource *res;

	spin_lock(&bdev->lru_lock);
	while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
		struct ttm_buffer_object *bo = res->bo;

		/* Take ref against racing releases once lru_lock is unlocked */
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		list_del_init(&res->lru);
		spin_unlock(&bdev->lru_lock);

		if (bo->ttm)
			ttm_tt_unpopulate(bo->bdev, bo->ttm);

		ttm_bo_put(bo);
		spin_lock(&bdev->lru_lock);
	}
	spin_unlock(&bdev->lru_lock);
}
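
/**
 * ttm_device_clear_dma_mappings() - Drop all DMA mappings of a device
 * @bdev: Device whose mappings should be cleared.
 *
 * Unpopulates the TTs of all pinned and LRU-listed buffer objects of
 * @bdev so that no DMA mappings to the device are left behind, e.g.
 * when the underlying device is about to go away.
 */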
void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned int i, j;

	ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);

	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
			ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
	}
}
EXPORT_SYMBOL(ttm_device_clear_dma_mappings);