1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
* of the Software.
27 **************************************************************************/
/*
 * Per-CPU IPI callback used by drm_ttm_cache_flush() below.
 * NOTE(review): body not visible in this chunk — presumably a
 * wbinvd-style local cache flush; confirm against full source.
 */
31 static void drm_ttm_ipi_handler(void *null)
/*
 * Flush CPU caches on every processor so no stale cache lines remain
 * for pages whose caching attributes are about to change.
 */
36 static void drm_ttm_cache_flush(void)
/* Old four-argument on_each_cpu(): retry=1, wait=1 (block until done). */
38 if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
39 DRM_ERROR("Timed out waiting for drm cache flush.\n");
44 * Use kmalloc if possible. Otherwise fall back to vmalloc.
/*
 * Allocate the ttm->pages pointer array (one struct page * per page).
 * NOTE(review): several lines are missing from this chunk (early-return
 * on memctl failure, else-branch brace, NULL check) — do not infer exact
 * control flow from what is shown here.
 */
47 static void ttm_alloc_pages(drm_ttm_t *ttm)
49 unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
/* Charge the array against the DRM memory-usage accounting first. */
52 if (drm_alloc_memctl(size))
/* Small arrays come from the page allocator via drm_calloc()... */
55 if (size <= PAGE_SIZE) {
56 ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
/* ...larger ones from vmalloc; flag records which free path to use. */
59 ttm->pages = vmalloc_user(size);
61 ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
/* Presumably releases the accounting when allocation failed — confirm. */
64 drm_free_memctl(size);
/*
 * Free the ttm->pages array, mirroring ttm_alloc_pages(): the
 * DRM_TTM_PAGE_VMALLOC flag selects vfree vs. drm_free, and the
 * memctl accounting charged at allocation time is returned.
 * NOTE(review): the vfree() call in the flag branch is not visible here.
 */
68 static void ttm_free_pages(drm_ttm_t *ttm)
70 unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
72 if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
/* Clear the flag so a repeated free cannot take the vmalloc path. */
74 ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
76 drm_free(ttm->pages, size, DRM_MEM_TTM);
/* Return the accounting reserved in ttm_alloc_pages(). */
78 drm_free_memctl(size);
83 * Unmap all vma pages from vmas mapping this ttm.
/*
 * Zap every user-space PTE covering this ttm by punching a hole in the
 * device address space with unmap_mapping_range(). Needed before the
 * pages' caching attributes change or the pages are freed.
 */
86 static int unmap_vma_pages(drm_ttm_t * ttm)
88 drm_device_t *dev = ttm->dev;
/* Byte offset/length of the ttm within dev->dev_mapping. */
89 loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
90 loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
/* Compat path for kernels with the odd mm locking requirements. */
92 #ifdef DRM_ODD_MM_COMPAT
94 ret = drm_ttm_lock_mm(ttm);
/* even_cows=1: also evict private COW copies of these pages. */
98 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
99 #ifdef DRM_ODD_MM_COMPAT
100 drm_ttm_finish_unmap(ttm);
106 * Change caching policy for the linear kernel map
107 * for range of pages in a ttm.
/*
 * noncached is either 0 or DRM_TTM_PAGE_UNCACHED; the flag value doubles
 * as the requested state, which makes the equality test below work.
 */
110 static int drm_set_caching(drm_ttm_t * ttm, int noncached)
113 struct page **cur_page;
/* Already in the requested state — nothing to do. */
116 if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
/* Flush all CPU caches before changing attributes (avoids stale lines). */
120 drm_ttm_cache_flush();
122 for (i = 0; i < ttm->num_pages; ++i) {
123 cur_page = ttm->pages + i;
/* Only lowmem pages have a permanent kernel mapping to adjust. */
125 if (!PageHighMem(*cur_page)) {
/* map_page_into_agp = mark uncached; unmap = restore cached. */
127 map_page_into_agp(*cur_page);
129 unmap_page_from_agp(*cur_page);
/* Flush TLBs after the attribute changes. */
136 flush_agp_mappings();
/* Record the new state in the page flags. */
138 DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
144 * Free all resources associated with a ttm.
/*
 * Tear down a ttm: restore cached attributes, release each page, then
 * free the ttm struct itself. Refuses (NOTE(review): presumably returns
 * -EBUSY, matching the check in drm_ttm_object_remove — confirm, the
 * return statement is not visible here) while user VMAs still map it.
 * NOTE(review): sizeable gaps in this chunk, incl. backend destroy and
 * the ttm_free_pages() call.
 */
147 int drm_destroy_ttm(drm_ttm_t * ttm)
151 struct page **cur_page;
152 drm_ttm_backend_t *be;
/* Live mappings would dereference freed pages — bail out. */
157 if (atomic_read(&ttm->vma_count) > 0) {
159 DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
163 DRM_DEBUG("Destroying a ttm\n");
165 #ifdef DRM_TTM_ODD_COMPAT
166 BUG_ON(!list_empty(&ttm->vma_list));
167 BUG_ON(!list_empty(&ttm->p_mm_list));
176 drm_buffer_manager_t *bm = &ttm->dev->bm;
/* Pages must be cacheable again before returning them to the kernel. */
177 if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
178 drm_set_caching(ttm, 0);
180 for (i = 0; i < ttm->num_pages; ++i) {
181 cur_page = ttm->pages + i;
/* 2.6.15+ locks pages instead of marking them reserved. */
183 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
184 unlock_page(*cur_page);
186 ClearPageReserved(*cur_page);
/* Sanity checks: we should hold the only reference, unmapped. */
188 if (page_count(*cur_page) != 1) {
189 DRM_ERROR("Erroneous page count. "
192 if (page_mapped(*cur_page)) {
193 DRM_ERROR("Erroneous map count. "
194 "Leaking page mappings.\n");
/* Return the page and its memctl accounting (see drm_ttm_populate). */
201 __free_page(*cur_page);
202 drm_free_memctl(PAGE_SIZE);
209 drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
/*
 * Allocate backing pages for an unpopulated ttm and hand them to the
 * backend. Idempotent: returns early unless state == ttm_unpopulated.
 * NOTE(review): error-return paths and the pre-2.6.15 lock_page branch
 * are missing from this chunk.
 */
213 static int drm_ttm_populate(drm_ttm_t * ttm)
217 drm_buffer_manager_t *bm;
218 drm_ttm_backend_t *be;
220 if (ttm->state != ttm_unpopulated)
225 for (i = 0; i < ttm->num_pages; ++i) {
226 page = ttm->pages[i];
/* Charge accounting per page before allocating it. */
228 if (drm_alloc_memctl(PAGE_SIZE)) {
/* Zeroed page below 4GB so 32-bit-DMA devices can reach it. */
231 page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
/* Presumably undoes the accounting on alloc_page failure — confirm. */
233 drm_free_memctl(PAGE_SIZE);
236 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
/* Reserved/locked pages are skipped by swap and page migration. */
239 SetPageReserved(page);
241 ttm->pages[i] = page;
/* Tell the AGP/driver backend about the final page array. */
245 be->populate(be, ttm->num_pages, ttm->pages);
246 ttm->state = ttm_unbound;
/*
 * Allocate and initialize a ttm large enough for @size bytes (rounded
 * up to whole pages). Returns NULL on failure; on the error paths
 * drm_destroy_ttm() reclaims whatever was already set up.
 * NOTE(review): NULL checks and return statements between the visible
 * lines are missing from this chunk.
 */
254 static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
256 drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
262 ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
266 #ifdef DRM_ODD_MM_COMPAT
267 INIT_LIST_HEAD(&ttm->p_mm_list);
268 INIT_LIST_HEAD(&ttm->vma_list);
272 atomic_set(&ttm->vma_count, 0);
/* Round the byte size up to a whole number of pages. */
275 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
280 * Account also for AGP module memory usage.
283 ttm_alloc_pages(ttm);
285 drm_destroy_ttm(ttm);
286 DRM_ERROR("Failed allocating page table\n");
/* Driver-specific backend (AGP or otherwise) does the actual binding. */
289 ttm->be = bo_driver->create_ttm_backend_entry(dev);
291 drm_destroy_ttm(ttm);
292 DRM_ERROR("Failed creating ttm backend entry\n");
295 ttm->state = ttm_unpopulated;
300 * Unbind a ttm region from the aperture.
/*
 * Move a bound ttm to the ttm_evicted state. When the backend needs
 * uncached pages, user mappings are zapped first so no VMA keeps an
 * uncached PTE alive. NOTE(review): the switch cases, the backend
 * unbind call and the return are not visible in this chunk.
 */
303 int drm_evict_ttm(drm_ttm_t * ttm)
305 drm_ttm_backend_t *be = ttm->be;
308 switch (ttm->state) {
310 if (be->needs_ub_cache_adjust(be)) {
311 ret = unmap_vma_pages(ttm);
321 ttm->state = ttm_evicted;
/*
 * After an eviction, restore cached page attributes (if the backend had
 * required uncached ones) and move the ttm back to ttm_unbound.
 * No-op unless the ttm is currently ttm_evicted.
 */
325 void drm_fixup_ttm_caching(drm_ttm_t * ttm)
328 if (ttm->state == ttm_evicted) {
329 drm_ttm_backend_t *be = ttm->be;
330 if (be->needs_ub_cache_adjust(be)) {
331 drm_set_caching(ttm, 0);
333 ttm->state = ttm_unbound;
/*
 * Fully unbind a ttm: evict it if bound, then restore cached page
 * attributes via drm_fixup_ttm_caching(). NOTE(review): the error
 * check between the two calls is not visible in this chunk.
 */
337 int drm_unbind_ttm(drm_ttm_t * ttm)
341 if (ttm->state == ttm_bound)
342 ret = drm_evict_ttm(ttm);
347 drm_fixup_ttm_caching(ttm);
/*
 * Bind a ttm into the aperture at @aper_offset, populating it first if
 * needed. For an uncached binding the user mappings are zapped and the
 * kernel map attributes switched to uncached before be->bind().
 * On bind failure the state is rolled back to ttm_evicted.
 * NOTE(review): many intermediate lines (early returns, braces, the
 * pre-bind DRM_ODD_MM_COMPAT remap) are missing from this chunk.
 */
351 int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
355 drm_ttm_backend_t *be;
/* Already bound: nothing to do. */
359 if (ttm->state == ttm_bound)
364 ret = drm_ttm_populate(ttm);
/* Uncached bind from unbound: kill user PTEs, then flip attributes. */
367 if (ttm->state == ttm_unbound && !cached) {
368 ret = unmap_vma_pages(ttm);
372 drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
374 #ifdef DRM_ODD_MM_COMPAT
/* Evicted ttm re-bound uncached: compat path keeps mm locked. */
375 else if (ttm->state == ttm_evicted && !cached) {
376 ret = drm_ttm_lock_mm(ttm);
381 if ((ret = be->bind(be, aper_offset, cached))) {
/* Roll back so drm_fixup_ttm_caching() can restore attributes. */
382 ttm->state = ttm_evicted;
383 #ifdef DRM_ODD_MM_COMPAT
384 if (be->needs_ub_cache_adjust(be))
385 drm_ttm_unlock_mm(ttm);
387 DRM_ERROR("Couldn't bind backend.\n");
391 ttm->aper_offset = aper_offset;
392 ttm->state = ttm_bound;
394 #ifdef DRM_ODD_MM_COMPAT
/* Re-establish user mappings that were zapped above. */
395 if (be->needs_ub_cache_adjust(be)) {
396 ret = drm_ttm_remap_bound(ttm);
406 * dev->struct_mutex locked.
/*
 * Remove a ttm object from the device book-keeping and free it:
 * drop the hash-table entry, release the file-offset mmap node, destroy
 * the embedded ttm, and free map + object. Safe on partially
 * constructed objects (each step is guarded), which is why the create
 * path's error handling just calls this.
 * NOTE(review): lines assigning `map` and the surrounding if/else are
 * missing from this chunk.
 */
408 static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
410 drm_map_list_t *list = &object->map_list;
411 drm_local_map_t *map;
/* Only present once drm_ttm_object_create() inserted the hash item. */
413 if (list->user_token)
414 drm_ht_remove_item(&dev->map_hash, &list->hash);
416 if (list->file_offset_node) {
417 drm_mm_put_block(list->file_offset_node);
418 list->file_offset_node = NULL;
/* map->offset stores the ttm pointer (see drm_ttm_object_create). */
424 drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
/* -EBUSY means VMAs still map the ttm; then the map must stay alive. */
426 if (drm_destroy_ttm(ttm) != -EBUSY) {
427 drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
430 drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
434 drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
/*
 * Drop one reference; destroys the object when it hits zero.
 * Caller must already hold dev->struct_mutex (contrast with
 * drm_ttm_object_deref_unlocked below).
 */
437 void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
439 if (atomic_dec_and_test(&to->usage)) {
440 drm_ttm_object_remove(dev, to);
/*
 * Drop one reference without the caller holding dev->struct_mutex.
 * The count is re-checked under the mutex before removal, in case
 * another thread re-acquired a reference between the decrement and
 * taking the lock.
 */
444 void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
446 if (atomic_dec_and_test(&to->usage)) {
447 mutex_lock(&dev->struct_mutex);
448 if (atomic_read(&to->usage) == 0)
449 drm_ttm_object_remove(dev, to);
450 mutex_unlock(&dev->struct_mutex);
455 * Create a ttm and add it to the drm book-keeping.
456 * dev->struct_mutex locked.
/*
 * Build a drm_ttm_object: allocate object + map, create the ttm, carve
 * out an mmap offset from dev->offset_manager, and register the offset
 * in dev->map_hash so user space can mmap it. All error paths funnel
 * through drm_ttm_object_remove(), which tolerates partial setup.
 * NOTE(review): NULL checks and return statements between the visible
 * lines are missing from this chunk; presumably returns 0 on success
 * and -ENOMEM on the visible failure paths — confirm against full source.
 */
459 int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
460 uint32_t flags, drm_ttm_object_t ** ttm_object)
462 drm_ttm_object_t *object;
463 drm_map_list_t *list;
464 drm_local_map_t *map;
467 object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
470 object->flags = flags;
471 list = &object->map_list;
473 list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
475 drm_ttm_object_remove(dev, object);
480 ttm = drm_init_ttm(dev, size);
482 DRM_ERROR("Could not create ttm\n");
483 drm_ttm_object_remove(dev, object);
/* A _DRM_TTM map smuggles the ttm pointer in ->offset (kernel-only). */
487 map->offset = (unsigned long)ttm;
488 map->type = _DRM_TTM;
489 map->flags = _DRM_REMOVABLE;
490 map->size = ttm->num_pages * PAGE_SIZE;
491 map->handle = (void *)object;
494 * Add a one-page "hole" to the block size to avoid the mm subsystem
496 * FIXME: Is this really needed?
/* Find, then claim, num_pages+1 pages worth of mmap-offset space. */
499 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
500 ttm->num_pages + 1, 0, 0);
501 if (!list->file_offset_node) {
502 drm_ttm_object_remove(dev, object);
505 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
506 ttm->num_pages + 1, 0);
/* The mmap-offset start doubles as the hash key for lookup. */
508 list->hash.key = list->file_offset_node->start;
510 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
511 drm_ttm_object_remove(dev, object);
/* user_token is the byte offset user space passes to mmap(). */
515 list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
516 ttm->mapping_offset = list->hash.key;
/* Object is born with one reference, owned by the caller. */
517 atomic_set(&object->usage, 1);
518 *ttm_object = object;