/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
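/*
 * Changing the caching attributes of ttm pages requires the CPU
 * caches to be flushed on every processor first. on_each_cpu() sends
 * an IPI so that each CPU runs the flush handler before we proceed.
 */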
static void drm_ttm_ipi_handler(void *null)
{
	flush_agp_cache();
}

void drm_ttm_cache_flush(void)
{
	if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
		DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
EXPORT_SYMBOL(drm_ttm_cache_flush);
/*
 * Use kmalloc if possible. Otherwise fall back to vmalloc.
 */
static void ttm_alloc_pages(struct drm_ttm *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);

	ttm->pages = NULL;
	if (drm_alloc_memctl(size))
		return;

	if (size <= PAGE_SIZE)
		ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
	}

	/* Both allocators failed: undo the memctl accounting. */
	if (!ttm->pages)
		drm_free_memctl(size);
}
static void ttm_free_pages(struct drm_ttm *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);

	if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
	} else
		drm_free(ttm->pages, size, DRM_MEM_TTM);

	ttm->pages = NULL;
	drm_free_memctl(size);
}
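/*
 * Allocate a single, zeroed page for a ttm, accounted against the
 * memctl limits. GFP_DMA32 keeps the page addressable by devices
 * restricted to 32-bit DMA; locking (or, on pre-2.6.15 kernels,
 * reserving) the page keeps the VM from reclaiming it.
 */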
static struct page *drm_ttm_alloc_page(void)
{
	struct page *page;

	if (drm_alloc_memctl(PAGE_SIZE))
		return NULL;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!page) {
		drm_free_memctl(PAGE_SIZE);
		return NULL;
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
	SetPageLocked(page);
#else
	SetPageReserved(page);
#endif
	return page;
}
/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int drm_set_caching(struct drm_ttm *ttm, int noncached)
{
	int i;
	struct page **cur_page;
	int do_tlbflush = 0;

	if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
		return 0;

	if (noncached)
		drm_ttm_cache_flush();

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages + i;
		if (*cur_page && !PageHighMem(*cur_page)) {
			if (noncached)
				map_page_into_agp(*cur_page);
			else
				unmap_page_from_agp(*cur_page);
			do_tlbflush = 1;
		}
	}
	if (do_tlbflush)
		flush_agp_mappings();

	DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
	return 0;
}
static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
{
	struct mm_struct *mm = ttm->user_mm;
	int write;
	int dirty;
	struct page *page;
	int i;

	BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
	write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
	dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);

	down_read(&mm->mmap_sem);
	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		page_cache_release(page);
	}
	up_read(&mm->mmap_sem);
}
static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
{
	int i;
	struct drm_buffer_manager *bm = &ttm->dev->bm;
	struct page **cur_page;

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages + i;
		if (*cur_page) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
			unlock_page(*cur_page);
#else
			ClearPageReserved(*cur_page);
#endif
			if (page_count(*cur_page) != 1)
				DRM_ERROR("Erroneous page count. "
					  "Leaking pages.\n");
			if (page_mapped(*cur_page))
				DRM_ERROR("Erroneous map count. "
					  "Leaking page mappings.\n");
			__free_page(*cur_page);
			drm_free_memctl(PAGE_SIZE);
			--bm->cur_pages;
		}
	}
}
/*
 * Free all resources associated with a ttm.
 */
int drm_destroy_ttm(struct drm_ttm *ttm)
{
	struct drm_ttm_backend *be;

	if (!ttm)
		return 0;

	be = ttm->be;
	if (be) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (ttm->pages) {
		if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
			drm_set_caching(ttm, 0);

		if (ttm->page_flags & DRM_TTM_PAGE_USER)
			drm_ttm_free_user_pages(ttm);
		else
			drm_ttm_free_alloced_pages(ttm);

		ttm_free_pages(ttm);
	}

	drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
	return 0;
}
struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
{
	struct page *p;
	struct drm_buffer_manager *bm = &ttm->dev->bm;

	p = ttm->pages[index];
	if (!p) {
		p = drm_ttm_alloc_page();
		if (!p)
			return NULL;
		ttm->pages[index] = p;
		++bm->cur_pages;
	}
	return p;
}
EXPORT_SYMBOL(drm_ttm_get_page);
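/*
 * Pin a range of user-space pages into a ttm with get_user_pages().
 * For read-only bindings, holes in the range are backed by the shared
 * dummy_read_page; a writable binding must pin every page, since the
 * shared dummy page cannot safely absorb writes.
 */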
int drm_ttm_set_user(struct drm_ttm *ttm,
		     struct task_struct *tsk,
		     int write,
		     unsigned long start,
		     unsigned long num_pages,
		     struct page *dummy_read_page)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int i;

	BUG_ON(num_pages != ttm->num_pages);

	ttm->user_mm = mm;
	ttm->dummy_read_page = dummy_read_page;
	ttm->page_flags = DRM_TTM_PAGE_USER |
		((write) ? DRM_TTM_PAGE_USER_WRITE : 0);

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		drm_ttm_free_user_pages(ttm);
		return -ENOMEM;
	}

	/* Back any read-only holes with the dummy page. */
	for (i = 0; i < num_pages; ++i) {
		if (ttm->pages[i] == NULL)
			ttm->pages[i] = ttm->dummy_read_page;
	}

	return 0;
}
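/*
 * Allocate any pages the ttm is still missing and hand the complete
 * page array to the backend, moving the ttm to the ttm_unbound state.
 */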
int drm_ttm_populate(struct drm_ttm *ttm)
{
	struct page *page;
	unsigned long i;
	struct drm_ttm_backend *be;

	if (ttm->state != ttm_unpopulated)
		return 0;

	be = ttm->be;
	for (i = 0; i < ttm->num_pages; ++i) {
		page = drm_ttm_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}
	be->func->populate(be, ttm->num_pages, ttm->pages);
	ttm->state = ttm_unbound;
	return 0;
}
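/*
 * Initialize a ttm: allocate the ttm structure, its page table and
 * the driver-specific backend used to bind it into the aperture.
 */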
struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
{
	struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
	struct drm_ttm *ttm;

	if (!bo_driver)
		return NULL;

	ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
	if (!ttm)
		return NULL;

	ttm->dev = dev;
	atomic_set(&ttm->vma_count, 0);
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/*
	 * Account also for AGP module memory usage.
	 */

	ttm_alloc_pages(ttm);
	if (!ttm->pages) {
		drm_destroy_ttm(ttm);
		DRM_ERROR("Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(dev);
	if (!ttm->be) {
		drm_destroy_ttm(ttm);
		DRM_ERROR("Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = ttm_unpopulated;
	return ttm;
}
/*
 * Unbind a ttm region from the aperture.
 */
void drm_ttm_evict(struct drm_ttm *ttm)
{
	struct drm_ttm_backend *be = ttm->be;
	int ret;

	if (ttm->state == ttm_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
	}

	ttm->state = ttm_evicted;
}
void drm_ttm_fixup_caching(struct drm_ttm *ttm)
{
	if (ttm->state == ttm_evicted) {
		struct drm_ttm_backend *be = ttm->be;
		if (be->func->needs_ub_cache_adjust(be))
			drm_set_caching(ttm, 0);
		ttm->state = ttm_unbound;
	}
}
void drm_ttm_unbind(struct drm_ttm *ttm)
{
	if (ttm->state == ttm_bound)
		drm_ttm_evict(ttm);

	drm_ttm_fixup_caching(ttm);
}
int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
{
	struct drm_bo_driver *bo_driver;
	struct drm_ttm_backend *be;
	int ret = 0;

	/* Check the ttm before dereferencing it for the driver lookup. */
	if (!ttm)
		return -EINVAL;
	if (ttm->state == ttm_bound)
		return 0;

	bo_driver = ttm->dev->driver->bo_driver;
	be = ttm->be;

	ret = drm_ttm_populate(ttm);
	if (ret)
		return ret;

	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
	} else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
		   bo_driver->ttm_cache_flush)
		bo_driver->ttm_cache_flush(ttm);

	if ((ret = be->func->bind(be, bo_mem))) {
		ttm->state = ttm_evicted;
		DRM_ERROR("Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = ttm_bound;
	if (ttm->page_flags & DRM_TTM_PAGE_USER)
		ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(drm_bind_ttm);
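/*
 * Illustrative sketch (not part of the driver): how buffer-object
 * code is expected to drive the ttm state machine. The caller, its
 * error handling and the bo_mem argument here are hypothetical; only
 * the drm_ttm_* calls are real.
 */
#if 0
static int example_ttm_lifecycle(struct drm_device *dev,
				 struct drm_bo_mem_reg *bo_mem,
				 unsigned long size)
{
	struct drm_ttm *ttm;
	int ret;

	ttm = drm_ttm_init(dev, size);		/* state: ttm_unpopulated */
	if (!ttm)
		return -ENOMEM;

	ret = drm_bind_ttm(ttm, bo_mem);	/* populates, then binds */
	if (ret) {
		drm_destroy_ttm(ttm);
		return ret;
	}

	drm_ttm_unbind(ttm);			/* ttm_bound -> ttm_unbound */
	return drm_destroy_ttm(ttm);		/* frees pages and backend */
}
#endif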