linux-core/drm_ttm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

#include "drmP.h"

static void drm_ttm_ipi_handler(void *null)
{
        flush_agp_cache();
}

static void drm_ttm_cache_flush(void)
{
        if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
                DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
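
/*
 * The IPI above runs flush_agp_cache() on every CPU so that no core
 * keeps a cached alias of pages whose caching attributes are about to
 * change; drm_set_caching() below relies on this flush having run
 * before it remaps pages in the kernel linear map.
 */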


/*
 * Use kmalloc if possible. Otherwise fall back to vmalloc.
 */

static void ttm_alloc_pages(drm_ttm_t *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
        ttm->pages = NULL;

        if (drm_alloc_memctl(size))
                return;

        if (size <= PAGE_SIZE) {
                ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
        }
        if (!ttm->pages) {
                ttm->pages = vmalloc_user(size);
                if (ttm->pages)
                        ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
        }
        if (!ttm->pages) {
                drm_free_memctl(size);
        }
}

static void ttm_free_pages(drm_ttm_t *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);

        if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
                vfree(ttm->pages);
                ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
        } else {
                drm_free(ttm->pages, size, DRM_MEM_TTM);
        }
        drm_free_memctl(size);
        ttm->pages = NULL;
}
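
/*
 * The two helpers above form a pair: ttm_free_pages() picks vfree()
 * or drm_free() based on the DRM_TTM_PAGE_VMALLOC flag set here, and
 * it releases the drm_alloc_memctl() accounting that ttm_alloc_pages()
 * charged.  A failed allocation leaves ttm->pages NULL with nothing
 * accounted, so callers only need to test the pointer:
 *
 *      ttm_alloc_pages(ttm);
 *      if (!ttm->pages)
 *              return -ENOMEM;
 *      ...
 *      ttm_free_pages(ttm);
 */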

/*
 * Unmap all pages from the vmas mapping this ttm.
 */

static int unmap_vma_pages(drm_ttm_t * ttm)
{
        drm_device_t *dev = ttm->dev;
        loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
        loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;

#ifdef DRM_ODD_MM_COMPAT
        int ret;
        ret = drm_ttm_lock_mm(ttm);
        if (ret)
                return ret;
#endif
        unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
#ifdef DRM_ODD_MM_COMPAT
        drm_ttm_finish_unmap(ttm);
#endif
        return 0;
}
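
/*
 * Zapping the user-space mappings forces every vma that maps this ttm
 * to re-fault its pages.  Callers do this before a caching-policy
 * change (see drm_bind_ttm() and drm_evict_ttm()) so no stale PTE with
 * the old attributes survives the transition.
 */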

/*
 * Change the caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int drm_set_caching(drm_ttm_t * ttm, int noncached)
{
        int i;
        struct page **cur_page;
        int do_tlbflush = 0;

        if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
                return 0;

        if (noncached)
                drm_ttm_cache_flush();

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages + i;
                if (*cur_page) {
                        if (!PageHighMem(*cur_page)) {
                                if (noncached) {
                                        map_page_into_agp(*cur_page);
                                } else {
                                        unmap_page_from_agp(*cur_page);
                                }
                                do_tlbflush = 1;
                        }
                }
        }
        if (do_tlbflush)
                flush_agp_mappings();

        DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);

        return 0;
}
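
/*
 * The uncached transition above is ordered as:
 *
 *      drm_ttm_cache_flush();        flush CPU caches on all cores
 *      map_page_into_agp(page);      remap each lowmem page uncached
 *      flush_agp_mappings();         one TLB flush for the whole range
 *
 * Highmem pages are skipped since they have no permanent kernel
 * mapping whose attributes would need changing.
 */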

/*
 * Free all resources associated with a ttm.
 */

int drm_destroy_ttm(drm_ttm_t * ttm)
{
        int i;
        struct page **cur_page;
        drm_ttm_backend_t *be;

        if (!ttm)
                return 0;

        if (atomic_read(&ttm->vma_count) > 0) {
                ttm->destroy = 1;
                DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
                return -EBUSY;
        }

        DRM_DEBUG("Destroying a ttm\n");

#ifdef DRM_ODD_MM_COMPAT
        BUG_ON(!list_empty(&ttm->vma_list));
        BUG_ON(!list_empty(&ttm->p_mm_list));
#endif
        be = ttm->be;
        if (be) {
                be->destroy(be);
                ttm->be = NULL;
        }

        if (ttm->pages) {
                drm_buffer_manager_t *bm = &ttm->dev->bm;
                if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
                        drm_set_caching(ttm, 0);

                for (i = 0; i < ttm->num_pages; ++i) {
                        cur_page = ttm->pages + i;
                        if (*cur_page) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
                                unlock_page(*cur_page);
#else
                                ClearPageReserved(*cur_page);
#endif
                                if (page_count(*cur_page) != 1) {
                                        DRM_ERROR("Erroneous page count. "
                                                  "Leaking pages.\n");
                                }
                                if (page_mapped(*cur_page)) {
                                        DRM_ERROR("Erroneous map count. "
                                                  "Leaking page mappings.\n");
                                }

                                /*
                                 * End debugging.
                                 */

                                __free_page(*cur_page);
                                drm_free_memctl(PAGE_SIZE);
                                --bm->cur_pages;
                        }
                }
                ttm_free_pages(ttm);
        }

        drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
        return 0;
}
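
/*
 * On the checks above: __free_page() only drops this ttm's reference,
 * so a page whose count is still above one survives on its remaining
 * references and is reported as leaked.  The page_mapped() test
 * catches user-space PTEs that should already have been zapped by
 * unmap_vma_pages().
 */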

static int drm_ttm_populate(drm_ttm_t * ttm)
{
        struct page *page;
        unsigned long i;
        drm_buffer_manager_t *bm;
        drm_ttm_backend_t *be;
        int ret;

        if (ttm->state != ttm_unpopulated)
                return 0;

        bm = &ttm->dev->bm;
        be = ttm->be;
        for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (!page) {
                        if (drm_alloc_memctl(PAGE_SIZE)) {
                                return -ENOMEM;
                        }
                        page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
                        if (!page) {
                                drm_free_memctl(PAGE_SIZE);
                                return -ENOMEM;
                        }
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
                        SetPageLocked(page);
#else
                        SetPageReserved(page);
#endif
                        ttm->pages[i] = page;
                        ++bm->cur_pages;
                }
        }
        ret = be->populate(be, ttm->num_pages, ttm->pages);
        if (ret)
                return ret;
        ttm->state = ttm_unbound;
        return 0;
}
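
/*
 * Population is lazy: backing pages are first allocated here rather
 * than at init time, each charged against the memctl quota before
 * alloc_page().  __GFP_ZERO avoids leaking stale kernel data to user
 * space, and GFP_DMA32 keeps pages below 4GB for devices that cannot
 * address more.
 */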

/*
 * Initialize a ttm.
 */

static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
{
        drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
        drm_ttm_t *ttm;

        if (!bo_driver)
                return NULL;

        ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
        if (!ttm)
                return NULL;

#ifdef DRM_ODD_MM_COMPAT
        INIT_LIST_HEAD(&ttm->p_mm_list);
        INIT_LIST_HEAD(&ttm->vma_list);
#endif

        ttm->dev = dev;
        atomic_set(&ttm->vma_count, 0);

        ttm->destroy = 0;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        ttm->page_flags = 0;

        /*
         * Account also for AGP module memory usage.
         */

        ttm_alloc_pages(ttm);
        if (!ttm->pages) {
                drm_destroy_ttm(ttm);
                DRM_ERROR("Failed allocating page table\n");
                return NULL;
        }
        ttm->be = bo_driver->create_ttm_backend_entry(dev);
        if (!ttm->be) {
                drm_destroy_ttm(ttm);
                DRM_ERROR("Failed creating ttm backend entry\n");
                return NULL;
        }
        ttm->state = ttm_unpopulated;
        return ttm;
}
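
/*
 * Internal lifecycle sketch (drm_init_ttm() is static; external users
 * go through drm_ttm_object_create() below):
 *
 *      ttm = drm_init_ttm(dev, size);          ttm_unpopulated
 *      drm_bind_ttm(ttm, cached, offset);      populates, then binds
 *      ...
 *      drm_unbind_ttm(ttm);                    evict + caching fixup
 *      drm_destroy_ttm(ttm);                   frees pages and backend
 */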

/*
 * Unbind a ttm region from the aperture.
 */

int drm_evict_ttm(drm_ttm_t * ttm)
{
        drm_ttm_backend_t *be = ttm->be;
        int ret;

        switch (ttm->state) {
        case ttm_bound:
                if (be->needs_ub_cache_adjust(be)) {
                        ret = unmap_vma_pages(ttm);
                        if (ret) {
                                return ret;
                        }
                }
                be->unbind(be);
                break;
        default:
                break;
        }
        ttm->state = ttm_evicted;
        return 0;
}

void drm_fixup_ttm_caching(drm_ttm_t * ttm)
{
        if (ttm->state == ttm_evicted) {
                drm_ttm_backend_t *be = ttm->be;
                if (be->needs_ub_cache_adjust(be)) {
                        drm_set_caching(ttm, 0);
                }
                ttm->state = ttm_unbound;
        }
}
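
/*
 * State transitions implemented above and below:
 *
 *      ttm_unpopulated --populate--> ttm_unbound --bind--> ttm_bound
 *      ttm_bound --evict--> ttm_evicted --fixup--> ttm_unbound
 *
 * The caching fixup restores write-back caching only when the backend
 * required an uncached binding in the first place.
 */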

int drm_unbind_ttm(drm_ttm_t * ttm)
{
        int ret = 0;

        if (ttm->state == ttm_bound)
                ret = drm_evict_ttm(ttm);

        if (ret)
                return ret;

        drm_fixup_ttm_caching(ttm);
        return 0;
}

int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
{
        int ret = 0;
        drm_ttm_backend_t *be;

        if (!ttm)
                return -EINVAL;
        if (ttm->state == ttm_bound)
                return 0;

        be = ttm->be;

        ret = drm_ttm_populate(ttm);
        if (ret)
                return ret;
        if (ttm->state == ttm_unbound && !cached) {
                ret = unmap_vma_pages(ttm);
                if (ret)
                        return ret;

                drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
        }
#ifdef DRM_ODD_MM_COMPAT
        else if (ttm->state == ttm_evicted && !cached) {
                ret = drm_ttm_lock_mm(ttm);
                if (ret)
                        return ret;
        }
#endif
        ret = be->bind(be, aper_offset, cached);
        if (ret) {
                ttm->state = ttm_evicted;
#ifdef DRM_ODD_MM_COMPAT
                if (be->needs_ub_cache_adjust(be))
                        drm_ttm_unlock_mm(ttm);
#endif
                DRM_ERROR("Couldn't bind backend.\n");
                return ret;
        }

        ttm->aper_offset = aper_offset;
        ttm->state = ttm_bound;

#ifdef DRM_ODD_MM_COMPAT
        if (be->needs_ub_cache_adjust(be)) {
                ret = drm_ttm_remap_bound(ttm);
                if (ret)
                        return ret;
        }
#endif

        return 0;
}
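
/*
 * For an uncached binding, the sequence above works out to:
 *
 *      drm_ttm_populate(ttm);                   allocate missing pages
 *      unmap_vma_pages(ttm);                    zap user mappings first
 *      drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
 *      be->bind(be, aper_offset, cached);
 *
 * On bind failure the ttm is left in ttm_evicted, so a later
 * drm_fixup_ttm_caching() can restore cached status.
 */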

/*
 * dev->struct_mutex locked.
 */
static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
{
        drm_map_list_t *list = &object->map_list;
        drm_local_map_t *map;

        if (list->user_token)
                drm_ht_remove_item(&dev->map_hash, &list->hash);

        if (list->file_offset_node) {
                drm_mm_put_block(list->file_offset_node);
                list->file_offset_node = NULL;
        }

        map = list->map;

        if (map) {
                drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
                if (ttm) {
                        if (drm_destroy_ttm(ttm) != -EBUSY) {
                                drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
                        }
                } else {
                        drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
                }
        }

        drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
}

void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
{
        if (atomic_dec_and_test(&to->usage)) {
                drm_ttm_object_remove(dev, to);
        }
}

void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
{
        if (atomic_dec_and_test(&to->usage)) {
                mutex_lock(&dev->struct_mutex);
                if (atomic_read(&to->usage) == 0)
                        drm_ttm_object_remove(dev, to);
                mutex_unlock(&dev->struct_mutex);
        }
}
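
/*
 * The unlocked variant re-checks the usage count under struct_mutex:
 * another thread may have taken a fresh reference between the
 * atomic_dec_and_test() and the mutex acquisition, in which case the
 * object must not be removed here.
 */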

/*
 * Create a ttm and add it to the drm book-keeping.
 * dev->struct_mutex locked.
 */

int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
                          uint32_t flags, drm_ttm_object_t ** ttm_object)
{
        drm_ttm_object_t *object;
        drm_map_list_t *list;
        drm_local_map_t *map;
        drm_ttm_t *ttm;

        object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
        if (!object)
                return -ENOMEM;
        object->flags = flags;
        list = &object->map_list;

        list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
        if (!list->map) {
                drm_ttm_object_remove(dev, object);
                return -ENOMEM;
        }
        map = list->map;

        ttm = drm_init_ttm(dev, size);
        if (!ttm) {
                DRM_ERROR("Could not create ttm\n");
                drm_ttm_object_remove(dev, object);
                return -ENOMEM;
        }

        map->offset = (unsigned long)ttm;
        map->type = _DRM_TTM;
        map->flags = _DRM_REMOVABLE;
        map->size = ttm->num_pages * PAGE_SIZE;
        map->handle = (void *)object;

        /*
         * Add a one-page "hole" to the block size to avoid the mm subsystem
         * merging vmas.
         * FIXME: Is this really needed?
         */

        list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
                                                    ttm->num_pages + 1, 0, 0);
        if (!list->file_offset_node) {
                drm_ttm_object_remove(dev, object);
                return -ENOMEM;
        }
        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                                  ttm->num_pages + 1, 0);

        list->hash.key = list->file_offset_node->start;

        if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
                drm_ttm_object_remove(dev, object);
                return -ENOMEM;
        }

        list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
        ttm->mapping_offset = list->hash.key;
        atomic_set(&object->usage, 1);
        *ttm_object = object;
        return 0;
}
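
/*
 * Usage sketch for the object interface; the caller must hold
 * dev->struct_mutex, and "size" and "flags" are the caller's:
 *
 *      drm_ttm_object_t *to;
 *      int ret = drm_ttm_object_create(dev, size, flags, &to);
 *      if (ret)
 *              return ret;
 *      ...
 *      drm_ttm_object_deref_locked(dev, to);
 */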