drm: move some of the OS stuff into the OS header
linux-core/drm_ttm.c (platform/upstream/libdrm.git)
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
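
/*
 * drm_ttm_cache_flush() makes sure CPU caches are flushed on all
 * processors by sending an IPI that runs flush_agp_cache() on each of
 * them. It is used before pages are switched to a noncached mapping.
 */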
static void drm_ttm_ipi_handler(void *null)
{
        flush_agp_cache();
}

void drm_ttm_cache_flush(void)
{
        if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
                DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
EXPORT_SYMBOL(drm_ttm_cache_flush);

/*
 * Use kmalloc if possible. Otherwise fall back to vmalloc.
 */

static void ttm_alloc_pages(struct drm_ttm *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
        ttm->pages = NULL;

        if (drm_alloc_memctl(size))
                return;

        if (size <= PAGE_SIZE)
                ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);

        if (!ttm->pages) {
                ttm->pages = vmalloc_user(size);
                if (ttm->pages)
                        ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
        }
        if (!ttm->pages)
                drm_free_memctl(size);
}

static void ttm_free_pages(struct drm_ttm *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);

        if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
                vfree(ttm->pages);
                ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
        } else {
                drm_free(ttm->pages, size, DRM_MEM_TTM);
        }
        drm_free_memctl(size);
        ttm->pages = NULL;
}
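
/*
 * Allocate and clear a single page for the ttm page vector, charging
 * it to the memctl accounting. The page is locked (reserved on kernels
 * older than 2.6.15) so that it stays pinned while in use.
 */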
static struct page *drm_ttm_alloc_page(void)
{
        struct page *page;

        if (drm_alloc_memctl(PAGE_SIZE))
                return NULL;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
        if (!page) {
                drm_free_memctl(PAGE_SIZE);
                return NULL;
        }
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
        SetPageLocked(page);
#else
        SetPageReserved(page);
#endif
        return page;
}

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */

static int drm_set_caching(struct drm_ttm *ttm, int noncached)
{
        int i;
        struct page **cur_page;
        int do_tlbflush = 0;

        if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
                return 0;

        if (noncached)
                drm_ttm_cache_flush();

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages + i;
                if (*cur_page) {
                        if (!PageHighMem(*cur_page)) {
                                if (noncached)
                                        map_page_into_agp(*cur_page);
                                else
                                        unmap_page_from_agp(*cur_page);
                                do_tlbflush = 1;
                        }
                }
        }
        if (do_tlbflush)
                flush_agp_mappings();

        DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);

        return 0;
}
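
/*
 * Release pages that were pinned with get_user_pages(). Pages written
 * through a bound ttm are marked dirty before the reference is
 * dropped, and the shared dummy read page is skipped.
 */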
static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
{
        struct mm_struct *mm = ttm->user_mm;
        int write;
        int dirty;
        struct page *page;
        int i;

        BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
        write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
        dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);

        down_read(&mm->mmap_sem);
        for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (page == NULL)
                        continue;

                if (page == ttm->dummy_read_page) {
                        BUG_ON(write);
                        continue;
                }

                if (write && dirty && !PageReserved(page))
                        SetPageDirty(page);

                ttm->pages[i] = NULL;
                page_cache_release(page);
        }
        up_read(&mm->mmap_sem);
}
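
/*
 * Free pages that were allocated by the ttm itself, undoing the
 * per-page lock/reserve and the memctl accounting taken at allocation
 * time. Unexpected page reference or map counts are reported, since
 * they indicate the page is still in use elsewhere.
 */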
static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
{
        int i;
        struct drm_buffer_manager *bm = &ttm->dev->bm;
        struct page **cur_page;

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages + i;
                if (*cur_page) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
                        unlock_page(*cur_page);
#else
                        ClearPageReserved(*cur_page);
#endif
                        if (page_count(*cur_page) != 1)
                                DRM_ERROR("Erroneous page count. "
                                          "Leaking pages.\n");
                        if (page_mapped(*cur_page))
                                DRM_ERROR("Erroneous map count. "
                                          "Leaking page mappings.\n");
                        __free_page(*cur_page);
                        drm_free_memctl(PAGE_SIZE);
                        --bm->cur_pages;
                }
        }
}

/*
 * Free all resources associated with a ttm.
 */

int drm_destroy_ttm(struct drm_ttm *ttm)
{
        struct drm_ttm_backend *be;

        if (!ttm)
                return 0;

        be = ttm->be;
        if (be) {
                be->func->destroy(be);
                ttm->be = NULL;
        }

        if (ttm->pages) {
                if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
                        drm_set_caching(ttm, 0);

                if (ttm->page_flags & DRM_TTM_PAGE_USER)
                        drm_ttm_free_user_pages(ttm);
                else
                        drm_ttm_free_alloced_pages(ttm);

                ttm_free_pages(ttm);
        }

        drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
        return 0;
}
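
/*
 * Return the page at the given index, allocating it on demand and
 * charging it to the buffer manager's page count the first time.
 */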
struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
{
        struct page *p;
        struct drm_buffer_manager *bm = &ttm->dev->bm;

        p = ttm->pages[index];
        if (!p) {
                p = drm_ttm_alloc_page();
                if (!p)
                        return NULL;
                ttm->pages[index] = p;
                ++bm->cur_pages;
        }
        return p;
}
EXPORT_SYMBOL(drm_ttm_get_page);
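
/*
 * Populate the ttm with pages of user memory, pinned with
 * get_user_pages(). For a read-only ttm, holes left by pages that
 * could not be pinned are filled with the dummy read page; for a
 * writable ttm, a short pin count is an error.
 */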
int drm_ttm_set_user(struct drm_ttm *ttm,
                     struct task_struct *tsk,
                     int write,
                     unsigned long start,
                     unsigned long num_pages,
                     struct page *dummy_read_page)
{
        struct mm_struct *mm = tsk->mm;
        int ret;
        int i;

        BUG_ON(num_pages != ttm->num_pages);

        ttm->user_mm = mm;
        ttm->dummy_read_page = dummy_read_page;
        ttm->page_flags = DRM_TTM_PAGE_USER |
                ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);

        down_read(&mm->mmap_sem);
        ret = get_user_pages(tsk, mm, start, num_pages,
                             write, 0, ttm->pages, NULL);
        up_read(&mm->mmap_sem);

        if (ret != num_pages && write) {
                drm_ttm_free_user_pages(ttm);
                return -ENOMEM;
        }

        for (i = 0; i < num_pages; ++i) {
                if (ttm->pages[i] == NULL)
                        ttm->pages[i] = ttm->dummy_read_page;
        }

        return 0;
}
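
/*
 * Make sure all pages of the ttm are allocated and hand the page
 * vector to the backend, taking the ttm from ttm_unpopulated to
 * ttm_unbound.
 */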
int drm_ttm_populate(struct drm_ttm *ttm)
{
        struct page *page;
        unsigned long i;
        struct drm_ttm_backend *be;

        if (ttm->state != ttm_unpopulated)
                return 0;

        be = ttm->be;
        for (i = 0; i < ttm->num_pages; ++i) {
                page = drm_ttm_get_page(ttm, i);
                if (!page)
                        return -ENOMEM;
        }
        be->func->populate(be, ttm->num_pages, ttm->pages);
        ttm->state = ttm_unbound;
        return 0;
}

/*
 * Initialize a ttm.
 */

struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
{
        struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
        struct drm_ttm *ttm;

        if (!bo_driver)
                return NULL;

        ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
        if (!ttm)
                return NULL;

        ttm->dev = dev;
        atomic_set(&ttm->vma_count, 0);

        ttm->destroy = 0;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        ttm->page_flags = 0;

        /*
         * Account also for AGP module memory usage.
         */

        ttm_alloc_pages(ttm);
        if (!ttm->pages) {
                drm_destroy_ttm(ttm);
                DRM_ERROR("Failed allocating page table\n");
                return NULL;
        }
        ttm->be = bo_driver->create_ttm_backend_entry(dev);
        if (!ttm->be) {
                drm_destroy_ttm(ttm);
                DRM_ERROR("Failed creating ttm backend entry\n");
                return NULL;
        }
        ttm->state = ttm_unpopulated;
        return ttm;
}

/*
 * Unbind a ttm region from the aperture.
 */

void drm_ttm_evict(struct drm_ttm *ttm)
{
        struct drm_ttm_backend *be = ttm->be;
        int ret;

        if (ttm->state == ttm_bound) {
                ret = be->func->unbind(be);
                BUG_ON(ret);
        }

        ttm->state = ttm_evicted;
}
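
/*
 * After an eviction, ask the backend whether the pages need to be
 * switched back to cached mappings before the ttm is treated as
 * unbound again.
 */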
void drm_ttm_fixup_caching(struct drm_ttm *ttm)
{
        if (ttm->state == ttm_evicted) {
                struct drm_ttm_backend *be = ttm->be;

                if (be->func->needs_ub_cache_adjust(be))
                        drm_set_caching(ttm, 0);
                ttm->state = ttm_unbound;
        }
}
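
/*
 * Unbind the ttm and restore the cached state of its pages.
 */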
void drm_ttm_unbind(struct drm_ttm *ttm)
{
        if (ttm->state == ttm_bound)
                drm_ttm_evict(ttm);

        drm_ttm_fixup_caching(ttm);
}
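
/*
 * Populate the ttm and bind it to the memory region described by
 * bo_mem, adjusting the caching policy of its pages as the region's
 * flags require.
 */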
int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
{
        struct drm_bo_driver *bo_driver;
        struct drm_ttm_backend *be;
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        bo_driver = ttm->dev->driver->bo_driver;
        if (ttm->state == ttm_bound)
                return 0;

        be = ttm->be;

        ret = drm_ttm_populate(ttm);
        if (ret)
                return ret;

        if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
                drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
        } else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
                   bo_driver->ttm_cache_flush)
                bo_driver->ttm_cache_flush(ttm);

        ret = be->func->bind(be, bo_mem);
        if (ret) {
                ttm->state = ttm_evicted;
                DRM_ERROR("Couldn't bind backend.\n");
                return ret;
        }

        ttm->state = ttm_bound;
        if (ttm->page_flags & DRM_TTM_PAGE_USER)
                ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
        return 0;
}
EXPORT_SYMBOL(drm_bind_ttm);