/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
static void drm_clflush_page(struct page *page)
{
        uint8_t *page_virtual;
        unsigned int i;

        if (unlikely(page == NULL))
                return;

        page_virtual = kmap_atomic(page, KM_USER0);

        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                clflush(page_virtual + i);

        kunmap_atomic(page_virtual, KM_USER0);
}

static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages)
{
        unsigned long i;

        mb();
        for (i = 0; i < num_pages; ++i)
                drm_clflush_page(*pages++);
        mb();
}
#endif

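/*
 * Fallback when clflush can't be used: flush the AGP cache on every
 * CPU via an IPI broadcast.
 */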
static void drm_ttm_ipi_handler(void *null)
{
        flush_agp_cache();
}

void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
        if (cpu_has_clflush) {
                drm_ttm_cache_flush_clflush(pages, num_pages);
                return;
        }
#endif
        if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
                DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
EXPORT_SYMBOL(drm_ttm_cache_flush);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
        ttm->pages = NULL;

        if (drm_alloc_memctl(size))
                return;

        if (size <= PAGE_SIZE)
                ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);

        if (!ttm->pages) {
                ttm->pages = vmalloc_user(size);
                if (ttm->pages)
                        ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC;
        }
        if (!ttm->pages)
                drm_free_memctl(size);
}

static void drm_ttm_free_page_directory(struct drm_ttm *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);

        if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) {
                vfree(ttm->pages);
                ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC;
        } else {
                drm_free(ttm->pages, size, DRM_MEM_TTM);
        }
        drm_free_memctl(size);
        ttm->pages = NULL;
}

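/*
 * Allocate a single zeroed page for a ttm, accounting for it through
 * the DRM memory controls. GFP_DMA32 keeps the page addressable by
 * devices limited to 32-bit DMA.
 */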
static struct page *drm_ttm_alloc_page(void)
{
        struct page *page;

        if (drm_alloc_memctl(PAGE_SIZE))
                return NULL;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
        if (!page) {
                drm_free_memctl(PAGE_SIZE);
                return NULL;
        }
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        SetPageReserved(page);
#endif
        return page;
}

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */

static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
{
        int i;
        struct page **cur_page;
        int do_tlbflush = 0;

        if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
                return 0;

        if (noncached)
                drm_ttm_cache_flush(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages + i;
                if (*cur_page) {
                        if (!PageHighMem(*cur_page)) {
                                if (noncached) {
                                        map_page_into_agp(*cur_page);
                                } else {
                                        unmap_page_from_agp(*cur_page);
                                }
                                do_tlbflush = 1;
                        }
                }
        }
        if (do_tlbflush)
                flush_agp_mappings();

        DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);

        return 0;
}

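/*
 * Release pages that were pinned with get_user_pages(). Pages the
 * device may have written are marked dirty so the VM writes them back;
 * the shared dummy_read_page is skipped rather than released.
 */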
static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
{
        int write;
        int dirty;
        struct page *page;
        int i;

        BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
        write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0);
        dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);

        for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (page == NULL)
                        continue;

                if (page == ttm->dummy_read_page) {
                        BUG_ON(write);
                        continue;
                }

                if (write && dirty && !PageReserved(page))
                        set_page_dirty_lock(page);

                ttm->pages[i] = NULL;
                put_page(page);
        }
}

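/*
 * Free pages the ttm allocated itself, warning if a page is still
 * referenced or mapped elsewhere when the ttm is torn down.
 */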
static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
{
        int i;
        struct drm_buffer_manager *bm = &ttm->dev->bm;
        struct page **cur_page;

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages + i;
                if (*cur_page) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
                        ClearPageReserved(*cur_page);
#endif
                        if (page_count(*cur_page) != 1)
                                DRM_ERROR("Erroneous page count. Leaking pages.\n");
                        if (page_mapped(*cur_page))
                                DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
                        __free_page(*cur_page);
                        drm_free_memctl(PAGE_SIZE);
                        --bm->cur_pages;
                }
        }
}

/*
 * Free all resources associated with a ttm.
 */

int drm_ttm_destroy(struct drm_ttm *ttm)
{
        struct drm_ttm_backend *be;

        if (!ttm)
                return 0;

        be = ttm->be;
        if (be) {
                be->func->destroy(be);
                ttm->be = NULL;
        }

        if (ttm->pages) {
                if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
                        drm_ttm_set_caching(ttm, 0);

                if (ttm->page_flags & DRM_TTM_PAGE_USER)
                        drm_ttm_free_user_pages(ttm);
                else
                        drm_ttm_free_alloced_pages(ttm);

                drm_ttm_free_page_directory(ttm);
        }

        drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
        return 0;
}

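/*
 * Look up the page backing a given ttm index, allocating it on demand.
 * Highmem pages are filled in from the top of the page directory and
 * lowmem pages from the bottom, keeping each group contiguous.
 */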
struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
{
        struct page *p;
        struct drm_buffer_manager *bm = &ttm->dev->bm;

        while (NULL == (p = ttm->pages[index])) {
                p = drm_ttm_alloc_page();
                if (!p)
                        return NULL;

                if (PageHighMem(p))
                        ttm->pages[--ttm->first_himem_page] = p;
                else
                        ttm->pages[++ttm->last_lomem_page] = p;

                ++bm->cur_pages;
        }
        return p;
}
EXPORT_SYMBOL(drm_ttm_get_page);

/**
 * drm_ttm_set_user:
 *
 * @ttm: the ttm to map pages to. This must always be
 * a freshly created ttm.
 *
 * @tsk: the task whose address space the pages are mapped from.
 *
 * @start: the first user-space address of the range to map.
 *
 * @num_pages: the number of pages to map. This must equal
 * ttm->num_pages.
 *
 * Map a range of user addresses to a new ttm object. This
 * provides access to user memory from the graphics device.
 * Write access is requested when DRM_TTM_PAGE_WRITE is set in
 * ttm->page_flags.
 */
int drm_ttm_set_user(struct drm_ttm *ttm,
                     struct task_struct *tsk,
                     unsigned long start,
                     unsigned long num_pages)
{
        struct mm_struct *mm = tsk->mm;
        int ret;
        int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;

        BUG_ON(num_pages != ttm->num_pages);
        BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0);

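        /* get_user_pages() requires mmap_sem to be held across the call. */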
        down_read(&mm->mmap_sem);
        ret = get_user_pages(tsk, mm, start, num_pages,
                             write, 0, ttm->pages, NULL);
        up_read(&mm->mmap_sem);

        if (ret != num_pages && write) {
                drm_ttm_free_user_pages(ttm);
                return -ENOMEM;
        }

        return 0;
}

/**
 * drm_ttm_populate:
 *
 * @ttm: the object to allocate pages for
 *
 * Allocate pages for all unset page entries, then
 * call the backend to create the hardware mappings
 */
int drm_ttm_populate(struct drm_ttm *ttm)
{
        struct page *page;
        unsigned long i;
        struct drm_ttm_backend *be;

        if (ttm->state != ttm_unpopulated)
                return 0;

        be = ttm->be;

        for (i = 0; i < ttm->num_pages; ++i) {
                page = drm_ttm_get_page(ttm, i);
                if (!page)
                        return -ENOMEM;
        }

        be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
        ttm->state = ttm_unbound;
        return 0;
}

/**
 * drm_ttm_create:
 *
 * @dev: the drm_device
 *
 * @size: The size (in bytes) of the desired object
 *
 * @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h.
 *
 * @dummy_read_page: a shared placeholder page handed to the backend
 * for entries that are never written.
 *
 * Allocate and initialize a ttm, leaving it unpopulated at this time.
 */

struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
                               uint32_t page_flags, struct page *dummy_read_page)
{
        struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
        struct drm_ttm *ttm;

        if (!bo_driver)
                return NULL;

        ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
        if (!ttm)
                return NULL;

        ttm->dev = dev;
        atomic_set(&ttm->vma_count, 0);

        ttm->destroy = 0;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;

        ttm->page_flags = page_flags;

        ttm->dummy_read_page = dummy_read_page;

        /*
         * Account also for AGP module memory usage.
         */

        drm_ttm_alloc_page_directory(ttm);
        if (!ttm->pages) {
                drm_ttm_destroy(ttm);
                DRM_ERROR("Failed allocating page table\n");
                return NULL;
        }
        ttm->be = bo_driver->create_ttm_backend_entry(dev);
        if (!ttm->be) {
                drm_ttm_destroy(ttm);
                DRM_ERROR("Failed creating ttm backend entry\n");
                return NULL;
        }
        ttm->state = ttm_unpopulated;
        return ttm;
}

/**
 * drm_ttm_evict:
 *
 * @ttm: the object to be unbound from the aperture.
 *
 * Transition a ttm from bound to evicted, where it
 * isn't present in the aperture, but various caches may
 * not be consistent.
 */
void drm_ttm_evict(struct drm_ttm *ttm)
{
        struct drm_ttm_backend *be = ttm->be;
        int ret;

        if (ttm->state == ttm_bound) {
                ret = be->func->unbind(be);
                BUG_ON(ret);
        }

        ttm->state = ttm_evicted;
}

/**
 * drm_ttm_fixup_caching:
 *
 * @ttm: the object to set unbound
 *
 * XXX this function is misnamed. Transition a ttm from evicted to
 * unbound, flushing caches as appropriate.
 */
void drm_ttm_fixup_caching(struct drm_ttm *ttm)
{

        if (ttm->state == ttm_evicted) {
                struct drm_ttm_backend *be = ttm->be;
                if (be->func->needs_ub_cache_adjust(be))
                        drm_ttm_set_caching(ttm, 0);
                ttm->state = ttm_unbound;
        }
}

/**
 * drm_ttm_unbind:
 *
 * @ttm: the object to unbind from the graphics device
 *
 * Unbind an object from the aperture. This removes the mappings
 * from the graphics device and flushes caches if necessary.
 */
void drm_ttm_unbind(struct drm_ttm *ttm)
{
        if (ttm->state == ttm_bound)
                drm_ttm_evict(ttm);

        drm_ttm_fixup_caching(ttm);
}

/**
 * drm_ttm_bind:
 *
 * @ttm: the ttm object to bind to the graphics device
 *
 * @bo_mem: the aperture memory region which will hold the object
 *
 * Bind a ttm object to the aperture. This ensures that the necessary
 * pages are allocated, flushes CPU caches as needed and, for a
 * user-backed ttm, sets DRM_TTM_PAGE_USER_DIRTY to indicate that its
 * pages may have been modified by the GPU.
 */
int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
{
        struct drm_bo_driver *bo_driver;
        int ret = 0;
        struct drm_ttm_backend *be;

        if (!ttm)
                return -EINVAL;
        if (ttm->state == ttm_bound)
                return 0;

        bo_driver = ttm->dev->driver->bo_driver;
        be = ttm->be;

        ret = drm_ttm_populate(ttm);
        if (ret)
                return ret;

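        /*
         * An uncached aperture placement needs the pages switched to an
         * uncached kernel mapping; a cached-mapped placement only gets
         * the driver's own cache flush, if one is provided.
         */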
        if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
                drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
        else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
                 bo_driver->ttm_cache_flush)
                bo_driver->ttm_cache_flush(ttm);

        ret = be->func->bind(be, bo_mem);
        if (ret) {
                ttm->state = ttm_evicted;
                DRM_ERROR("Couldn't bind backend.\n");
                return ret;
        }

        ttm->state = ttm_bound;
        if (ttm->page_flags & DRM_TTM_PAGE_USER)
                ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
        return 0;
}
EXPORT_SYMBOL(drm_ttm_bind);