/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These have bad performance in the AGP module for the indicated kernel versions.
 */

int drm_map_page_into_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}
#endif
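
/*
 * Usage note (sketch, not part of the original file): the two helpers above
 * deliberately leave the TLB flush to the caller, so a hypothetical caller
 * would batch the attribute changes and flush once:
 *
 *        for (i = 0; i < num_pages; ++i)
 *                drm_map_page_into_agp(pages[i]);
 *        global_flush_tlb();
 */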

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
        static pgprot_t drm_protection_map[16] = {
                __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
        };

        return drm_protection_map[vm_flags & 0x0F];
#else
        extern pgprot_t protection_map[];
        return protection_map[vm_flags & 0x0F];
#endif
}
#endif
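
/*
 * Usage sketch (assumption, mirroring how the exported function is used on
 * 2.6.19+ kernels): an mmap path would typically derive the page protection
 * from the vma flags:
 *
 *        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 *
 * The table is indexed by the low four flag bits (VM_READ, VM_WRITE,
 * VM_EXEC, VM_SHARED), selecting a private (__P) or shared (__S) entry.
 */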

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * VM code for kernels below 2.6.15, in which a major VM rewrite occurred.
 * This implements a simple, straightforward version similar to what is
 * expected to be in kernel 2.6.20+.
 */

static int drm_pte_is_clear(struct vm_area_struct *vma,
                            unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int ret = 1;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;

        spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto unlock;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                goto unlock;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                goto unlock;
        pte = pte_offset_map(pmd, addr);
        if (!pte)
                goto unlock;
        ret = pte_none(*pte);
        pte_unmap(pte);
unlock:
        spin_unlock(&mm->page_table_lock);
        return ret;
}

int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                  unsigned long pfn, pgprot_t pgprot)
{
        int ret;
        if (!drm_pte_is_clear(vma, addr))
                return -EBUSY;

        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
        return ret;
}
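
/*
 * Usage sketch (hypothetical caller): unlike the vm_insert_pfn() that later
 * kernels export, this compat version takes an explicit pgprot. A fault
 * handler that has resolved a pfn would do something like
 *
 *        err = vm_insert_pfn(vma, address, pfn, vma->vm_page_prot);
 *
 * where -EBUSY simply means the pte was already populated, e.g. by a racing
 * fault.
 */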

static struct {
        spinlock_t lock;
        struct page *dummy_page;
        atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};

struct page *get_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 0) {
                struct page *page = alloc_page(GFP_KERNEL);
                if (!page)
                        return NOPAGE_OOM;
                spin_lock(&drm_np_retry.lock);
                drm_np_retry.dummy_page = page;
                atomic_set(&drm_np_retry.present, 1);
                spin_unlock(&drm_np_retry.lock);
        }
        get_page(drm_np_retry.dummy_page);
        return drm_np_retry.dummy_page;
}

void free_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 1) {
                spin_lock(&drm_np_retry.lock);
                __free_page(drm_np_retry.dummy_page);
                drm_np_retry.dummy_page = NULL;
                atomic_set(&drm_np_retry.present, 0);
                spin_unlock(&drm_np_retry.lock);
        }
}
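
/*
 * Note (assumption, not stated in this file): get_nopage_retry() hands out a
 * refcounted dummy page that a nopage handler can return when the fault
 * should effectively be retried later; on these kernels NOPAGE_REFAULT is
 * presumably defined (e.g. in drm_compat.h) in terms of it.
 * free_nopage_retry() releases the dummy page once, e.g. at module unload.
 */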

struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int *type)
{
        struct fault_data data;

        if (type)
                *type = VM_FAULT_MINOR;

        data.address = address;
        data.vma = vma;
        drm_vm_ttm_fault(vma, &data);
        switch (data.type) {
        case VM_FAULT_OOM:
                return NOPAGE_OOM;
        case VM_FAULT_SIGBUS:
                return NOPAGE_SIGBUS;
        default:
                break;
        }

        return NOPAGE_REFAULT;
}
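
/*
 * Wiring sketch (assumption, names illustrative): on kernels that still use
 * the nopage interface, the ttm vm_operations_struct would point .nopage at
 * the wrapper above, which in turn calls drm_vm_ttm_fault():
 *
 *        static struct vm_operations_struct drm_vm_ttm_ops = {
 *                .nopage = drm_vm_ttm_nopage,
 *        };
 */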

#endif

#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.19(?). This code implements a
 * complicated workaround for a single BUG statement in do_no_page in these
 * versions. The tricky thing is that we need to take the mmap_sem in
 * exclusive mode for _all_ vmas mapping the ttm, before dev->struct_mutex
 * is taken. The way we do this is to first take dev->struct_mutex and then
 * trylock all mmap_sems. If this fails for a single mmap_sem, we have to
 * release all sems and the dev->struct_mutex, release the cpu and retry.
 * We also need to keep track of all vmas mapping the ttm. Phew.
 */

typedef struct p_mm_entry {
        struct list_head head;
        struct mm_struct *mm;
        atomic_t refcount;
        int locked;
} p_mm_entry_t;

typedef struct vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
} vma_entry_t;

struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int *type)
{
        drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page;
        drm_ttm_t *ttm;
        drm_buffer_manager_t *bm;
        drm_device_t *dev;

        /*
         * FIXME: Check can't map aperture flag.
         */

        if (type)
                *type = VM_FAULT_MINOR;

        if (!map)
                return NOPAGE_OOM;

        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;

        ttm = (drm_ttm_t *) map->offset;
        dev = ttm->dev;
        mutex_lock(&dev->struct_mutex);
        drm_fixup_ttm_caching(ttm);
        BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);

        bm = &dev->bm;
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
        page = ttm->pages[page_offset];

        if (!page) {
                if (drm_alloc_memctl(PAGE_SIZE)) {
                        page = NOPAGE_OOM;
                        goto out;
                }
                page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
                if (!page) {
                        drm_free_memctl(PAGE_SIZE);
                        page = NOPAGE_OOM;
                        goto out;
                }
                ++bm->cur_pages;
                SetPageLocked(page);
        }

        get_page(page);
out:
        mutex_unlock(&dev->struct_mutex);
        return page;
}

int drm_ttm_map_bound(struct vm_area_struct *vma)
{
        drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
        drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
        int ret = 0;

        if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
                unsigned long pfn = ttm->aper_offset +
                        (ttm->be->aperture_base >> PAGE_SHIFT);
                pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);

                ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
                                         vma->vm_end - vma->vm_start,
                                         pgprot);
        }
        return ret;
}

int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n_entry;
        vma_entry_t *v_entry;
        drm_local_map_t *map = (drm_local_map_t *)
                vma->vm_private_data;
        struct mm_struct *mm = vma->vm_mm;

        v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
        if (!v_entry) {
                DRM_ERROR("Allocation of vma pointer entry failed\n");
                return -ENOMEM;
        }
        v_entry->vma = vma;
        map->handle = (void *) v_entry;
        list_add_tail(&v_entry->head, &ttm->vma_list);

        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                if (mm == entry->mm) {
                        atomic_inc(&entry->refcount);
                        return 0;
                } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
        }

        n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
        if (!n_entry) {
                DRM_ERROR("Allocation of process mm pointer entry failed\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&n_entry->head);
        n_entry->mm = mm;
        n_entry->locked = 0;
        atomic_set(&n_entry->refcount, 0);
        list_add_tail(&n_entry->head, &entry->head);

        return 0;
}

void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n;
        vma_entry_t *v_entry, *v_n;
        int found = 0;
        struct mm_struct *mm = vma->vm_mm;

        list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
                if (v_entry->vma == vma) {
                        found = 1;
                        list_del(&v_entry->head);
                        drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
                        break;
                }
        }
        BUG_ON(!found);

        list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
                if (mm == entry->mm) {
                        if (atomic_add_negative(-1, &entry->refcount)) {
                                list_del(&entry->head);
                                BUG_ON(entry->locked);
                                drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
                        }
                        return;
                }
        }
        BUG_ON(1);
}

int drm_ttm_lock_mm(drm_ttm_t * ttm)
{
        p_mm_entry_t *entry;
        int lock_ok = 1;

        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                BUG_ON(entry->locked);
                if (!down_write_trylock(&entry->mm->mmap_sem)) {
                        lock_ok = 0;
                        break;
                }
                entry->locked = 1;
        }

        if (lock_ok)
                return 0;

        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                if (!entry->locked)
                        break;
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }

        /*
         * Possible deadlock. Try again. Our callers should handle this
         * and restart.
         */

        return -EAGAIN;
}

void drm_ttm_unlock_mm(drm_ttm_t * ttm)
{
        p_mm_entry_t *entry;

        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                BUG_ON(!entry->locked);
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }
}
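
/*
 * Caller pattern sketch (assumption, following the comment in
 * drm_ttm_lock_mm() above): because the trylock pass can fail with -EAGAIN,
 * callers are expected to drop everything, yield the cpu and retry, roughly:
 *
 *        do {
 *                ret = drm_ttm_lock_mm(ttm);
 *                if (ret == -EAGAIN) {
 *                        mutex_unlock(&dev->struct_mutex);
 *                        schedule();
 *                        mutex_lock(&dev->struct_mutex);
 *                }
 *        } while (ret == -EAGAIN);
 */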

int drm_ttm_remap_bound(drm_ttm_t *ttm)
{
        vma_entry_t *v_entry;
        int ret = 0;

        list_for_each_entry(v_entry, &ttm->vma_list, head) {
                ret = drm_ttm_map_bound(v_entry->vma);
                if (ret)
                        break;
        }

        drm_ttm_unlock_mm(ttm);
        return ret;
}

void drm_ttm_finish_unmap(drm_ttm_t *ttm)
{
        vma_entry_t *v_entry;

        if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
                return;

        list_for_each_entry(v_entry, &ttm->vma_list, head) {
                v_entry->vma->vm_flags &= ~VM_PFNMAP;
        }
        drm_ttm_unlock_mm(ttm);
}

#endif