/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
int drm_map_page_into_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}
#endif

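/*
 * Compat version of vm_get_page_prot() for kernels that don't export it.
 * Looks up the page protection for the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits of vm_flags, using a private copy of the kernel's protection map when
 * built as a module (the kernel's protection_map[] is not exported).
 */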
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
        static pgprot_t drm_protection_map[16] = {
                __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
        };

        return drm_protection_map[vm_flags & 0x0F];
#else
        extern pgprot_t protection_map[];
        return protection_map[vm_flags & 0x0F];
#endif
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

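/*
 * Compat versions of the 2.6.19 vm_insert_pfn() interface.
 * drm_pte_is_clear() walks the page tables to check that no PTE is already
 * present at the given address; vm_insert_pfn() then refuses to overwrite an
 * existing mapping and inserts the single page with io_remap_pfn_range().
 */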
static int drm_pte_is_clear(struct vm_area_struct *vma,
                            unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int ret = 1;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        spin_lock(&mm->page_table_lock);
#else
        spinlock_t *ptl;
#endif

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto unlock;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                goto unlock;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                goto unlock;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        pte = pte_offset_map(pmd, addr);
#else
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
#endif
        if (!pte)
                goto unlock;
        ret = pte_none(*pte);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        pte_unmap(pte);
 unlock:
        spin_unlock(&mm->page_table_lock);
#else
        pte_unmap_unlock(pte, ptl);
 unlock:
#endif
        return ret;
}

int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                  unsigned long pfn, pgprot_t pgprot)
{
        int ret;
        if (!drm_pte_is_clear(vma, addr))
                return -EBUSY;

        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
        return ret;
}

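/*
 * Dummy-page machinery for nopage handlers on kernels without NOPAGE_REFAULT.
 * A single shared page is allocated on first use and handed out (with an
 * extra reference) whenever a handler needs the faulting process to simply
 * retry; presumably drm_compat.h maps NOPAGE_REFAULT onto get_nopage_retry()
 * on such kernels.
 */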
static struct {
        spinlock_t lock;
        struct page *dummy_page;
        atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};

struct page * get_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 0) {
                struct page *page = alloc_page(GFP_KERNEL);
                if (!page)
                        return NOPAGE_OOM;
                spin_lock(&drm_np_retry.lock);
                drm_np_retry.dummy_page = page;
                atomic_set(&drm_np_retry.present, 1);
                spin_unlock(&drm_np_retry.lock);
        }
        get_page(drm_np_retry.dummy_page);
        return drm_np_retry.dummy_page;
}

void free_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 1) {
                spin_lock(&drm_np_retry.lock);
                __free_page(drm_np_retry.dummy_page);
                drm_np_retry.dummy_page = NULL;
                atomic_set(&drm_np_retry.present, 0);
                spin_unlock(&drm_np_retry.lock);
        }
}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

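/*
 * nopage-style wrapper for kernels that predate the fault_data based
 * handler: call drm_vm_ttm_fault() and translate its result into the
 * corresponding nopage return values.
 */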
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int *type)
{
        struct fault_data data;

        if (type)
                *type = VM_FAULT_MINOR;

        data.address = address;
        data.vma = vma;
        drm_vm_ttm_fault(vma, &data);
        switch (data.type) {
        case VM_FAULT_OOM:
                return NOPAGE_OOM;
        case VM_FAULT_SIGBUS:
                return NOPAGE_SIGBUS;
        default:
                break;
        }

        return NOPAGE_REFAULT;
}

#endif

#ifdef DRM_ODD_MM_COMPAT

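/*
 * On kernels needing DRM_ODD_MM_COMPAT, a ttm keeps track of every vma that
 * maps it (vma_list) and of every process mm behind those vmas (p_mm_list),
 * so that aperture mappings can be torn down and re-established, under all
 * the relevant mmap_sems, when the ttm is bound, unbound or changes caching
 * state.
 */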
typedef struct p_mm_entry {
        struct list_head head;
        struct mm_struct *mm;
        atomic_t refcount;
        int locked;
} p_mm_entry_t;

typedef struct vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
} vma_entry_t;

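/*
 * nopage handler used with DRM_ODD_MM_COMPAT: return (allocating on demand)
 * the ttm page backing the faulting address. Bound, uncached ttms are
 * expected to have been mapped linearly via drm_ttm_map_bound() instead,
 * hence the BUG_ON below.
 */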
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int *type)
{
        drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page;
        drm_ttm_t *ttm;
        drm_buffer_manager_t *bm;
        drm_device_t *dev;

        /*
         * FIXME: Check can't map aperture flag.
         */

        if (type)
                *type = VM_FAULT_MINOR;

        if (!map)
                return NOPAGE_OOM;

        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;

        ttm = (drm_ttm_t *) map->offset;
        dev = ttm->dev;
        mutex_lock(&dev->struct_mutex);
        drm_fixup_ttm_caching(ttm);
        BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);

        bm = &dev->bm;
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
        page = ttm->pages[page_offset];

        if (!page) {
                if (drm_alloc_memctl(PAGE_SIZE)) {
                        page = NOPAGE_OOM;
                        goto out;
                }
                page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
                if (!page) {
                        drm_free_memctl(PAGE_SIZE);
                        page = NOPAGE_OOM;
                        goto out;
                }
                ++bm->cur_pages;
                SetPageLocked(page);
        }

        get_page(page);
 out:
        mutex_unlock(&dev->struct_mutex);
        return page;
}

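/*
 * (Re)establish the linear aperture mapping of a vma whose ttm is bound
 * and uncached, using io_remap_pfn_range() over the whole vma.
 */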
int drm_ttm_map_bound(struct vm_area_struct *vma)
{
        drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
        drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
        int ret = 0;

        if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
                unsigned long pfn = ttm->aper_offset +
                        (ttm->be->aperture_base >> PAGE_SHIFT);
                pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);

                ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
                                         vma->vm_end - vma->vm_start,
                                         pgprot);
        }
        return ret;
}

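/*
 * Track a new vma mapping the ttm: remember the vma itself and take a
 * reference on its mm, adding a new p_mm_entry if this mm isn't known yet.
 * drm_ttm_delete_vma() undoes this when the vma goes away.
 */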
int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n_entry;
        vma_entry_t *v_entry;
        drm_local_map_t *map = (drm_local_map_t *)
                vma->vm_private_data;
        struct mm_struct *mm = vma->vm_mm;

        v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
        if (!v_entry) {
                DRM_ERROR("Allocation of vma pointer entry failed\n");
                return -ENOMEM;
        }
        v_entry->vma = vma;
        map->handle = (void *) v_entry;
        list_add_tail(&v_entry->head, &ttm->vma_list);

        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                if (mm == entry->mm) {
                        atomic_inc(&entry->refcount);
                        return 0;
                } else if ((unsigned long)mm < (unsigned long)entry->mm) {
                        /* Keep the list sorted by mm; insert before this entry. */
                        break;
                }
        }

        n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
        if (!n_entry) {
                DRM_ERROR("Allocation of process mm pointer entry failed\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&n_entry->head);
        n_entry->mm = mm;
        n_entry->locked = 0;
        atomic_set(&n_entry->refcount, 0);
        list_add_tail(&n_entry->head, &entry->head);

        return 0;
}

void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n;
        vma_entry_t *v_entry, *v_n;
        int found = 0;
        struct mm_struct *mm = vma->vm_mm;

        list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
                if (v_entry->vma == vma) {
                        found = 1;
                        list_del(&v_entry->head);
                        drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
                        break;
                }
        }
        BUG_ON(!found);

        list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
                if (mm == entry->mm) {
                        if (atomic_add_negative(-1, &entry->refcount)) {
                                list_del(&entry->head);
                                BUG_ON(entry->locked);
                                drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
                        }
                        return;
                }
        }
        BUG_ON(1);
}

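/*
 * Try to take mmap_sem for writing on every mm that maps this ttm. If any
 * trylock fails, drop the locks already taken and return -EAGAIN so the
 * caller can back off and retry instead of deadlocking.
 */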
int drm_ttm_lock_mm(drm_ttm_t * ttm)
{
        p_mm_entry_t *entry;
        int lock_ok = 1;

        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                BUG_ON(entry->locked);
                if (!down_write_trylock(&entry->mm->mmap_sem)) {
                        lock_ok = 0;
                        break;
                }
                entry->locked = 1;
        }

        if (lock_ok)
                return 0;

        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                if (!entry->locked)
                        break;
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }

        /*
         * Possible deadlock. Try again. Our callers should handle this
         * and restart.
         */

        return -EAGAIN;
}

void drm_ttm_unlock_mm(drm_ttm_t * ttm)
{
        p_mm_entry_t *entry;

        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                BUG_ON(!entry->locked);
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }
}

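/*
 * Re-run drm_ttm_map_bound() on every vma mapping the ttm, then drop the
 * mm locks (which the caller is expected to hold via drm_ttm_lock_mm()).
 */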
int drm_ttm_remap_bound(drm_ttm_t *ttm)
{
        vma_entry_t *v_entry;
        int ret = 0;

        list_for_each_entry(v_entry, &ttm->vma_list, head) {
                ret = drm_ttm_map_bound(v_entry->vma);
                if (ret)
                        break;
        }

        drm_ttm_unlock_mm(ttm);
        return ret;
}

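/*
 * Finish tearing down the linear aperture mapping of an uncached ttm:
 * clear VM_PFNMAP on each vma that maps it, then drop the mm locks.
 */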
void drm_ttm_finish_unmap(drm_ttm_t *ttm)
{
        vma_entry_t *v_entry;

        if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
                return;

        list_for_each_entry(v_entry, &ttm->vma_list, head) {
                v_entry->vma->vm_flags &= ~VM_PFNMAP;
        }
        drm_ttm_unlock_mm(ttm);
}

#endif