drm: fixup old kernel compat code
[profile/ivi/libdrm.git] / linux-core / drm_compat.c
/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is needed
 * in order to use the new DRM memory manager code with kernels that don't
 * support it directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * The AGP module's versions of these functions have bad performance on the
 * indicated kernel versions; these variants leave the global TLB flush to
 * the caller so that it can be batched.
 */

int drm_map_page_into_agp(struct page *page)
{
	int i;
	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
	int i;
	i = change_page_attr(page, 1, PAGE_KERNEL);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
#endif
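
/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to batch the TLB flush that the functions above deliberately
 * leave to it.  The page array, its length and the example_* name are
 * hypothetical.
 */
#if 0	/* illustrative only -- not compiled */
static void example_map_pages_into_agp(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; ++i)
		drm_map_page_into_agp(pages[i]);

	/* One global flush for the whole batch instead of one per page. */
	global_flush_tlb();
}
#endif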


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
	static pgprot_t drm_protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};

	return drm_protection_map[vm_flags & 0x0F];
#else
	extern pgprot_t protection_map[];
	return protection_map[vm_flags & 0x0F];
#endif
}
#endif
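
/*
 * Illustrative sketch (not part of the original file): vm_get_page_prot()
 * turns a vma's VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits into a page
 * protection, e.g. when recomputing vma->vm_page_prot for a cached mapping,
 * as the TTM path in drm_bo_vm_fault() below does.  The example_* name is
 * hypothetical.
 */
#if 0	/* illustrative only -- not compiled */
static void example_reset_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif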


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * VM code for kernels below 2.6.15, in which version a major VM rewrite
 * occurred. This implements a simple, straightforward version similar to
 * what is going to be in kernel 2.6.19+.
 * Kernels below 2.6.15 use nopage, whereas 2.6.19 and upwards use nopfn.
 */

static struct {
	spinlock_t lock;
	struct page *dummy_page;
	atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};


static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
				    struct fault_data *data);


struct page *get_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 0) {
		struct page *page = alloc_page(GFP_KERNEL);
		if (!page)
			return NOPAGE_OOM;
		spin_lock(&drm_np_retry.lock);
		drm_np_retry.dummy_page = page;
		atomic_set(&drm_np_retry.present, 1);
		spin_unlock(&drm_np_retry.lock);
	}
	get_page(drm_np_retry.dummy_page);
	return drm_np_retry.dummy_page;
}

void free_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 1) {
		spin_lock(&drm_np_retry.lock);
		__free_page(drm_np_retry.dummy_page);
		drm_np_retry.dummy_page = NULL;
		atomic_set(&drm_np_retry.present, 0);
		spin_unlock(&drm_np_retry.lock);
	}
}

struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address,
			      int *type)
{
	struct fault_data data;

	if (type)
		*type = VM_FAULT_MINOR;

	data.address = address;
	data.vma = vma;
	drm_bo_vm_fault(vma, &data);
	switch (data.type) {
	case VM_FAULT_OOM:
		return NOPAGE_OOM;
	case VM_FAULT_SIGBUS:
		return NOPAGE_SIGBUS;
	default:
		break;
	}

	return NOPAGE_REFAULT;
}

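/*
 * Illustrative sketch (not part of the original file): how the dummy "retry"
 * page is paired with module teardown.  It assumes NOPAGE_REFAULT expands to
 * get_nopage_retry() (as in this tree's drm_compat.h); the example_* names
 * are hypothetical.
 */
#if 0	/* illustrative only -- not compiled */
static struct page *example_vm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	/* The handler installs the PTE itself (as drm_bo_vm_nopage() above
	 * does via drm_bo_vm_fault()) and hands back the refcounted dummy
	 * page, so the core VM gets a valid struct page and the faulting
	 * access then proceeds through the PTE that was just installed. */
	return get_nopage_retry();
}

static void example_module_exit(void)
{
	free_nopage_retry();	/* release the dummy page at unload time */
}
#endif
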
#endif

#if !defined(DRM_FULL_MM_COMPAT) && \
  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))

static int drm_pte_is_clear(struct vm_area_struct *vma,
			    unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 1;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;

	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto unlock;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		goto unlock;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		goto unlock;
	pte = pte_offset_map(pmd, addr);
	if (!pte)
		goto unlock;
	ret = pte_none(*pte);
	pte_unmap(pte);
 unlock:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			 unsigned long pfn)
{
	int ret;
	if (!drm_pte_is_clear(vma, addr))
		return -EBUSY;

	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
	return ret;
}


static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
				    struct fault_data *data)
{
	unsigned long address = data->address;
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page = NULL;
	struct drm_ttm *ttm;
	struct drm_device *dev;
	unsigned long pfn;
	int err;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	mutex_lock(&bo->mutex);

	err = drm_bo_wait(bo, 0, 1, 0);
	if (err) {
		data->type = (err == -EAGAIN) ?
			VM_FAULT_MINOR : VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * If the buffer happens to be in a non-mappable location,
	 * move it to a mappable one.
	 */

	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
		unsigned long _end = jiffies + 3*DRM_HZ;
		uint32_t new_mask = bo->mem.mask |
			DRM_BO_FLAG_MAPPABLE |
			DRM_BO_FLAG_FORCE_MAPPABLE;

		do {
			err = drm_bo_move_buffer(bo, new_mask, 0, 0);
		} while ((err == -EAGAIN) && !time_after_eq(jiffies, _end));

		if (err) {
			DRM_ERROR("Timeout moving buffer to mappable location.\n");
			data->type = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	if (address > vma->vm_end) {
		data->type = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	dev = bo->dev;
	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);

	if (err) {
		data->type = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

	if (bus_size) {
		struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];

		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
	} else {
		ttm = bo->ttm;

		drm_ttm_fixup_caching(ttm);
		page = drm_ttm_get_page(ttm, page_offset);
		if (!page) {
			data->type = VM_FAULT_OOM;
			goto out_unlock;
		}
		pfn = page_to_pfn(page);
		vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
			vm_get_page_prot(vma->vm_flags) :
			drm_io_prot(_DRM_TTM, vma);
	}

	err = vm_insert_pfn(vma, address, pfn);

	if (!err || err == -EBUSY)
		data->type = VM_FAULT_MINOR;
	else
		data->type = VM_FAULT_OOM;
out_unlock:
	mutex_unlock(&bo->mutex);
	return NULL;
}

#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
  !defined(DRM_FULL_MM_COMPAT)

/**
 * drm_bo_vm_nopfn - nopfn handler for buffer-object mappings
 *
 * Called on kernels 2.6.19+ that lack full mm compatibility.  Runs the
 * common fault handler and translates its result into the NOPFN_*
 * return codes expected by the core VM.
 */

unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
			      unsigned long address)
{
	struct fault_data data;
	data.address = address;

	(void) drm_bo_vm_fault(vma, &data);
	if (data.type == VM_FAULT_OOM)
		return NOPFN_OOM;
	else if (data.type == VM_FAULT_SIGBUS)
		return NOPFN_SIGBUS;

	/*
	 * pfn already set.
	 */

	return 0;
}
#endif
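
/*
 * Illustrative sketch (not part of the original file): how the compat
 * handlers above would typically be wired into a vm_operations_struct.
 * The #if conditions mirror the ones used in this file; the vm_ops name
 * and the open/close handlers are assumptions modelled on drm_vm.c.
 */
#if 0	/* illustrative only -- not compiled */
static struct vm_operations_struct example_bo_vm_ops = {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
  !defined(DRM_FULL_MM_COMPAT)
	.nopfn = drm_bo_vm_nopfn,	/* 2.6.19+ without fault() support */
#else
	.nopage = drm_bo_vm_nopage,	/* pre-2.6.15 / DRM_ODD_MM_COMPAT kernels */
#endif
	.open = drm_bo_vm_open,
	.close = drm_bo_vm_close,
};
#endif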


#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
 * is to first take the dev->struct_mutex and then trylock all mmap_sems. If
 * this fails for a single mmap_sem, we have to release all sems and the
 * dev->struct_mutex, release the cpu and retry. We also need to keep track of
 * all vmas mapping the ttm. Phew.
 */

typedef struct p_mm_entry {
	struct list_head head;
	struct mm_struct *mm;
	atomic_t refcount;
	int locked;
} p_mm_entry_t;

typedef struct vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
} vma_entry_t;


struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address,
			      int *type)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page;
	struct drm_ttm *ttm;
	struct drm_device *dev;

	mutex_lock(&bo->mutex);

	if (type)
		*type = VM_FAULT_MINOR;

	if (address > vma->vm_end) {
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	dev = bo->dev;

	if (drm_mem_reg_is_pci(dev, &bo->mem)) {
		DRM_ERROR("Invalid compat nopage.\n");
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	ttm = bo->ttm;
	drm_ttm_fixup_caching(ttm);
	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	page = drm_ttm_get_page(ttm, page_offset);
	if (!page) {
		page = NOPAGE_OOM;
		goto out_unlock;
	}

	get_page(page);
out_unlock:
	mutex_unlock(&bo->mutex);
	return page;
}

int drm_bo_map_bound(struct vm_area_struct *vma)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
	int ret = 0;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	BUG_ON(ret);

	if (bus_size) {
		struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
		unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
		pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
					 vma->vm_end - vma->vm_start,
					 pgprot);
	}

	return ret;
}

int drm_bo_add_vma(struct drm_buffer_object *bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n_entry;
	vma_entry_t *v_entry;
	struct mm_struct *mm = vma->vm_mm;

	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
	if (!v_entry) {
		DRM_ERROR("Allocation of vma pointer entry failed\n");
		return -ENOMEM;
	}
	v_entry->vma = vma;

	list_add_tail(&v_entry->head, &bo->vma_list);

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			atomic_inc(&entry->refcount);
			return 0;
		} else if ((unsigned long)mm < (unsigned long)entry->mm)
			break;	/* keep the per-mm list sorted; insert before this entry */
	}

	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
	if (!n_entry) {
		DRM_ERROR("Allocation of process mm pointer entry failed\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&n_entry->head);
	n_entry->mm = mm;
	n_entry->locked = 0;
	atomic_set(&n_entry->refcount, 0);
	list_add_tail(&n_entry->head, &entry->head);

	return 0;
}

void drm_bo_delete_vma(struct drm_buffer_object *bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n;
	vma_entry_t *v_entry, *v_n;
	int found = 0;
	struct mm_struct *mm = vma->vm_mm;

	list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
		if (v_entry->vma == vma) {
			found = 1;
			list_del(&v_entry->head);
			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
			break;
		}
	}
	BUG_ON(!found);

	list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			if (atomic_add_negative(-1, &entry->refcount)) {
				list_del(&entry->head);
				BUG_ON(entry->locked);
				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
			}
			return;
		}
	}
	BUG_ON(1);
}

int drm_bo_lock_kmm(struct drm_buffer_object *bo)
{
	p_mm_entry_t *entry;
	int lock_ok = 1;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(entry->locked);
		if (!down_write_trylock(&entry->mm->mmap_sem)) {
			lock_ok = 0;
			break;
		}
		entry->locked = 1;
	}

	if (lock_ok)
		return 0;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (!entry->locked)
			break;
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}

	/*
	 * Possible deadlock. Try again. Our callers should handle this
	 * and restart.
	 */

	return -EAGAIN;
}
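
/*
 * Illustrative sketch (not part of the original file): the retry pattern a
 * caller is expected to use around drm_bo_lock_kmm(), following the locking
 * order described at the top of this DRM_ODD_MM_COMPAT section
 * (dev->struct_mutex first, then all mmap_sems).  The example_* name is
 * hypothetical.
 */
#if 0	/* illustrative only -- not compiled */
static int example_lock_all_mms(struct drm_device *dev,
				struct drm_buffer_object *bo)
{
	int ret;

	do {
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_lock_kmm(bo);
		if (ret == -EAGAIN) {
			/* A trylock failed on one mmap_sem: back off
			 * completely, give up the cpu and try again. */
			mutex_unlock(&dev->struct_mutex);
			schedule();
		}
	} while (ret == -EAGAIN);

	return ret;	/* 0 on success; all mmap_sems are write-locked */
}
#endif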

void drm_bo_unlock_kmm(struct drm_buffer_object *bo)
{
	p_mm_entry_t *entry;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(!entry->locked);
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}
}

int drm_bo_remap_bound(struct drm_buffer_object *bo)
{
	vma_entry_t *v_entry;
	int ret = 0;

	if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
		list_for_each_entry(v_entry, &bo->vma_list, head) {
			ret = drm_bo_map_bound(v_entry->vma);
			if (ret)
				break;
		}
	}

	return ret;
}

void drm_bo_finish_unmap(struct drm_buffer_object *bo)
{
	vma_entry_t *v_entry;

	list_for_each_entry(v_entry, &bo->vma_list, head) {
		v_entry->vma->vm_flags &= ~VM_PFNMAP;
	}
}

#endif

#ifdef DRM_IDR_COMPAT_FN
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
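
/*
 * Illustrative sketch (not part of the original file): a minimal
 * idr_for_each() callback matching the kernel-doc above.  The example_*
 * names and the use of printk are hypothetical.
 */
#if 0	/* illustrative only -- not compiled */
static int example_print_entry(int id, void *p, void *data)
{
	printk(KERN_DEBUG "idr entry %d -> %p\n", id, p);
	return 0;	/* a non-zero return would stop the iteration */
}

static void example_dump_idr(struct idr *idp)
{
	/* The caller must serialize against idr_get_new() and idr_remove(). */
	idr_for_each(idp, example_print_entry, NULL);
}
#endif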

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max && !error) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p) {
				memset(p, 0, sizeof *p);
				free_layer(idp, p);
			}
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->top = NULL;
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
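
/*
 * Illustrative sketch (not part of the original file): the typical clean-up
 * sequence described in the idr_remove_all() kernel-doc above.  The stored
 * object type and the example_* names are hypothetical.
 */
#if 0	/* illustrative only -- not compiled */
static int example_free_object(int id, void *p, void *data)
{
	kfree(p);		/* free the object stored under this id */
	return 0;
}

static void example_idr_cleanup(struct idr *idp)
{
	idr_for_each(idp, example_free_object, NULL);	/* free the objects */
	idr_remove_all(idp);				/* drop all id mappings */
	idr_destroy(idp);				/* free cached idr_layers */
}
#endif
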
#endif