libdrm: linux-core/drm_compat.c
/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These have bad performance in the AGP module for the indicated kernel versions.
 */

int drm_map_page_into_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}
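
/*
 * Hypothetical usage sketch (not part of this file): the comments above mean
 * that a caller changing the attributes of a whole batch of AGP pages should
 * issue a single global_flush_tlb() afterwards instead of one per page;
 * pages[] and num_pages below are illustrative names only:
 *
 *      for (i = 0; i < num_pages; ++i)
 *              drm_map_page_into_agp(pages[i]);
 *      global_flush_tlb();
 */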
#endif


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
        static pgprot_t drm_protection_map[16] = {
                __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
        };

        return drm_protection_map[vm_flags & 0x0F];
#else
        extern pgprot_t protection_map[];
        return protection_map[vm_flags & 0x0F];
#endif
}
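
/*
 * Illustrative example (standard protection_map layout assumed, not code from
 * this file): the low four vm_flags bits (VM_READ, VM_WRITE, VM_EXEC,
 * VM_SHARED) index the table above, so
 *
 *      pgprot_t prot = vm_get_page_prot(VM_READ | VM_WRITE | VM_SHARED);
 *
 * picks the shared read/write entry __S011, while a private read-only
 * mapping (VM_READ alone) picks __P001.
 */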
#endif


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * VM code for kernels below 2.6.15, in which a major VM rewrite occurred.
 * This implements a simple, straightforward version similar to what is
 * going to be in kernel 2.6.19+.
 * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use nopfn.
 */

static struct {
        spinlock_t lock;
        struct page *dummy_page;
        atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};


static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                                    struct fault_data *data);


struct page * get_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 0) {
                struct page *page = alloc_page(GFP_KERNEL);
                if (!page)
                        return NOPAGE_OOM;
                spin_lock(&drm_np_retry.lock);
                drm_np_retry.dummy_page = page;
                atomic_set(&drm_np_retry.present, 1);
                spin_unlock(&drm_np_retry.lock);
        }
        get_page(drm_np_retry.dummy_page);
        return drm_np_retry.dummy_page;
}

void free_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 1) {
                spin_lock(&drm_np_retry.lock);
                __free_page(drm_np_retry.dummy_page);
                drm_np_retry.dummy_page = NULL;
                atomic_set(&drm_np_retry.present, 0);
                spin_unlock(&drm_np_retry.lock);
        }
}
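
/*
 * Illustrative sketch (an assumption about how these helpers are used, not
 * code from this file): a nopage handler that wants the faulting thread to
 * simply retry returns the refcounted dummy page from get_nopage_retry(),
 * and the driver releases it once at teardown time:
 *
 *      static struct page *some_nopage(struct vm_area_struct *vma,
 *                                      unsigned long address, int *type)
 *      {
 *              if (must_retry_later)          // illustrative condition
 *                      return get_nopage_retry();
 *              ...
 *      }
 *
 *      // at module/driver teardown:
 *      free_nopage_retry();
 */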

struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                              unsigned long address,
                              int *type)
{
        struct fault_data data;

        if (type)
                *type = VM_FAULT_MINOR;

        data.address = address;
        data.vma = vma;
        drm_bo_vm_fault(vma, &data);
        switch (data.type) {
        case VM_FAULT_OOM:
                return NOPAGE_OOM;
        case VM_FAULT_SIGBUS:
                return NOPAGE_SIGBUS;
        default:
                break;
        }

        return NOPAGE_REFAULT;
}
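
/*
 * Hedged wiring sketch (assumed, not code from this file): on pre-2.6.15
 * kernels the handler above is hooked up through the nopage callback of the
 * buffer-object vm_operations_struct, roughly:
 *
 *      static struct vm_operations_struct drm_bo_vm_ops = {
 *              .nopage = drm_bo_vm_nopage,
 *              .open = drm_bo_vm_open,    // assumed open/close callback names
 *              .close = drm_bo_vm_close,
 *      };
 */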

#endif

#if !defined(DRM_FULL_MM_COMPAT) && \
  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))

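/*
 * Return 1 if no pte is currently installed for @addr in @vma's address
 * space, 0 otherwise. Walks pgd -> pud -> pmd -> pte under the
 * page_table_lock. Only needed by the (currently disabled) vm_insert_pfn()
 * fallback below.
 */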
static int drm_pte_is_clear(struct vm_area_struct *vma,
                            unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int ret = 1;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;

        spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto unlock;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                goto unlock;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                goto unlock;
        pte = pte_offset_map(pmd, addr);
        if (!pte)
                goto unlock;
        ret = pte_none(*pte);
        pte_unmap(pte);
 unlock:
        spin_unlock(&mm->page_table_lock);
        return ret;
}

#if 0
static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                         unsigned long pfn)
{
        int ret;
        if (!drm_pte_is_clear(vma, addr))
                return -EBUSY;

        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
        return ret;
}
#endif

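/*
 * Common fault handling for buffer-object mappings: wait for the buffer to
 * idle, move it to a mappable location if necessary, compute the pfn either
 * from the PCI/AGP aperture or from the ttm backing pages, and insert it
 * into the faulting vma with vm_insert_pfn(). The VM_FAULT_* result is
 * reported through data->type; the return value is always NULL.
 */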
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                                    struct fault_data *data)
{
        unsigned long address = data->address;
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page = NULL;
        drm_ttm_t *ttm;
        drm_device_t *dev;
        unsigned long pfn;
        int err;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        mutex_lock(&bo->mutex);

        err = drm_bo_wait(bo, 0, 1, 0);
        if (err) {
                data->type = (err == -EAGAIN) ?
                        VM_FAULT_MINOR : VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        /*
         * If the buffer happens to be in a non-mappable location,
         * move it to a mappable one.
         */

        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
                unsigned long _end = jiffies + 3*DRM_HZ;
                uint32_t new_mask = bo->mem.mask |
                        DRM_BO_FLAG_MAPPABLE |
                        DRM_BO_FLAG_FORCE_MAPPABLE;

                do {
                        err = drm_bo_move_buffer(bo, new_mask, 0, 0);
                } while ((err == -EAGAIN) && !time_after_eq(jiffies, _end));

                if (err) {
                        DRM_ERROR("Timeout moving buffer to mappable location.\n");
                        data->type = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        if (address > vma->vm_end) {
                data->type = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        dev = bo->dev;
        err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
                                &bus_size);

        if (err) {
                data->type = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

        if (bus_size) {
                drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];

                pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
                vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
        } else {
                ttm = bo->ttm;

                drm_ttm_fixup_caching(ttm);
                page = drm_ttm_get_page(ttm, page_offset);
                if (!page) {
                        data->type = VM_FAULT_OOM;
                        goto out_unlock;
                }
                pfn = page_to_pfn(page);
                vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
                        vm_get_page_prot(vma->vm_flags) :
                        drm_io_prot(_DRM_TTM, vma);
        }

        err = vm_insert_pfn(vma, address, pfn);

        if (!err || err == -EBUSY)
                data->type = VM_FAULT_MINOR;
        else
                data->type = VM_FAULT_OOM;
out_unlock:
        mutex_unlock(&bo->mutex);
        return NULL;
}

#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
  !defined(DRM_FULL_MM_COMPAT)

/**
 * drm_bo_vm_nopfn - nopfn handler for 2.6.19+ kernels without full mm compat.
 *
 * Resolves the fault through drm_bo_vm_fault() and translates the
 * VM_FAULT_* result into the corresponding NOPFN_* return value.
 */

unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                              unsigned long address)
{
        struct fault_data data;
        data.address = address;

        (void) drm_bo_vm_fault(vma, &data);
        if (data.type == VM_FAULT_OOM)
                return NOPFN_OOM;
        else if (data.type == VM_FAULT_SIGBUS)
                return NOPFN_SIGBUS;

        /*
         * pfn already set.
         */

        return 0;
}
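
/*
 * Hedged wiring sketch (assumed, not code from this file): on 2.6.19+
 * kernels without DRM_FULL_MM_COMPAT the handler above is installed through
 * the nopfn callback rather than nopage, roughly:
 *
 *      static struct vm_operations_struct drm_bo_vm_ops = {
 *              .nopfn = drm_bo_vm_nopfn,
 *              ...
 *      };
 */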
#endif


#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
 * is to first take the dev->struct_mutex, and then trylock all mmap_sems. If
 * this fails for a single mmap_sem, we have to release all sems and the
 * dev->struct_mutex, release the cpu and retry. We also need to keep track of
 * all vmas mapping the ttm. phew.
 */
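
/*
 * Hedged caller sketch (assumed, not code from this file): the retry protocol
 * described above roughly amounts to
 *
 *      do {
 *              mutex_lock(&dev->struct_mutex);
 *              ret = drm_bo_lock_kmm(bo);
 *              if (ret == -EAGAIN) {
 *                      mutex_unlock(&dev->struct_mutex);
 *                      schedule();
 *              }
 *      } while (ret == -EAGAIN);
 *
 *      ... unmap, move and remap the buffer ...
 *
 *      drm_bo_unlock_kmm(bo);
 *      mutex_unlock(&dev->struct_mutex);
 */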

typedef struct p_mm_entry {
        struct list_head head;
        struct mm_struct *mm;
        atomic_t refcount;
        int locked;
} p_mm_entry_t;

typedef struct vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
} vma_entry_t;


struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                              unsigned long address,
                              int *type)
{
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page;
        drm_ttm_t *ttm;
        drm_device_t *dev;

        mutex_lock(&bo->mutex);

        if (type)
                *type = VM_FAULT_MINOR;

        if (address > vma->vm_end) {
                page = NOPAGE_SIGBUS;
                goto out_unlock;
        }

        dev = bo->dev;

        if (drm_mem_reg_is_pci(dev, &bo->mem)) {
                DRM_ERROR("Invalid compat nopage.\n");
                page = NOPAGE_SIGBUS;
                goto out_unlock;
        }

        ttm = bo->ttm;
        drm_ttm_fixup_caching(ttm);
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
        page = drm_ttm_get_page(ttm, page_offset);
        if (!page) {
                page = NOPAGE_OOM;
                goto out_unlock;
        }

        get_page(page);
out_unlock:
        mutex_unlock(&bo->mutex);
        return page;
}

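/*
 * Re-establish the linear mapping of a bound, PCI-mapped buffer object in
 * the given vma: look up the buffer's bus address range and io_remap the
 * whole vma to it with the appropriate caching attributes. A no-op for
 * buffers that are not in a PCI-mappable memory type (bus_size == 0).
 */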
int drm_bo_map_bound(struct vm_area_struct *vma)
{
        drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
        int ret = 0;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);
        BUG_ON(ret);

        if (bus_size) {
                drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
                unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
                pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
                ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
                                         vma->vm_end - vma->vm_start,
                                         pgprot);
        }

        return ret;
}


int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n_entry;
        vma_entry_t *v_entry;
        struct mm_struct *mm = vma->vm_mm;

        v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
        if (!v_entry) {
                DRM_ERROR("Allocation of vma pointer entry failed\n");
                return -ENOMEM;
        }
        v_entry->vma = vma;

        list_add_tail(&v_entry->head, &bo->vma_list);

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                if (mm == entry->mm) {
                        atomic_inc(&entry->refcount);
                        return 0;
                } else if ((unsigned long)mm < (unsigned long)entry->mm)
                        /* Keep the list sorted by mm pointer: the new entry
                         * is inserted before the first larger one below. */
                        break;
        }

        n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
        if (!n_entry) {
                DRM_ERROR("Allocation of process mm pointer entry failed\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&n_entry->head);
        n_entry->mm = mm;
        n_entry->locked = 0;
        atomic_set(&n_entry->refcount, 0);
        list_add_tail(&n_entry->head, &entry->head);

        return 0;
}

void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n;
        vma_entry_t *v_entry, *v_n;
        int found = 0;
        struct mm_struct *mm = vma->vm_mm;

        list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
                if (v_entry->vma == vma) {
                        found = 1;
                        list_del(&v_entry->head);
                        drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
                        break;
                }
        }
        BUG_ON(!found);

        list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
                if (mm == entry->mm) {
                        if (atomic_add_negative(-1, &entry->refcount)) {
                                list_del(&entry->head);
                                BUG_ON(entry->locked);
                                drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
                        }
                        return;
                }
        }
        BUG_ON(1);
}
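
/*
 * Hedged usage sketch (assumed, not code from this file): the two functions
 * above are meant to be paired from the buffer-object vma open/close
 * callbacks, so that the vma and mm bookkeeping tracks the kernel's vma
 * lifetime, roughly:
 *
 *      static void drm_bo_vm_open(struct vm_area_struct *vma)
 *      {
 *              drm_buffer_object_t *bo = vma->vm_private_data;
 *              drm_bo_add_vma(bo, vma);
 *      }
 *
 *      static void drm_bo_vm_close(struct vm_area_struct *vma)
 *      {
 *              drm_buffer_object_t *bo = vma->vm_private_data;
 *              drm_bo_delete_vma(bo, vma);
 *      }
 */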

int drm_bo_lock_kmm(drm_buffer_object_t * bo)
{
        p_mm_entry_t *entry;
        int lock_ok = 1;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(entry->locked);
                if (!down_write_trylock(&entry->mm->mmap_sem)) {
                        lock_ok = 0;
                        break;
                }
                entry->locked = 1;
        }

        if (lock_ok)
                return 0;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                if (!entry->locked)
                        break;
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }

        /*
         * Possible deadlock. Try again. Our callers should handle this
         * and restart.
         */

        return -EAGAIN;
}

void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
{
        p_mm_entry_t *entry;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(!entry->locked);
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }
}

int drm_bo_remap_bound(drm_buffer_object_t *bo)
{
        vma_entry_t *v_entry;
        int ret = 0;

        if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
                list_for_each_entry(v_entry, &bo->vma_list, head) {
                        ret = drm_bo_map_bound(v_entry->vma);
                        if (ret)
                                break;
                }
        }

        return ret;
}

void drm_bo_finish_unmap(drm_buffer_object_t *bo)
{
        vma_entry_t *v_entry;

        list_for_each_entry(v_entry, &bo->vma_list, head) {
                v_entry->vma->vm_flags &= ~VM_PFNMAP;
        }
}
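
/*
 * Hedged sequence sketch (assumed, not code from this file): around a buffer
 * move, the helpers above are used roughly as
 *
 *      drm_bo_lock_kmm(bo);            // all mmap_sems held exclusively
 *      ... unmap the old linear mapping and move the buffer ...
 *      drm_bo_finish_unmap(bo);        // drop VM_PFNMAP on the tracked vmas
 *      drm_bo_remap_bound(bo);         // re-io_remap vmas if now PCI-mapped
 *      drm_bo_unlock_kmm(bo);
 */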

#endif