Set the drm bus map type for each buffer object memory type.
[platform/upstream/libdrm.git] / linux-core / drm_compat.c
/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These have bad performance in the AGP module for the indicated kernel versions.
 */

int drm_map_page_into_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}
#endif

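/*
 * Illustrative sketch (not part of the original file; example_map_agp_pages
 * is a hypothetical caller): the intent of the comments above is that the
 * caller batches one global TLB flush after changing the attributes of a
 * whole range of pages, instead of flushing once per page.
 */
#if 0
static void example_map_agp_pages(struct page **pages, int num_pages)
{
        int i;

        for (i = 0; i < num_pages; ++i)
                drm_map_page_into_agp(pages[i]);
        global_flush_tlb();     /* one flush for the whole batch */
}
#endif
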
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
        static pgprot_t drm_protection_map[16] = {
                __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
        };

        return drm_protection_map[vm_flags & 0x0F];
#else
        extern pgprot_t protection_map[];
        return protection_map[vm_flags & 0x0F];
#endif
}
#endif

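/*
 * Illustrative use (hypothetical mmap path, not from this file): the compat
 * function above is meant as a drop-in for the helper that later kernels
 * export, e.g.
 *
 *      vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 */
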
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * vm code for kernels below 2.6.15, in which version a major vm rewrite
 * occurred. This implements a simple, straightforward version similar to
 * what's going to be in kernel 2.6.19+.
 * Kernels below 2.6.15 use nopage, whereas 2.6.19 and upwards use nopfn.
 */

static struct {
        spinlock_t lock;
        struct page *dummy_page;
        atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};

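/*
 * get_nopage_retry() lazily allocates a single shared dummy page on first
 * use and hands out an extra reference to it via get_page();
 * free_nopage_retry() drops the page again at teardown. The dummy page
 * gives pre-2.6.15 nopage handlers something harmless to map when a fault
 * merely needs to be retried.
 */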
struct page *get_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 0) {
                struct page *page = alloc_page(GFP_KERNEL);
                if (!page)
                        return NOPAGE_OOM;
                spin_lock(&drm_np_retry.lock);
                if (atomic_read(&drm_np_retry.present) == 0) {
                        drm_np_retry.dummy_page = page;
                        atomic_set(&drm_np_retry.present, 1);
                        page = NULL;
                }
                spin_unlock(&drm_np_retry.lock);
                if (page) {
                        /* Lost the race against another caller; drop our copy. */
                        __free_page(page);
                }
        }
        get_page(drm_np_retry.dummy_page);
        return drm_np_retry.dummy_page;
}

void free_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 1) {
                spin_lock(&drm_np_retry.lock);
                __free_page(drm_np_retry.dummy_page);
                drm_np_retry.dummy_page = NULL;
                atomic_set(&drm_np_retry.present, 0);
                spin_unlock(&drm_np_retry.lock);
        }
}

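/*
 * Pre-2.6.15 nopage entry point: adapts the fault()-style handler to the
 * old nopage interface by translating the VM_FAULT_* result codes into the
 * corresponding NOPAGE_* return values.
 */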
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                              unsigned long address,
                              int *type)
{
        struct fault_data data;

        if (type)
                *type = VM_FAULT_MINOR;

        data.address = address;
        data.vma = vma;
        drm_bo_vm_fault(vma, &data);
        switch (data.type) {
        case VM_FAULT_OOM:
                return NOPAGE_OOM;
        case VM_FAULT_SIGBUS:
                return NOPAGE_SIGBUS;
        default:
                break;
        }

        return NOPAGE_REFAULT;
}

#endif

#if !defined(DRM_FULL_MM_COMPAT) && \
  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))

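/*
 * Check whether the page-table entry for @addr is still clear. The page
 * tables are walked pgd -> pud -> pmd -> pte under mm->page_table_lock;
 * a missing level counts as clear.
 */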
static int drm_pte_is_clear(struct vm_area_struct *vma,
                            unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int ret = 1;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;

        spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto unlock;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                goto unlock;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                goto unlock;
        pte = pte_offset_map(pmd, addr);
        if (!pte)
                goto unlock;
        ret = pte_none(*pte);
        pte_unmap(pte);
 unlock:
        spin_unlock(&mm->page_table_lock);
        return ret;
}

int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                  unsigned long pfn)
{
        int ret;
        if (!drm_pte_is_clear(vma, addr))
                return -EBUSY;

        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
        return ret;
}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))

/**
 * While waiting for the fault() handler to appear in the mainline kernel,
 * we accomplish approximately the same thing by wrapping the fault code
 * with nopfn.
 */

unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                              unsigned long address)
{
        struct fault_data data;
        data.address = address;

        (void) drm_bo_vm_fault(vma, &data);
        if (data.type == VM_FAULT_OOM)
                return NOPFN_OOM;
        else if (data.type == VM_FAULT_SIGBUS)
                return NOPFN_SIGBUS;

        /*
         * pfn already set.
         */

        return 0;
}
#endif

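/*
 * Illustrative wiring (hypothetical, not from this file): on 2.6.19+ kernels
 * without fault(), the handler above would be hooked up through the vma
 * operations, e.g.
 *
 *      static struct vm_operations_struct example_drm_bo_vm_ops = {
 *              .nopfn = drm_bo_vm_nopfn,
 *              / * plus the usual open/close handlers * /
 *      };
 */
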
#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
 * is to first take the dev->struct_mutex and then trylock all mmap_sems. If
 * this fails for a single mmap_sem, we have to release all sems and the
 * dev->struct_mutex, release the cpu and retry. We also need to keep track of
 * all vmas mapping the ttm. Phew.
 */

typedef struct p_mm_entry {
        struct list_head head;
        struct mm_struct *mm;
        atomic_t refcount;
        int locked;
} p_mm_entry_t;

typedef struct vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
} vma_entry_t;

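/*
 * Bookkeeping for the workaround above: each buffer object keeps one
 * vma_entry_t per vma currently mapping it (bo->vma_list) and one
 * refcounted p_mm_entry_t per distinct mm_struct (bo->p_mm_list).
 */
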
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                              unsigned long address,
                              int *type)
{
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page;
        drm_ttm_t *ttm;
        drm_device_t *dev;

        mutex_lock(&bo->mutex);

        if (type)
                *type = VM_FAULT_MINOR;

        if (address > vma->vm_end) {
                page = NOPAGE_SIGBUS;
                goto out_unlock;
        }

        dev = bo->dev;

        if (drm_mem_reg_is_pci(dev, &bo->mem)) {
                DRM_ERROR("Invalid compat nopage.\n");
                page = NOPAGE_SIGBUS;
                goto out_unlock;
        }

        ttm = bo->ttm;
        drm_ttm_fixup_caching(ttm);
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
        page = drm_ttm_get_page(ttm, page_offset);
        if (!page) {
                page = NOPAGE_OOM;
                goto out_unlock;
        }

        get_page(page);
out_unlock:
        mutex_unlock(&bo->mutex);
        return page;
}


int drm_bo_map_bound(struct vm_area_struct *vma)
{
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        int ret = 0;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);
        BUG_ON(ret);

        if (bus_size) {
                drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
                unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
                pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
                ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
                                         vma->vm_end - vma->vm_start,
                                         pgprot);
        }

        return ret;
}

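/*
 * Register @vma with the buffer object: add it to bo->vma_list and either
 * bump the refcount of an existing p_mm_entry_t for its mm or allocate a
 * new one.
 */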
int drm_bo_add_vma(drm_buffer_object_t *bo, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n_entry;
        vma_entry_t *v_entry;
        struct mm_struct *mm = vma->vm_mm;

        v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
        if (!v_entry) {
                DRM_ERROR("Allocation of vma pointer entry failed\n");
                return -ENOMEM;
        }
        v_entry->vma = vma;

        list_add_tail(&v_entry->head, &bo->vma_list);

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                if (mm == entry->mm) {
                        atomic_inc(&entry->refcount);
                        return 0;
                } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
        }

        n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
        if (!n_entry) {
                DRM_ERROR("Allocation of process mm pointer entry failed\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&n_entry->head);
        n_entry->mm = mm;
        n_entry->locked = 0;
        atomic_set(&n_entry->refcount, 0);
        list_add_tail(&n_entry->head, &entry->head);

        return 0;
}

void drm_bo_delete_vma(drm_buffer_object_t *bo, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n;
        vma_entry_t *v_entry, *v_n;
        int found = 0;
        struct mm_struct *mm = vma->vm_mm;

        list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
                if (v_entry->vma == vma) {
                        found = 1;
                        list_del(&v_entry->head);
                        drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
                        break;
                }
        }
        BUG_ON(!found);

        list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
                if (mm == entry->mm) {
                        if (atomic_add_negative(-1, &entry->refcount)) {
                                list_del(&entry->head);
                                BUG_ON(entry->locked);
                                drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
                        }
                        return;
                }
        }
        BUG_ON(1);
}

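/*
 * Try to take the mmap_sem of every process mapping this buffer object in
 * write mode. If any trylock fails, everything taken so far is released
 * again and -EAGAIN is returned so the caller can back off and retry.
 */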
int drm_bo_lock_kmm(drm_buffer_object_t *bo)
{
        p_mm_entry_t *entry;
        int lock_ok = 1;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(entry->locked);
                if (!down_write_trylock(&entry->mm->mmap_sem)) {
                        lock_ok = 0;
                        break;
                }
                entry->locked = 1;
        }

        if (lock_ok)
                return 0;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                if (!entry->locked)
                        break;
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }

        /*
         * Possible deadlock. Try again. Our callers should handle this
         * and restart.
         */

        return -EAGAIN;
}

void drm_bo_unlock_kmm(drm_buffer_object_t *bo)
{
        p_mm_entry_t *entry;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(!entry->locked);
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }
}

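/*
 * Illustrative sketch of the expected calling pattern (example_lock_all_mms
 * is a hypothetical helper, not part of this file): per the comment at the
 * top of this section, a caller that fails to trylock all mmap_sems must
 * drop everything, release the cpu and retry.
 */
#if 0
static void example_lock_all_mms(drm_device_t *dev, drm_buffer_object_t *bo)
{
        int ret;

        do {
                mutex_lock(&dev->struct_mutex);
                ret = drm_bo_lock_kmm(bo);
                if (ret == -EAGAIN) {
                        mutex_unlock(&dev->struct_mutex);
                        schedule();     /* release the cpu, then retry */
                }
        } while (ret == -EAGAIN);

        /* ... work that needs all vmas quiesced goes here ... */

        drm_bo_unlock_kmm(bo);
        mutex_unlock(&dev->struct_mutex);
}
#endif
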
int drm_bo_remap_bound(drm_buffer_object_t *bo)
{
        vma_entry_t *v_entry;
        int ret = 0;

        if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
                list_for_each_entry(v_entry, &bo->vma_list, head) {
                        ret = drm_bo_map_bound(v_entry->vma);
                        if (ret)
                                break;
                }
        }

        return ret;
}

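/*
 * After the pfn mappings have been torn down, clear VM_PFNMAP on all tracked
 * vmas so that subsequent faults go through the nopage handler again; this
 * is the workaround for the do_no_page BUG mentioned at the top of this
 * section.
 */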
void drm_bo_finish_unmap(drm_buffer_object_t *bo)
{
        vma_entry_t *v_entry;

        list_for_each_entry(v_entry, &bo->vma_list, head) {
                v_entry->vma->vm_flags &= ~VM_PFNMAP;
        }
}

#endif