/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static int drm_bo_mmap_locked(struct vm_area_struct *vma,
                              struct file *filp,
                              drm_local_map_t *map);

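/**
 * Compute the page protection bits for an I/O mapping.
 *
 * \param map_type type of the map being set up (_DRM_REGISTERS, _DRM_AGP, ...).
 * \param vma virtual memory area.
 * \return page protection derived from the vma flags, with caching disabled
 * for non-AGP maps on x86, no-cache (and guarded for registers) on PowerPC,
 * and write-combining on ia64 where the EFI memory map allows it.
 */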
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                            vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        drm_hash_item_t *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_nopage_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_nopage_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_nopage_error;

        r_list = drm_hash_entry(hash, drm_map_list_t, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem)
                        goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

#if 0
                /* page_count() not defined everywhere */
                DRM_DEBUG
                    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                     baddr, __va(agpmem->memory->memory[offset]), offset,
                     page_count(page));
#endif

                return page;
        }
      vm_nopage_error:
        return NOPAGE_SIGBUS;   /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        return NOPAGE_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!map)
                return NOPAGE_SIGBUS;   /* Nothing allocated */

        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_SIGBUS;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list = &dev->maplist->head;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        case _DRM_TTM:
                                BUG_ON(1);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!dma->pagelist)
                return NOPAGE_SIGBUS;   /* Nothing allocated */

        offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
                                                   unsigned long address)
{
        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        DRM_DEBUG("\n");
        if (!entry)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!entry->pagelist)
                return NOPAGE_SIGBUS;   /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_sg_nopage(vma, address);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
        .nopage = drm_vm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
        .nopage = drm_vm_shm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
        .nopage = drm_vm_dma_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
        .nopage = drm_vm_sg_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid = current->pid;
                dev->vmalist = vma_entry;
        }
}

static void drm_vm_open(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        mutex_lock(&dev->struct_mutex);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        drm_device_dma_t *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->head->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

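/** Default implementation of the driver's get_map_ofs() hook: return the
 * map's own offset unchanged. */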
unsigned long drm_core_get_map_ofs(drm_map_t * map)
{
        return map->offset;
}
EXPORT_SYMBOL(drm_core_get_map_ofs);

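/**
 * Default implementation of the driver's get_reg_ofs() hook.  Only Alpha
 * needs a non-zero correction (dense memory base relative to the hose's
 * PCI memory space); everywhere else the register offset is used as-is.
 */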
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise looks up the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally sets the file pointer and calls drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        unsigned long offset = 0;
        drm_hash_item_t *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */

        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, drm_map_list_t, hash)->map;

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to the bus dma
                         * address from the CPU, so for memory of type
                         * _DRM_AGP we'll deal with sorting out the real
                         * physical pages and mappings in nopage().
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = dev->driver->get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#else
                if (remap_pfn_range(vma, vma->vm_start,
                                    (map->offset + offset) >> PAGE_SHIFT,
                                    vma->vm_end - vma->vm_start,
                                    vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, map->offset + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid nopage */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_TTM:
                return drm_bo_mmap_locked(vma, filp, map);
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

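/**
 * mmap file operation entry point for DRM files.
 *
 * Takes drm_device::struct_mutex and hands the real work to drm_mmap_locked().
 */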
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);
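
/*
 * Rough sketch of how userspace typically reaches drm_mmap(): the mmap offset
 * is the map handle (the token looked up in dev->map_hash above), not a file
 * offset.  The handle and size here are assumed to come from a prior
 * DRM_IOCTL_ADD_MAP / DRM_IOCTL_GET_MAP (e.g. via libdrm's drmAddMap());
 * this block is illustration only and is never compiled.
 */
#if 0
#include <fcntl.h>
#include <sys/mman.h>

static void *map_drm_region(unsigned long handle, size_t size)
{
        int fd = open("/dev/dri/card0", O_RDWR);
        if (fd < 0)
                return MAP_FAILED;
        /* libdrm's drmMap() is essentially this mmap() call. */
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, handle);
}
#endif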

/**
 * buffer object vm functions.
 */

/**
 * \c Pagefault method for buffer objects.
 *
 * \param vma Virtual memory area.
 * \param data Fault data on failure or refault.
 * \return Always NULL as we insert pfns directly.
 *
 * It's important that pfns are inserted while holding the bo->mutex lock;
 * otherwise we might race with unmap_mapping_range(), which is always
 * called with the bo->mutex lock held.
 *
 * It's not pretty to modify the vma->vm_page_prot variable while not
 * holding the mm semaphore in write mode. However, we have it in read mode,
 * so we won't be racing with any other writers, and we only actually modify
 * it when no ptes are present, so it shouldn't be a big deal.
 */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) ||    \
     LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
#ifdef DRM_FULL_MM_COMPAT
static
#endif
struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                             struct fault_data *data)
{
        unsigned long address = data->address;
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page = NULL;
        drm_ttm_t *ttm;
        drm_device_t *dev;
        unsigned long pfn;
        int err;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        mutex_lock(&bo->mutex);

        err = drm_bo_wait(bo, 0, 0, 0);
        if (err) {
                data->type = (err == -EAGAIN) ?
                        VM_FAULT_MINOR : VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        /*
         * If the buffer happens to be in a non-mappable location,
         * move it to a mappable one.
         */

#ifdef DRM_BO_FULL_COMPAT
        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
                uint32_t new_mask = bo->mem.mask |
                        DRM_BO_FLAG_MAPPABLE |
                        DRM_BO_FLAG_FORCE_MAPPABLE;
                err = drm_bo_move_buffer(bo, new_mask, 0, 0);

                if (err) {
                        data->type = (err == -EAGAIN) ?
                                VM_FAULT_MINOR : VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }
#else
        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
                unsigned long _end = jiffies + 3*DRM_HZ;
                uint32_t new_mask = bo->mem.mask |
                        DRM_BO_FLAG_MAPPABLE |
                        DRM_BO_FLAG_FORCE_MAPPABLE;

                do {
                        err = drm_bo_move_buffer(bo, new_mask, 0, 0);
                } while ((err == -EAGAIN) && !time_after_eq(jiffies, _end));

                if (err) {
                        DRM_ERROR("Timeout moving buffer to mappable location.\n");
                        data->type = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }
#endif

        if (address > vma->vm_end) {
                data->type = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        dev = bo->dev;
        err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
                                &bus_size);

        if (err) {
                data->type = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

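        /*
         * A non-zero bus_size means the buffer lives in a PCI/AGP aperture:
         * map the aperture pfn with I/O page protection.  Otherwise it is
         * backed by system memory, so take the page from the ttm and keep
         * the normal cached protection.
         */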
        if (bus_size) {
                pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
                vma->vm_page_prot = drm_io_prot(_DRM_AGP, vma);
        } else {
                ttm = bo->ttm;

                drm_ttm_fixup_caching(ttm);
                page = drm_ttm_get_page(ttm, page_offset);
                if (!page) {
                        data->type = VM_FAULT_OOM;
                        goto out_unlock;
                }
                pfn = page_to_pfn(page);
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }

        err = vm_insert_pfn(vma, address, pfn);

        if (!err || err == -EBUSY)
                data->type = VM_FAULT_MINOR;
        else
                data->type = VM_FAULT_OOM;
out_unlock:
        mutex_unlock(&bo->mutex);
        return NULL;
}
#endif

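/**
 * Like drm_vm_open_locked(), but also takes a reference on the buffer
 * object backing the vma (and, with DRM_ODD_MM_COMPAT, tracks the vma on
 * the buffer object).
 */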
static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;

        drm_vm_open_locked(vma);
        atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT
        drm_bo_add_vma(bo, vma);
#endif
}

/**
 * \c vma open method for buffer objects.
 *
 * \param vma virtual memory area.
 */

static void drm_bo_vm_open(struct vm_area_struct *vma)
{
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        drm_device_t *dev = bo->dev;

        mutex_lock(&dev->struct_mutex);
        drm_bo_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c vma close method for buffer objects.
 *
 * \param vma virtual memory area.
 */

static void drm_bo_vm_close(struct vm_area_struct *vma)
{
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        drm_device_t *dev = bo->dev;

        drm_vm_close(vma);
        if (bo) {
                mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
                drm_bo_delete_vma(bo, vma);
#endif
                drm_bo_usage_deref_locked(bo);
                mutex_unlock(&dev->struct_mutex);
        }
        return;
}

static struct vm_operations_struct drm_bo_vm_ops = {
#ifdef DRM_FULL_MM_COMPAT
        .fault = drm_bo_vm_fault,
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
        .nopfn = drm_bo_vm_nopfn,
#else
        .nopage = drm_bo_vm_nopage,
#endif
#endif
        .open = drm_bo_vm_open,
        .close = drm_bo_vm_close,
};

/**
 * mmap buffer object memory.
 *
 * \param vma virtual memory area.
 * \param filp file pointer.
 * \param map The buffer object drm map.
 * \return zero on success or a negative number on failure.
 */

int drm_bo_mmap_locked(struct vm_area_struct *vma,
                       struct file *filp,
                       drm_local_map_t *map)
{
        vma->vm_ops = &drm_bo_vm_ops;
        vma->vm_private_data = map->handle;
        vma->vm_file = filp;
        vma->vm_flags |= VM_RESERVED | VM_IO;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
        vma->vm_flags |= VM_PFNMAP;
#endif
        drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
        drm_bo_map_bound(vma);
#endif
        return 0;
}