/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static void drm_vm_ttm_close(struct vm_area_struct *vma);
static int drm_vm_ttm_open(struct vm_area_struct *vma);
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);


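/**
 * Compute the page protection to use for an I/O mapping.
 *
 * \param map_type type of the map (e.g. _DRM_REGISTERS or _DRM_FRAME_BUFFER).
 * \param vma virtual memory area being mapped.
 * \return page protection with architecture-specific caching attributes
 * applied; used by drm_mmap() and by the TTM fault handler below.
 */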
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}


/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        drm_hash_item_t *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_nopage_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_nopage_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_nopage_error;

        r_list = drm_hash_entry(hash, drm_map_list_t, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }
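                /*
                 * Illustration (hypothetical values): with
                 * agpmem->bound == 0xe0000000 and agpmem->pages == 16,
                 * bus addresses in [0xe0000000, 0xe0010000) match this
                 * entry on a 4K-page system.
                 */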

                if (!agpmem)
                        goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

#if 0
                /* page_count() not defined everywhere */
                DRM_DEBUG
                    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                     baddr, __va(agpmem->memory->memory[offset]), offset,
                     page_count(page));
#endif

                return page;
        }
      vm_nopage_error:
        return NOPAGE_SIGBUS;   /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        return NOPAGE_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
static
#endif
struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
                              struct fault_data *data)
{
        unsigned long address = data->address;
        drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page;
        drm_ttm_t *ttm;
        drm_buffer_manager_t *bm;
        drm_device_t *dev;
        unsigned long pfn;
        int err;
        pgprot_t pgprot;

        if (!map) {
                data->type = VM_FAULT_OOM;
                return NULL;
        }

        if (address > vma->vm_end) {
                data->type = VM_FAULT_SIGBUS;
                return NULL;
        }

        ttm = (drm_ttm_t *) map->offset;

        dev = ttm->dev;

        /*
         * Perhaps retry here?
         */

        mutex_lock(&dev->struct_mutex);
        drm_fixup_ttm_caching(ttm);

        bm = &dev->bm;
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
        page = ttm->pages[page_offset];

        if (!page) {
                if (drm_alloc_memctl(PAGE_SIZE)) {
                        data->type = VM_FAULT_OOM;
                        goto out;
                }
                page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
                if (!page) {
                        drm_free_memctl(PAGE_SIZE);
                        data->type = VM_FAULT_OOM;
                        goto out;
                }
                ++bm->cur_pages;
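                /*
                 * Mark the page as driver-owned so it is not treated
                 * as ordinary reclaimable memory (the lock bit on
                 * newer kernels, the reserved bit on older ones).
                 */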
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
                SetPageLocked(page);
#else
                SetPageReserved(page);
#endif
        }

        if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {

                /*
                 * FIXME: Check can't map aperture flag.
                 */

                pfn = ttm->aper_offset + page_offset +
                        (ttm->be->aperture_base >> PAGE_SHIFT);
                pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
        } else {
                pfn = page_to_pfn(page);
                pgprot = vma->vm_page_prot;
        }

        err = vm_insert_pfn(vma, address, pfn, pgprot);

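        /*
         * -EBUSY means a racing fault already installed a PTE at this
         * address, which counts as success here.
         */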
        if (!err || err == -EBUSY)
                data->type = VM_FAULT_MINOR;
        else
                data->type = VM_FAULT_OOM;
 out:
        mutex_unlock(&dev->struct_mutex);
        return NULL;
}


/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!map)
                return NOPAGE_OOM;      /* Nothing allocated */

        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_OOM;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* This VMA was the only mapping of the map still open */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if the map is in the maplist; if it is not,
                 * delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                drm_ioremapfree(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        case _DRM_TTM:
                                BUG_ON(1);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!dma->pagelist)
                return NOPAGE_OOM;      /* Nothing allocated */

        offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
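        /*
         * dma->pagelist[] holds kernel virtual addresses; adding the
         * sub-page bits (offset & ~PAGE_MASK) does not change which
         * page virt_to_page() resolves to.
         */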
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
                                                   unsigned long address)
{
        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        DRM_DEBUG("\n");
        if (!entry)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!entry->pagelist)
                return NOPAGE_OOM;      /* Nothing allocated */

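        /*
         * The map may cover only part of the scatter-gather area, so
         * translate the fault offset within this map into an index
         * into the full SG pagelist.
         */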
        offset = address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_sg_nopage(vma, address);
}


#else                           /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address, int unused)
{
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int unused)
{
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int unused)
{
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int unused)
{
        return drm_do_vm_sg_nopage(vma, address);
}

#endif

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
        .nopage = drm_vm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
        .nopage = drm_vm_shm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
        .nopage = drm_vm_dma_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
        .nopage = drm_vm_sg_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

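/*
 * Kernels before 2.6.19 have no vm_ops.fault; there the TTM map is
 * faulted through a nopage-style entry point (drm_vm_ttm_nopage is not
 * defined in this file and is expected to come from the DRM
 * compatibility code), while newer kernels call drm_vm_ttm_fault()
 * directly.
 */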
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static struct vm_operations_struct drm_vm_ttm_ops = {
        .nopage = drm_vm_ttm_nopage,
        .open = drm_vm_ttm_open_wrapper,
        .close = drm_vm_ttm_close,
};
#else
static struct vm_operations_struct drm_vm_ttm_ops = {
        .fault = drm_vm_ttm_fault,
        .open = drm_vm_ttm_open_wrapper,
        .close = drm_vm_ttm_close,
};
#endif

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                mutex_lock(&dev->struct_mutex);
                vma_entry->vma = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid = current->pid;
                dev->vmalist = vma_entry;
                mutex_unlock(&dev->struct_mutex);
        }
}

static int drm_vm_ttm_open(struct vm_area_struct *vma)
{
        drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
        drm_ttm_t *ttm;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;

        drm_vm_open(vma);
        mutex_lock(&dev->struct_mutex);
        ttm = (drm_ttm_t *) map->offset;
        atomic_inc(&ttm->vma_count);
#ifdef DRM_ODD_MM_COMPAT
        drm_ttm_add_vma(ttm, vma);
#endif
        mutex_unlock(&dev->struct_mutex);
        return 0;
}

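/*
 * vm_operations_struct::open returns void, so the int-returning
 * drm_vm_ttm_open() (whose result drm_mmap() does check) is wrapped
 * here for use as the vma open callback.
 */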
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
{
        drm_vm_ttm_open(vma);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        mutex_lock(&dev->struct_mutex);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}


static void drm_vm_ttm_close(struct vm_area_struct *vma)
{
        drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
        drm_ttm_t *ttm;
        drm_device_t *dev;
        int ret;

        drm_vm_close(vma);
        if (map) {
                ttm = (drm_ttm_t *) map->offset;
                dev = ttm->dev;
                mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
                drm_ttm_delete_vma(ttm, vma);
#endif
                if (atomic_dec_and_test(&ttm->vma_count)) {
                        if (ttm->destroy) {
                                ret = drm_destroy_ttm(ttm);
                                BUG_ON(ret);
                                drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
                        }
                }
                mutex_unlock(&dev->struct_mutex);
        }
}


/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        drm_device_dma_t *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev = priv->head->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

#if LINUX_VERSION_CODE <= 0x02040e      /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM;    /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
#endif

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}

unsigned long drm_core_get_map_ofs(drm_map_t * map)
{
        return map->offset;
}
EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        unsigned long offset = 0;
        drm_hash_item_t *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

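        /*
         * vma->vm_pgoff carries the map's hash token: user space passes
         * the value returned by the map ioctl as the mmap() offset, and
         * it is looked up here to find the corresponding drm_map.
         */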
        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, drm_map_list_t, hash)->map;

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to bus dma address from the CPU, so for
                         * memory of type DRM_AGP, we'll deal with sorting out the real physical
                         * pages and mappings in nopage()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = dev->driver->get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
                if (io_remap_pfn_range(vma, vma->vm_start,
                                        (map->offset + offset) >> PAGE_SHIFT,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot))
#else
                if (remap_pfn_range(vma, vma->vm_start,
                                     (map->offset + offset) >> PAGE_SHIFT,
                                     vma->vm_end - vma->vm_start,
                                     vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, map->offset + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid nopage */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e      /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e      /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        case _DRM_TTM: {
                vma->vm_ops = &drm_vm_ttm_ops;
                vma->vm_private_data = (void *) map;
                vma->vm_file = filp;
                vma->vm_flags |= VM_RESERVED | VM_IO;
#ifdef DRM_ODD_MM_COMPAT
                mutex_lock(&dev->struct_mutex);
                drm_ttm_map_bound(vma);
                mutex_unlock(&dev->struct_mutex);
#endif
                if (drm_vm_ttm_open(vma))
                        return -EAGAIN;
                return 0;
        }
        default:
                return -EINVAL; /* This should never happen. */
        }
#if LINUX_VERSION_CODE <= 0x02040e      /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM;    /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
#endif

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}
EXPORT_SYMBOL(drm_mmap);