/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static int drm_bo_mmap_locked(struct vm_area_struct *vma,
                              struct file *filp,
                              drm_local_map_t *map);

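/**
 * Compute the page protection for register and framebuffer style mappings.
 *
 * \param map_type the DRM map type (_DRM_REGISTERS, _DRM_FRAME_BUFFER, ...).
 * \param vma virtual memory area.
 * \return the pgprot_t to install, made uncached (or write-combined on ia64
 * where the EFI memory map allows it) on the architectures that require it.
 */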
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

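/**
 * Compute the page protection for DMA-backed (consistent and scatter-gather)
 * mappings.
 *
 * \param map_type the DRM map type.
 * \param vma virtual memory area.
 * \return the pgprot_t to install; uncached on non-cache-coherent PowerPC,
 * the default protection elsewhere.
 */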
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        /* Modify through pgprot_val(), as elsewhere in this file. */
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
        return tmp;
}

#ifndef DRM_VM_NOPAGE
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it in \p vmf->page.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_fault_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                unsigned long offset = (unsigned long)vmf->virtual_address -
                                                                vma->vm_start;
                unsigned long baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                /* list_for_each_entry() never yields NULL; compare against
                 * the list head to detect "not found". */
                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                     baddr, __va(agpmem->memory->memory[offset]), offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it in \p vmf->page.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_map *map = (struct drm_map *) vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}
#endif

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                }
        }
        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        case _DRM_TTM:
                                BUG_ON(1);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

#ifndef DRM_VM_NOPAGE
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_map *map = (struct drm_map *) vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}
#endif

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
#ifdef DRM_VM_NOPAGE
        .nopage = drm_vm_nopage,
#else
        .fault = drm_do_vm_fault,
#endif
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
#ifdef DRM_VM_NOPAGE
        .nopage = drm_vm_shm_nopage,
#else
        .fault = drm_do_vm_shm_fault,
#endif
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
#ifdef DRM_VM_NOPAGE
        .nopage = drm_vm_dma_nopage,
#else
        .fault = drm_do_vm_dma_fault,
#endif
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
#ifdef DRM_VM_NOPAGE
        .nopage = drm_vm_sg_nopage,
#else
        .fault = drm_do_vm_sg_fault,
#endif
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and the
 * file pointer, then calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
        return map->offset;
}
EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(drm_core_get_reg_ofs);

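/*
 * Illustrative sketch (not part of this file): drivers that need no special
 * offset handling are expected to plug the two helpers above straight into
 * their struct drm_driver; drm_mmap_locked() below reaches the latter through
 * dev->driver->get_reg_ofs().  The structure name is hypothetical and only
 * the relevant callbacks are shown:
 *
 *	static struct drm_driver example_driver = {
 *		.get_map_ofs = drm_core_get_map_ofs,
 *		.get_reg_ofs = drm_core_get_reg_ofs,
 *		...
 *	};
 */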
/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type and the pages are remapped.
 * Finally the file pointer is set and drm_vm_open_locked() is called.
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_map *map = NULL;
        unsigned long offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */

        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU can't access the AGP
                         * aperture's bus addresses directly, so for memory of
                         * type _DRM_AGP we sort out the real physical pages
                         * and mappings in the nopage()/fault() handler.
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = dev->driver->get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, map->offset + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid nopage */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        case _DRM_TTM:
                return drm_bo_mmap_locked(vma, filp, map);
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);

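/*
 * Illustrative sketch (not part of this file): drm_mmap() is meant to be
 * wired up as the mmap file operation of the DRM device node.  The table
 * name below is hypothetical; drivers of this era typically provided an
 * equivalent table through their struct drm_driver::fops, e.g.
 *
 *	static const struct file_operations example_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.ioctl = drm_ioctl,
 *		.mmap = drm_mmap,
 *		.poll = drm_poll,
 *	};
 */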
/**
 * buffer object vm functions.
 */

/**
 * \c Pagefault method for buffer objects.
 *
 * \param vma Virtual memory area.
 * \param address Faulting user-space address.
 * \return Error or refault. The pfn is manually inserted.
 *
 * It's important that pfns are inserted while holding the bo->mutex lock,
 * otherwise we might race with unmap_mapping_range(), which is always
 * called with the bo->mutex lock held.
 *
 * We're modifying the page attribute bits of the vma->vm_page_prot field
 * without holding the mmap_sem in write mode, only in read mode.
 * These bits are not used by the mm subsystem code, and we consider them
 * protected by the bo->mutex lock.
 */

#ifdef DRM_FULL_MM_COMPAT
static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                                     unsigned long address)
{
        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page = NULL;
        struct drm_ttm *ttm;
        struct drm_device *dev;
        unsigned long pfn;
        int err;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long ret = NOPFN_REFAULT;

        if (address > vma->vm_end)
                return NOPFN_SIGBUS;

        dev = bo->dev;
        err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
        if (err)
                return NOPFN_REFAULT;

        err = mutex_lock_interruptible(&bo->mutex);
        if (err) {
                drm_bo_read_unlock(&dev->bm.bm_lock);
                return NOPFN_REFAULT;
        }

        err = drm_bo_wait(bo, 0, 1, 0, 1);
        if (err) {
                ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
                bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
                goto out_unlock;
        }

        bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

        /*
         * If buffer happens to be in a non-mappable location,
         * move it to a mappable.
         */

        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
                uint32_t new_flags = bo->mem.proposed_flags |
                        DRM_BO_FLAG_MAPPABLE |
                        DRM_BO_FLAG_FORCE_MAPPABLE;
                err = drm_bo_move_buffer(bo, new_flags, 0, 0);
                if (err) {
                        ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
                        goto out_unlock;
                }
        }

        err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
                                &bus_size);

        if (err) {
                ret = NOPFN_SIGBUS;
                goto out_unlock;
        }

        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

        if (bus_size) {
                struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];

                pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
                vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
        } else {
                ttm = bo->ttm;

                drm_ttm_fixup_caching(ttm);
                page = drm_ttm_get_page(ttm, page_offset);
                if (!page) {
                        ret = NOPFN_OOM;
                        goto out_unlock;
                }
                pfn = page_to_pfn(page);
                vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
                        vm_get_page_prot(vma->vm_flags) :
                        drm_io_prot(_DRM_TTM, vma);
        }

        err = vm_insert_pfn(vma, address, pfn);
        if (err) {
                ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
                goto out_unlock;
        }
out_unlock:
        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
        mutex_unlock(&bo->mutex);
        drm_bo_read_unlock(&dev->bm.bm_lock);
        return ret;
}
#endif

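/**
 * \c Locked portion of the vma open method for buffer objects.
 *
 * \param vma virtual memory area.
 *
 * Adds the vma to the device's vma list and takes a reference on the buffer
 * object backing it. Must be called with dev->struct_mutex held.
 */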
static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;

        drm_vm_open_locked(vma);
        atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT
        drm_bo_add_vma(bo, vma);
#endif
}

/**
 * \c vma open method for buffer objects.
 *
 * \param vma virtual memory area.
 */

static void drm_bo_vm_open(struct vm_area_struct *vma)
{
        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
        struct drm_device *dev = bo->dev;

        mutex_lock(&dev->struct_mutex);
        drm_bo_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c vma close method for buffer objects.
 *
 * \param vma virtual memory area.
 */

static void drm_bo_vm_close(struct vm_area_struct *vma)
{
        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
        struct drm_device *dev = bo->dev;

        drm_vm_close(vma);
        if (bo) {
                mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
                drm_bo_delete_vma(bo, vma);
#endif
                drm_bo_usage_deref_locked((struct drm_buffer_object **)
                                          &vma->vm_private_data);
                mutex_unlock(&dev->struct_mutex);
        }
        return;
}

static struct vm_operations_struct drm_bo_vm_ops = {
#ifdef DRM_FULL_MM_COMPAT
        .nopfn = drm_bo_vm_nopfn,
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
        .nopfn = drm_bo_vm_nopfn,
#else
        .nopage = drm_bo_vm_nopage,
#endif
#endif
        .open = drm_bo_vm_open,
        .close = drm_bo_vm_close,
};

/**
 * mmap buffer object memory.
 *
 * \param vma virtual memory area.
 * \param filp file pointer.
 * \param map The buffer object drm map.
 * \return zero on success or a negative number on failure.
 */

int drm_bo_mmap_locked(struct vm_area_struct *vma,
                       struct file *filp,
                       drm_local_map_t *map)
{
        vma->vm_ops = &drm_bo_vm_ops;
        vma->vm_private_data = map->handle;
        vma->vm_file = filp;
        vma->vm_flags |= VM_RESERVED | VM_IO;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
        vma->vm_flags |= VM_PFNMAP;
#endif
        drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
        drm_bo_map_bound(vma);
#endif
        return 0;
}