/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	drm_hash_item_t *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash))
		goto vm_nopage_error;

	r_list = drm_hash_entry(hash, drm_map_list_t, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem)
			goto vm_nopage_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

#if 0
		/* page_count() not defined everywhere */
		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));
#endif

		return page;
	}
      vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
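
/*
 * Illustrative sketch (not part of the original file): the translation done
 * by drm_do_vm_nopage() above is plain offset arithmetic.  For a hypothetical
 * AGP binding at bound = 0xd0000000 covering 16 pages, a faulting bus address
 * baddr = 0xd0003804 resolves as:
 *
 *	offset = (0xd0003804 - 0xd0000000) >> PAGE_SHIFT;	// -> page index 3
 *	page = virt_to_page(__va(agpmem->memory->memory[3]));
 *
 * i.e. agpmem->memory->memory[] holds the physical address backing each bound
 * page, and nopage indexes it with the page-aligned distance from the bind
 * address.  The numbers above are made up for illustration only.
 */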
/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!map)
		return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_OOM;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev, *next;
	drm_map_t *map;
	drm_map_list_t *r_list;
	struct list_head *list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
		next = pt->next;
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		} else {
			prev = pt;
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list = &dev->maplist->head;
		list_for_each(list, &dev->maplist->head) {
			r_list = list_entry(list, drm_map_list_t, head);
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				drm_ioremapfree(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	up(&dev->struct_sem);
}
/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!dma->pagelist)
		return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}
/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_sg_mem_t *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!entry->pagelist)
		return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}

#else				/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int unused)
{
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int unused)
{
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int unused)
{
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int unused)
{
	return drm_do_vm_sg_nopage(vma, address);
}

#endif
/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		down(&dev->struct_sem);
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
		up(&dev->struct_sem);
	}
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
}
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	lock_kernel();
	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	vma->vm_ops = &drm_vm_dma_ops;

#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
	vma->vm_flags |= VM_LOCKED | VM_SHM;	/* Don't swap */
#else
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
#endif

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
unsigned long drm_core_get_map_ofs(drm_map_t * map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
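
/*
 * Illustrative sketch (not from this file): drivers of this generation are
 * expected to hook these helpers into their drm_driver descriptor so that
 * the core can translate map offsets, e.g. (driver struct abbreviated, field
 * names assumed from the contemporary drm_driver definition):
 *
 *	static struct drm_driver driver = {
 *		...
 *		.get_map_ofs = drm_core_get_map_ofs,
 *		.get_reg_ofs = drm_core_get_reg_ofs,
 *		...
 *	};
 *
 * drm_mmap() below calls dev->driver->get_reg_ofs(dev) to apply the
 * bus-specific register offset, which is non-zero only on Alpha here.
 */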
/**
 * mmap a DRM device.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	unsigned long offset = 0;
	drm_hash_item_t *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);
	if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, drm_map_list_t, hash)->map;

	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
		}
#elif defined(__powerpc__)
		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
		if (map->type == _DRM_REGISTERS)
			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
		vma->vm_flags |= VM_IO;	/* not in core dump */
#if defined(__ia64__)
		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
			vma->vm_page_prot =
			    pgprot_writecombine(vma->vm_page_prot);
		else
			vma->vm_page_prot =
			    pgprot_noncached(vma->vm_page_prot);
#endif
		offset = dev->driver->get_reg_ofs(dev);
#if defined(__sparc__)
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#else
		if (remap_pfn_range(vma, vma->vm_start,
				    (map->offset + offset) >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    vma->vm_page_prot))
#endif
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid nopage */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
		vma->vm_flags |= VM_LOCKED;
#else
		vma->vm_flags |= VM_RESERVED;
#endif
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
		vma->vm_flags |= VM_LOCKED;
#else
		vma->vm_flags |= VM_RESERVED;
#endif
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
	vma->vm_flags |= VM_LOCKED | VM_SHM;	/* Don't swap */
#else
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
#endif

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}

EXPORT_SYMBOL(drm_mmap);
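
/*
 * Illustrative sketch (not part of the original file): from userspace, a DRM
 * client reaches this entry point by mmap()ing the device node with the map
 * token obtained from the map ioctls (variable names hypothetical):
 *
 *	// token: handle returned by the ADD_MAP/GET_MAP ioctls for the map
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, token);
 *
 * drm_mmap() looks that token up in dev->map_hash via drm_ht_find_item(),
 * recovers the drm_map_t, and installs the matching vm_operations_struct
 * (drm_vm_ops, drm_vm_shm_ops, drm_vm_dma_ops or drm_vm_sg_ops).
 */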