/*
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}

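/*
 * Note: both helpers above start from vm_get_page_prot(vma->vm_flags) and
 * only adjust the caching attribute (uncached vs. write-combining) where a
 * given architecture needs it; the access permissions themselves are left
 * untouched.
 */
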
/*
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */

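/*
 * This fault path only matters when the CPU cannot access the AGP aperture
 * directly (dev->agp->cant_use_aperture): in that case the mapping is
 * populated page by page from the bound AGP memory instead of being
 * remapped up front in drm_mmap_locked().
 */
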
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/* Find the right map */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/* Adjust to a bus-relative address */
		baddr -= dev->hose->mem_space->start;
#endif
		/* It's AGP memory - find the real physical page to map */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/* Get the page, inc the use count, and return it */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/*
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

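/*
 * _DRM_SHM maps are backed by vmalloc()ed kernel memory (see the _DRM_SHM
 * case in drm_legacy_addmap()), which is why vmalloc_to_page() can resolve
 * the faulting address above.
 */
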
/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}
		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dma_free_coherent(&dev->pdev->dev, map->size,
						  map->handle, map->offset);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/*
 * \c fault method for DMA virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */

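/*
 * dma->pagelist holds the kernel virtual addresses of the legacy DMA
 * buffer pages, so the faulting page is simply virt_to_page() of the entry
 * selected by the page offset within the mapping.
 */
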
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */

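/*
 * A scatter-gather map may start anywhere inside dev->sg, so the map's
 * offset is first rebased against dev->sg->virtual before indexing
 * entry->pagelist.
 */
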
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

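/*
 * Summary of the fault strategies above: AGP faults resolve through the
 * bound AGP memory, SHM faults through vmalloc_to_page(), DMA faults
 * through drm_device_dma::pagelist and scatter-gather faults through
 * drm_sg_mem::pagelist.  The vm_operations tables below select the right
 * handler for each map type.
 */
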
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

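/*
 * All four tables share drm_vm_open()/drm_vm_close() for the vmalist
 * bookkeeping; only _DRM_SHM mappings get the special close handler so
 * that a removable map can be freed once its last mapping goes away.
 */
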
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */

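/*
 * Reached from drm_mmap_locked() when the mapping has a zero page offset,
 * which legacy userspace uses to mean "map the DMA buffers" rather than a
 * specific entry from the maplist.
 */
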
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/*
 * mmap a DRM map or DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */

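/*
 * Illustrative flow (not part of this file): a legacy client obtains a map
 * token from the ADD_MAP/GET_MAP ioctls (libdrm's drmAddMap()/drmMap())
 * and hands it back as the mmap() offset, roughly:
 *
 *	drm_handle_t handle;
 *	drmAddMap(fd, regs_base, regs_size, DRM_REGISTERS, 0, &handle);
 *	regs = mmap(NULL, regs_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    fd, handle);
 *
 * That offset, converted to a page number, becomes vma->vm_pgoff and is
 * what the dev->map_hash lookup below resolves back to a drm_local_map.
 */
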
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU can't access the AGP
			 * aperture's bus addresses, so for memory of type
			 * _DRM_AGP we sort out the real physical pages and
			 * mappings in fault().
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		fallthrough;	/* to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		fallthrough;	/* to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

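/*
 * Usage sketch (illustrative, not part of this file): a legacy, non-KMS
 * driver wires this up as the mmap handler in its file_operations, e.g.:
 *
 *	static const struct file_operations foo_legacy_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_legacy_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 *
 * "foo_legacy_fops" is a made-up name; see the existing legacy drivers for
 * the real tables.
 */
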
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif