/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked-up offset will fit.
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
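
/*
 * Worked example, assuming a 64-bit kernel with 4 KiB pages (PAGE_SHIFT
 * == 12): DRM_FILE_PAGE_OFFSET_START = (0xFFFFFFFF >> 12) + 1 = 0x100000
 * pages, i.e. the first page offset past 4 GiB, so a faked-up offset can
 * never collide with any pgoff that fits in 32 bits. Likewise
 * DRM_FILE_PAGE_OFFSET_SIZE = (0xFFFFFFFF >> 12) * 16 pages is roughly
 * 64 GiB of offset space for the vma offset manager to carve object
 * nodes out of.
 */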
/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}
void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	drm_gem_private_object_init(dev, obj, size);
	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
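
/*
 * A minimal sketch of typical driver usage, assuming a hypothetical
 * "foo" driver that embeds the GEM object in its own buffer struct
 * (none of the foo_* names below exist in this file):
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */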
/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
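
/*
 * A sketch of the import case this helper serves, e.g. a driver's
 * dma-buf import path where the exporter, not shmfs, provides the
 * backing pages (obj, dev, size and sgt are hypothetical locals):
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (!obj)
 *		return ERR_PTR(-ENOMEM);
 *
 *	drm_gem_private_object_init(dev, obj, PAGE_ALIGN(size));
 *	... remember sgt for mapping into the GPU later ...
 */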
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}
/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}
static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must drop the handle count under the lock as this may be the last
	 * handle, in which case the object's name and exported dma_buf must
	 * go away before anyone else can resurrect them.
	 */
	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}
/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Remove the handle mapping; the handle reference is dropped below. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
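
/*
 * A sketch of how this helper is typically wired up, assuming a
 * hypothetical foo driver (only .dumb_destroy comes from this file;
 * the other hooks are the driver's own):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create	 = foo_dumb_create,
 *		.dumb_map_offset = foo_dumb_map_offset,
 *		.dumb_destroy	 = drm_gem_dumb_destroy,
 *	};
 */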
/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
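
/*
 * A minimal sketch of the calling pattern the comment above implies:
 * publish a handle, then drop the caller's local reference so the
 * handle owns the object (bo and args are hypothetical driver state):
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_unreference_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 */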
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size). Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
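
/*
 * A sketch of a driver's ->dumb_map_offset hook built on this helper
 * (hypothetical names): allocate the fake offset, then hand the byte
 * offset userspace should pass to mmap(2) back through *offset:
 *
 *	obj = drm_gem_object_lookup(dev, file, handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret == 0)
 *		*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *	drm_gem_object_unreference_unlocked(obj);
 *	return ret;
 */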
/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB. If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue. But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
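
/*
 * These two helpers are meant to be used as a pair around a pin/unpin
 * cycle; a sketch, passing 0 so only the mapping's own gfp mask is used
 * and marking the pages dirty on release so their contents survive a
 * swap-out:
 *
 *	pages = drm_gem_get_pages(obj, 0);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	... hand the page array to the GPU ...
 *
 *	drm_gem_put_pages(obj, pages, true, false);
 */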
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
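
/*
 * The reference returned above must be dropped once the caller is done,
 * which gives ioctl handlers the usual lookup/use/unreference shape
 * (a sketch):
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	drm_gem_object_unreference_unlocked(obj);
 */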
/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}
/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding dev->struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation,
 * when the GEM object is not looked up based on its fake offset. To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be called with dev->struct_mutex held.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
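
/*
 * A sketch of the dma-buf use this helper targets: an exporter's
 * dma_buf_ops.mmap callback can forward straight to it, since the
 * importer has already been granted access (foo_gem_dmabuf_mmap is
 * hypothetical; locking as required by the NOTE above):
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *		int ret;
 *
 *		mutex_lock(&obj->dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&obj->dev->struct_mutex);
 *		return ret;
 *	}
 */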
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
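
/*
 * Drivers reach this entry point through their file_operations; a
 * sketch of the usual boilerplate (foo_driver_fops is hypothetical,
 * the drm_* helpers are the core's standard file ops):
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 */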