2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
28 #include <linux/types.h>
29 #include <linux/slab.h>
31 #include <linux/uaccess.h>
33 #include <linux/file.h>
34 #include <linux/module.h>
35 #include <linux/mman.h>
36 #include <linux/pagemap.h>
41 * This file provides some of the base ioctls and library routines for
42 * the graphics memory manager implemented by each device driver.
44 * Because various devices have different requirements in terms of
45 * synchronization and migration strategies, implementing that is left up to
46 * the driver, and all that the general API provides should be generic --
47 * allocating objects, reading/writing data with the cpu, freeing objects.
48 * Even there, platform-dependent optimizations for reading/writing data with
49 * the CPU mean we'll likely hook those out to driver-specific calls. However,
50 * the DRI2 implementation wants to have at least allocate/mmap be generic.
52 * The goal was to have swap-backed object allocation managed through
53 * struct file. However, file descriptors as handles to a struct file have
55 * - Process limits prevent more than 1024 or so being used at a time by
57 * - Inability to allocate high fds will aggravate the X Server's select()
58 * handling, and likely that of many GL client applications as well.
60 * This led to a plan of using our own integer IDs (called handles, following
61 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
62 * ioctls. The objects themselves will still include the struct file so
63 * that we can transition to fds if the required kernel infrastructure shows
64 * up at a later date, and as our interface with shmfs for memory allocation.
67 static struct drm_gem_object *
68 drm_gem_object_alloc(struct drm_device *dev, size_t size)
70 struct drm_gem_object *obj;
72 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
74 obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
76 obj->filp = shmem_file_setup("drm mm object", size, 0);
77 if (IS_ERR(obj->filp)) {
84 if (dev->driver->gem_init_object != NULL &&
85 dev->driver->gem_init_object(dev, obj) != 0) {
94 * Removes the mapping from handle to filp for this object.
97 drm_gem_handle_delete(struct drm_device *dev, struct drm_file *filp,
100 struct drm_gem_object *obj;
102 /* This is gross. The idr system doesn't let us try a delete and
103 * return an error code. It just spews if you fail at deleting.
104 * So, we have to grab a lock around finding the object and then
105 * doing the delete on it and dropping the refcount, or the user
106 * could race us to double-decrement the refcount and cause a
107 * use-after-free later. Given the frequency of our handle lookups,
108 * we may want to use ida for number allocation and a hash table
109 * for the pointers, anyway.
111 spin_lock(&filp->table_lock);
113 /* Check if we currently have a reference on the object */
114 obj = idr_find(&filp->object_idr, handle);
116 spin_unlock(&filp->table_lock);
120 /* Release reference and decrement refcount. */
121 idr_remove(&filp->object_idr, handle);
122 drm_gem_object_unreference(dev, obj);
124 spin_unlock(&filp->table_lock);
129 /** Returns a reference to the object named by the handle. */
130 struct drm_gem_object *
131 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
134 struct drm_gem_object *obj;
136 spin_lock(&filp->table_lock);
138 /* Check if we currently have a reference on the object */
139 obj = idr_find(&filp->object_idr, handle);
141 spin_unlock(&filp->table_lock);
145 drm_gem_object_reference(dev, obj);
147 spin_unlock(&filp->table_lock);
151 EXPORT_SYMBOL(drm_gem_object_lookup);
154 * Allocates a new mm object and returns a handle to it.
157 drm_gem_alloc_ioctl(struct drm_device *dev, void *data,
158 struct drm_file *file_priv)
160 struct drm_gem_alloc *args = data;
161 struct drm_gem_object *obj;
164 /* Round requested size up to page size */
165 args->size = (args->size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
167 /* Allocate the new object */
168 obj = drm_gem_object_alloc(dev, args->size);
172 /* Get the user-visible handle using idr.
174 * I'm not really sure why the idr api needs us to do this in two
175 * repeating steps. It handles internal locking of its data
176 * structure, yet insists that we keep its memory allocation step
177 * separate from its slot-finding step for locking purposes.
180 if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) {
185 ret = idr_get_new_above(&file_priv->object_idr, obj, 1,
187 } while (ret == -EAGAIN);
190 drm_gem_object_unreference(dev, obj);
194 args->handle = handle;
200 * Releases the handle to an mm object.
203 drm_gem_unreference_ioctl(struct drm_device *dev, void *data,
204 struct drm_file *file_priv)
206 struct drm_gem_unreference *args = data;
209 ret = drm_gem_handle_delete(dev, file_priv, args->handle);
215 * Reads data from the object referenced by handle.
217 * On error, the contents of *data are undefined.
220 drm_gem_pread_ioctl(struct drm_device *dev, void *data,
221 struct drm_file *file_priv)
223 struct drm_gem_pread *args = data;
224 struct drm_gem_object *obj;
228 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
232 offset = args->offset;
234 read = obj->filp->f_op->read(obj->filp, (char __user *)args->data,
235 args->size, &offset);
236 if (read != args->size) {
237 drm_gem_object_unreference(dev, obj);
244 drm_gem_object_unreference(dev, obj);
250 * Maps the contents of an object, returning the address it is mapped
253 * While the mapping holds a reference on the contents of the object, it doesn't
254 * imply a ref on the object itself.
257 drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
258 struct drm_file *file_priv)
260 struct drm_gem_mmap *args = data;
261 struct drm_gem_object *obj;
264 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
268 offset = args->offset;
270 down_write(¤t->mm->mmap_sem);
271 args->addr = (void *)do_mmap(obj->filp, 0, args->size,
272 PROT_READ | PROT_WRITE, MAP_SHARED,
274 up_write(¤t->mm->mmap_sem);
276 drm_gem_object_unreference(dev, obj);
282 * Writes data to the object referenced by handle.
284 * On error, the contents of the buffer that were to be modified are undefined.
287 drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
288 struct drm_file *file_priv)
290 struct drm_gem_pwrite *args = data;
291 struct drm_gem_object *obj;
295 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
299 offset = args->offset;
301 written = obj->filp->f_op->write(obj->filp, (char __user *)args->data,
302 args->size, &offset);
303 if (written != args->size) {
304 drm_gem_object_unreference(dev, obj);
311 drm_gem_object_unreference(dev, obj);
317 * Called at device open time, sets up the structure for handling refcounting
321 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
323 idr_init(&file_private->object_idr);
/** Called at device close to release the file's references on objects.
 *
 * idr_for_each() callback: @ptr is the object, @data the drm_device.
 * Must return 0 to keep the iteration going.
 */
static int
drm_gem_object_release(int id, void *ptr, void *data)
{
	struct drm_device *dev = data;
	struct drm_gem_object *obj = ptr;

	drm_gem_object_unreference(dev, obj);

	return 0;
}
339 * Called at close time when the filp is going away.
341 * Releases any remaining references on objects by this filp.
344 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
346 idr_for_each(&file_private->object_idr, &drm_gem_object_release, dev);
348 idr_destroy(&file_private->object_idr);
352 drm_gem_object_reference(struct drm_device *dev, struct drm_gem_object *obj)
354 spin_lock(&obj->lock);
356 spin_unlock(&obj->lock);
358 EXPORT_SYMBOL(drm_gem_object_reference);
361 drm_gem_object_unreference(struct drm_device *dev, struct drm_gem_object *obj)
366 spin_lock(&obj->lock);
368 spin_unlock(&obj->lock);
369 if (obj->refcount == 0) {
370 if (dev->driver->gem_free_object != NULL)
371 dev->driver->gem_free_object(dev, obj);
377 EXPORT_SYMBOL(drm_gem_object_unreference);