drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
- drm_regman.o drm_mm.o
+ drm_regman.o drm_gem.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
- i915_buffer.o i915_compat.o i915_execbuf.o i915_mm.o
+ i915_buffer.o i915_compat.o i915_execbuf.o i915_gem.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
#define DRIVER_IRQ_SHARED 0x80
#define DRIVER_DMA_QUEUE 0x100
#define DRIVER_FB_DMA 0x200
-#define DRIVER_MM 0x400
+#define DRIVER_GEM 0x400
/*@}*/
* This structure defines the drm_mm memory object, which will be used by the
* DRM for its buffer objects.
*/
-struct drm_mm_object {
+struct drm_gem_object {
/** File representing the shmem storage */
struct file *filp;
/** Reference count of this object, protected by object_lock */
int refcount;
+
+ void *driver_private;
};
#include "drm_objects.h"
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
+ /**
+ * Driver-specific constructor for drm_gem_objects, to set up
+ * obj->driver_private.
+ *
+ * Returns 0 on success.
+ */
+ int (*gem_init_object) (struct drm_device *dev,
+ struct drm_gem_object *obj);
+ void (*gem_free_object) (struct drm_device *dev,
+ struct drm_gem_object *obj);
+
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
}
/* Memory manager (drm_mm.c) */
-void drm_mm_object_reference(struct drm_mm_object *obj);
-void drm_mm_object_unreference(struct drm_mm_object *obj);
-int drm_mm_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_mm_unreference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_mm_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_mm_pwrite_ioctl(struct drm_device *dev, void *data,
+void drm_gem_object_reference(struct drm_device *dev,
+ struct drm_gem_object *obj);
+void drm_gem_object_unreference(struct drm_device *dev,
+ struct drm_gem_object *obj);
+int drm_gem_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int drm_mm_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void drm_mm_open(struct drm_file *file_private);
-void drm_mm_release(struct drm_file *file_private);
+int drm_gem_unreference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_ALLOC, drm_mm_alloc_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_UNREFERENCE, drm_mm_unreference_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_PREAD, drm_mm_pread_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_PWRITE, drm_mm_pwrite_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_MMAP, drm_mm_mmap_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_ALLOC, drm_gem_alloc_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_UNREFERENCE, drm_gem_unreference_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_PREAD, drm_gem_pread_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_PWRITE, drm_gem_pwrite_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_MMAP, drm_gem_mmap_ioctl, 0),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
goto out_free;
}
- if (dev->driver->driver_features & DRIVER_MM)
- drm_mm_open(priv);
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_open(dev, priv);
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
dev->driver->reclaim_buffers(dev, file_priv);
}
- if (dev->driver->driver_features & DRIVER_MM)
- drm_mm_release(file_priv);
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
drm_fasync(-1, filp, 0);
--- /dev/null
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file. However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ * default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ * handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls. The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later data, and as our interface with shmfs for memory allocation.
+ */
+
+static struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_object *obj;
+
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+	if (obj == NULL)
+		return NULL;
+
+ obj->filp = shmem_file_setup("drm mm object", size, 0);
+ if (IS_ERR(obj->filp)) {
+ kfree(obj);
+ return NULL;
+ }
+
+ obj->refcount = 1;
+
+ if (dev->driver->gem_init_object != NULL &&
+ dev->driver->gem_init_object(dev, obj) != 0) {
+ fput(obj->filp);
+ kfree(obj);
+ return NULL;
+ }
+ return obj;
+}
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_device *dev, struct drm_file *filp,
+ int handle)
+{
+ struct drm_gem_object *obj;
+
+ /* This is gross. The idr system doesn't let us try a delete and
+ * return an error code. It just spews if you fail at deleting.
+ * So, we have to grab a lock around finding the object and then
+ * doing the delete on it and dropping the refcount, or the user
+ * could race us to double-decrement the refcount and cause a
+ * use-after-free later. Given the frequency of our handle lookups,
+ * we may want to use ida for number allocation and a hash table
+ * for the pointers, anyway.
+ */
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return -EINVAL;
+ }
+
+ /* Release reference and decrement refcount. */
+ idr_remove(&filp->object_idr, handle);
+ drm_gem_object_unreference(dev, obj);
+
+ spin_unlock(&filp->table_lock);
+
+ return 0;
+}
+
+/** Returns a reference to the object named by the handle. */
+static struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ int handle)
+{
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return NULL;
+ }
+
+ drm_gem_object_reference(dev, obj);
+
+ spin_unlock(&filp->table_lock);
+
+ return obj;
+}
+
+
+/**
+ * Allocates a new mm object and returns a handle to it.
+ */
+int
+drm_gem_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_alloc_args *args = data;
+ struct drm_gem_object *obj;
+ int handle, ret;
+
+ /* Round requested size up to page size */
+ args->size = (args->size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+
+ /* Allocate the new object */
+ obj = drm_gem_object_alloc(dev, args->size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ /* Get the user-visible handle using idr.
+ *
+ * I'm not really sure why the idr api needs us to do this in two
+ * repeating steps. It handles internal locking of its data
+ * structure, yet insists that we keep its memory allocation step
+ * separate from its slot-finding step for locking purposes.
+ */
+ do {
+		if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) {
+			drm_gem_object_unreference(dev, obj);
+			return -ENOMEM;
+		}
+ }
+
+ ret = idr_get_new(&file_priv->object_idr, obj, &handle);
+ } while (ret == -EAGAIN);
+
+ if (ret != 0) {
+ drm_gem_object_unreference(dev, obj);
+		return ret;
+ }
+
+ args->handle = handle;
+
+ return 0;
+}
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_unreference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_unreference_args *args = data;
+ int ret;
+
+ ret = drm_gem_handle_delete(dev, file_priv, args->handle);
+
+ return ret;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+drm_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_pread_args *args = data;
+ struct drm_gem_object *obj;
+ ssize_t read;
+ loff_t offset;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ offset = args->offset;
+
+ read = obj->filp->f_op->read(obj->filp, (char __user *)args->data,
+ args->size, &offset);
+ if (read != args->size) {
+ drm_gem_object_unreference(dev, obj);
+ if (read < 0)
+ return read;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(dev, obj);
+
+ return 0;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_mmap_args *args = data;
+ struct drm_gem_object *obj;
+ loff_t offset;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ offset = args->offset;
+
+	down_write(&current->mm->mmap_sem);
+ args->addr = (void *)do_mmap(obj->filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+	up_write(&current->mm->mmap_sem);
+
+ drm_gem_object_unreference(dev, obj);
+
+ return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_pwrite_args *args = data;
+ struct drm_gem_object *obj;
+ ssize_t written;
+ loff_t offset;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ offset = args->offset;
+
+ written = obj->filp->f_op->write(obj->filp, (char __user *)args->data,
+ args->size, &offset);
+ if (written != args->size) {
+ drm_gem_object_unreference(dev, obj);
+ if (written < 0)
+ return written;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(dev, obj);
+
+ return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+ idr_init(&file_private->object_idr);
+}
+
+/** Called at device close to release the file's references on objects. */
+static int
+drm_gem_object_release(int id, void *ptr, void *data)
+{
+ struct drm_device *dev = data;
+ struct drm_gem_object *obj = ptr;
+
+ drm_gem_object_unreference(dev, obj);
+
+ return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+ idr_for_each(&file_private->object_idr, &drm_gem_object_release, dev);
+
+ idr_destroy(&file_private->object_idr);
+}
+
+void
+drm_gem_object_reference(struct drm_device *dev, struct drm_gem_object *obj)
+{
+ spin_lock(&obj->lock);
+ obj->refcount++;
+ spin_unlock(&obj->lock);
+}
+
+void
+drm_gem_object_unreference(struct drm_device *dev, struct drm_gem_object *obj)
+{
+	int do_free;
+
+	spin_lock(&obj->lock);
+	do_free = (--obj->refcount == 0);
+	spin_unlock(&obj->lock);
+	if (do_free) {
+ if (dev->driver->gem_free_object != NULL)
+ dev->driver->gem_free_object(dev, obj);
+
+ fput(obj->filp);
+ kfree(obj);
+ }
+}
+++ /dev/null
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include "drmP.h"
-
-/** @file drm_mm.c
- *
- * This file provides some of the base ioctls and library routines for
- * the graphics memory manager implemented by each device driver.
- *
- * Because various devices have different requirements in terms of
- * synchronization and migration strategies, implementing that is left up to
- * the driver, and all that the general API provides should be generic --
- * allocating objects, reading/writing data with the cpu, freeing objects.
- * Even there, platform-dependent optimizations for reading/writing data with
- * the CPU mean we'll likely hook those out to driver-specific calls. However,
- * the DRI2 implementation wants to have at least allocate/mmap be generic.
- *
- * The goal was to have swap-backed object allocation managed through
- * struct file. However, file descriptors as handles to a struct file have
- * two major failings:
- * - Process limits prevent more than 1024 or so being used at a time by
- * default.
- * - Inability to allocate high fds will aggravate the X Server's select()
- * handling, and likely that of many GL client applications as well.
- *
- * This led to a plan of using our own integer IDs (called handles, following
- * DRM terminology) to mimic fds, and implement the fd syscalls we need as
- * ioctls. The objects themselves will still include the struct file so
- * that we can transition to fds if the required kernel infrastructure shows
- * up at a later data, and as our interface with shmfs for memory allocation.
- */
-
-static struct drm_mm_object *
-drm_mm_object_alloc(size_t size)
-{
- struct drm_mm_object *obj;
-
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
- obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
-
- obj->filp = shmem_file_setup("drm mm object", size, 0);
- if (IS_ERR(obj->filp)) {
- kfree(obj);
- return NULL;
- }
-
- obj->refcount = 1;
-
- return obj;
-}
-
-/**
- * Removes the mapping from handle to filp for this object.
- */
-static int
-drm_mm_handle_delete(struct drm_file *filp, int handle)
-{
- struct drm_mm_object *obj;
-
- /* This is gross. The idr system doesn't let us try a delete and
- * return an error code. It just spews if you fail at deleting.
- * So, we have to grab a lock around finding the object and then
- * doing the delete on it and dropping the refcount, or the user
- * could race us to double-decrement the refcount and cause a
- * use-after-free later. Given the frequency of our handle lookups,
- * we may want to use ida for number allocation and a hash table
- * for the pointers, anyway.
- */
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return -EINVAL;
- }
-
- /* Release reference and decrement refcount. */
- idr_remove(&filp->object_idr, handle);
- drm_mm_object_unreference(obj);
-
- spin_unlock(&filp->table_lock);
-
- return 0;
-}
-
-/** Returns a reference to the object named by the handle. */
-static struct drm_mm_object *
-drm_mm_object_lookup(struct drm_file *filp, int handle)
-{
- struct drm_mm_object *obj;
-
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return NULL;
- }
-
- drm_mm_object_reference(obj);
-
- spin_unlock(&filp->table_lock);
-
- return obj;
-}
-
-
-/**
- * Allocates a new mm object and returns a handle to it.
- */
-int
-drm_mm_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mm_alloc_args *args = data;
- struct drm_mm_object *obj;
- int handle, ret;
-
- /* Round requested size up to page size */
- args->size = (args->size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
-
- /* Allocate the new object */
- obj = drm_mm_object_alloc(args->size);
- if (obj == NULL)
- return -ENOMEM;
-
- /* Get the user-visible handle using idr.
- *
- * I'm not really sure why the idr api needs us to do this in two
- * repeating steps. It handles internal locking of its data
- * structure, yet insists that we keep its memory allocation step
- * separate from its slot-finding step for locking purposes.
- */
- do {
- if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) {
- kfree(obj);
- return -EFAULT;
- }
-
- ret = idr_get_new(&file_priv->object_idr, obj, &handle);
- } while (ret == -EAGAIN);
-
- if (ret != 0) {
- drm_mm_object_unreference(obj);
- return -EFAULT;
- }
-
- args->handle = handle;
-
- return 0;
-}
-
-/**
- * Releases the handle to an mm object.
- */
-int
-drm_mm_unreference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mm_unreference_args *args = data;
- int ret;
-
- ret = drm_mm_handle_delete(file_priv, args->handle);
-
- return ret;
-}
-
-/**
- * Reads data from the object referenced by handle.
- *
- * On error, the contents of *data are undefined.
- */
-int
-drm_mm_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mm_pread_args *args = data;
- struct drm_mm_object *obj;
- ssize_t read;
- loff_t offset;
-
- obj = drm_mm_object_lookup(file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- offset = args->offset;
-
- read = obj->filp->f_op->read(obj->filp, (char __user *)args->data,
- args->size, &offset);
- if (read != args->size) {
- drm_mm_object_unreference(obj);
- if (read < 0)
- return read;
- else
- return -EINVAL;
- }
-
- drm_mm_object_unreference(obj);
-
- return 0;
-}
-
-/**
- * Maps the contents of an object, returning the address it is mapped
- * into.
- *
- * While the mapping holds a reference on the contents of the object, it doesn't
- * imply a ref on the object itself.
- */
-int
-drm_mm_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mm_mmap_args *args = data;
- struct drm_mm_object *obj;
- loff_t offset;
-
- obj = drm_mm_object_lookup(file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- offset = args->offset;
-
-	down_write(&current->mm->mmap_sem);
- args->addr = (void *)do_mmap(obj->filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- args->offset);
-	up_write(&current->mm->mmap_sem);
-
- drm_mm_object_unreference(obj);
-
- return 0;
-}
-
-/**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
-int
-drm_mm_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mm_pwrite_args *args = data;
- struct drm_mm_object *obj;
- ssize_t written;
- loff_t offset;
-
- obj = drm_mm_object_lookup(file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- offset = args->offset;
-
- written = obj->filp->f_op->write(obj->filp, (char __user *)args->data,
- args->size, &offset);
- if (written != args->size) {
- drm_mm_object_unreference(obj);
- if (written < 0)
- return written;
- else
- return -EINVAL;
- }
-
- drm_mm_object_unreference(obj);
-
- return 0;
-}
-
-/**
- * Called at device open time, sets up the structure for handling refcounting
- * of mm objects.
- */
-void
-drm_mm_open(struct drm_file *file_private)
-{
- idr_init(&file_private->object_idr);
-}
-
-/** Called at device close to release the file's references on objects. */
-static int
-drm_mm_object_release(int id, void *ptr, void *data)
-{
- struct drm_mm_object *obj = ptr;
-
- drm_mm_object_unreference(obj);
-
- return 0;
-}
-
-/**
- * Called at close time when the filp is going away.
- *
- * Releases any remaining references on objects by this filp.
- */
-void
-drm_mm_release(struct drm_file *file_private)
-{
- idr_for_each(&file_private->object_idr, &drm_mm_object_release, NULL);
-
- idr_destroy(&file_private->object_idr);
-}
-
-void
-drm_mm_object_reference(struct drm_mm_object *obj)
-{
- spin_lock(&obj->lock);
- obj->refcount++;
- spin_unlock(&obj->lock);
-}
-
-void
-drm_mm_object_unreference(struct drm_mm_object *obj)
-{
- spin_lock(&obj->lock);
- obj->refcount--;
- spin_unlock(&obj->lock);
- if (obj->refcount == 0) {
- fput(obj->filp);
- kfree(obj);
- }
-}
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = i915_ioctls,
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
--- /dev/null
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_init *args = data;
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
+ (args->gtt_end & (PAGE_SIZE - 1)) != 0)
+ return -EINVAL;
+
+ drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
+ args->gtt_end);
+
+ return 0;
+}
+
+static void
+i915_gem_evict_object(struct drm_device *dev, struct drm_gem_object *obj)
+{
+
+}
+
+static int
+evict_callback(struct drm_memrange_node *node, void *data)
+{
+ struct drm_device *dev = data;
+ struct drm_gem_object *obj = node->private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (obj_priv->pin_count == 0)
+ i915_gem_evict_object(dev, obj);
+
+ return 0;
+}
+
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_validate_entry *validate_list;
+ int ret, i;
+ RING_LOCALS;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ /* Big hammer: flush and idle the hardware so we can map things in/out.
+ */
+ BEGIN_LP_RING(2);
+ OUT_RING(CMD_MI_FLUSH | MI_READ_FLUSH | MI_EXE_FLUSH);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ ret = i915_quiescent(dev);
+ if (ret != 0)
+ return ret;
+
+ /* Evict everything so we have space for sure. */
+	drm_memrange_for_each(&dev_priv->mm.gtt_space, evict_callback, dev);
+
+ /* Copy in the validate list from userland */
+	validate_list = drm_calloc(sizeof(*validate_list), args->buffer_count,
+				   DRM_MEM_DRIVER);
+	if (validate_list == NULL)
+		return -ENOMEM;
+ ret = copy_from_user(validate_list,
+ (struct drm_i915_relocation_entry __user*)
+ args->buffers,
+ sizeof(*validate_list) * args->buffer_count);
+ if (ret != 0) {
+ drm_free(validate_list,
+ sizeof(*validate_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+		return -EFAULT;
+ }
+
+ /* Perform the relocations */
+ for (i = 0; i < args->buffer_count; i++) {
+ intel_gem_reloc_and_validate_buffer(dev, &validate_list[i]);
+ }
+
+ /* Exec the batchbuffer */
+
+
+ drm_free(validate_list, sizeof(*validate_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+
+ return 0;
+}
+
+int i915_gem_init_object(struct drm_device *dev, struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+ if (obj_priv == NULL)
+ return -ENOMEM;
+
+ obj->driver_private = obj_priv;
+
+ return 0;
+}
+
+void i915_gem_free_object(struct drm_device *dev, struct drm_gem_object *obj)
+{
+	drm_free(obj->driver_private, sizeof(struct drm_i915_gem_object),
+		 DRM_MEM_DRIVER);
+}
+
};
-struct drm_mm_alloc_args {
+struct drm_gem_alloc_args {
/**
* Requested size for the object.
*
uint32_t handle;
};
-struct drm_mm_unreference_args {
+struct drm_gem_unreference_args {
/** Handle of the object to be unreferenced. */
uint32_t handle;
};
-struct drm_mm_link_args {
+struct drm_gem_link_args {
/** Handle for the object being given a name. */
uint32_t handle;
/** Requested file name to export the object under. */
mode_t mode;
};
-struct drm_mm_pread_args {
+struct drm_gem_pread_args {
/** Handle for the object being read. */
uint32_t handle;
/** Offset into the object to read from */
void *data;
};
-struct drm_mm_pwrite_args {
+struct drm_gem_pwrite_args {
/** Handle for the object being written to. */
uint32_t handle;
/** Offset into the object to write to */
void *data;
};
-struct drm_mm_mmap_args {
+struct drm_gem_mmap_args {
/** Handle for the object being mapped. */
uint32_t handle;
/** Offset in the object to map. */
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
-#define DRM_IOCTL_MM_ALLOC DRM_IOWR(0x09, struct drm_mm_alloc_args)
-#define DRM_IOCTL_MM_UNREFERENCE DRM_IOW(0x0a, struct drm_mm_unreference_args)
-#define DRM_IOCTL_MM_PREAD DRM_IOW(0x0b, struct drm_mm_pread_args)
-#define DRM_IOCTL_MM_PWRITE DRM_IOW(0x0c, struct drm_mm_pwrite_args)
-#define DRM_IOCTL_MM_MMAP DRM_IOWR(0x0d, struct drm_mm_mmap_args)
+#define DRM_IOCTL_GEM_ALLOC DRM_IOWR(0x09, struct drm_gem_alloc_args)
+#define DRM_IOCTL_GEM_UNREFERENCE DRM_IOW(0x0a, struct drm_gem_unreference_args)
+#define DRM_IOCTL_GEM_PREAD DRM_IOW(0x0b, struct drm_gem_pread_args)
+#define DRM_IOCTL_GEM_PWRITE DRM_IOW(0x0c, struct drm_gem_pwrite_args)
+#define DRM_IOCTL_GEM_MMAP DRM_IOWR(0x0d, struct drm_gem_mmap_args)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
#ifdef I915_HAVE_BUFFER
DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
- DRM_IOCTL_DEF(DRM_I915_MM_INIT, intel_mm_init_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_MM_EXECBUFFER, intel_mm_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
#define DRM_I915_MMIO 0x10
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_EXECBUFFER 0x12
-#define DRM_I915_MM_INIT 0x13
-#define DRM_I915_MM_EXECBUFFER 0x14
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
-#define DRM_IOCTL_I915_MM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_MM_INIT, struct drm_i915_mm_init)
-#define DRM_IOCTL_I915_MM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_MM_INIT, struct drm_i915_mm_execbuffer)
+#define DRM_IOCTL_I915_MM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_MM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
/* Asynchronous page flipping:
*/
struct drm_fence_arg fence_arg;
};
-struct drm_i915_mm_init {
+struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
off_t gtt_end;
};
-struct drm_i915_relocation_entry {
+struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
*
uint32_t presumed_offset;
};
-struct drm_i915_mm_validate_entry {
+struct drm_i915_gem_validate_entry {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
uint32_t buffer_offset;
/** List of relocations to be performed on this buffer */
- struct drm_i915_relocation_entry *relocs;
+ struct drm_i915_gem_relocation_entry *relocs;
uint32_t relocation_count;
};
-struct drm_i915_mm_execbuffer {
+struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated wit their relocations to be
* performend on them.
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
- struct drm_i915_mm_validate_entry *buffers;
+ struct drm_i915_gem_validate_entry *buffers;
uint32_t buffer_count;
/** Offset in the batchbuffer to start execution from. */
CHIP_I965 = 0x08,
};
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+ /** Current offset of the object in GTT space, if any. */
+ uint32_t gtt_offset;
+
+ /** Boolean whether this object has a valid gtt offset. */
+ int gtt_bound;
+
+ /** How many users have pinned this object in GTT space */
+ int pin_count;
+};
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
/* i915_execbuf.c */
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-/* i915_mm.c */
-int intel_mm_init_ioctl(struct drm_device *dev, void *data,
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int intel_mm_execbuffer(struct drm_device *dev, void *data,
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_init_object(struct drm_device *dev, struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_device *dev, struct drm_gem_object *obj);
#endif