Merge remote branch 'origin/modesetting-101' into modesetting-gem
author Dave Airlie <airlied@linux.ie>
Fri, 25 Jul 2008 22:46:38 +0000 (08:46 +1000)
committer Dave Airlie <airlied@linux.ie>
Fri, 25 Jul 2008 22:46:38 +0000 (08:46 +1000)
64 files changed:
.gitignore
configure.ac
libdrm/Makefile.am
libdrm/dri_bufmgr.c [new file with mode: 0644]
libdrm/dri_bufmgr.h [new file with mode: 0644]
libdrm/intel/Makefile.am [new file with mode: 0644]
libdrm/intel/intel_bufmgr.h [new file with mode: 0644]
libdrm/intel/intel_bufmgr_fake.c [new file with mode: 0644]
libdrm/intel/intel_bufmgr_gem.c [new file with mode: 0644]
libdrm/intel/mm.c [new file with mode: 0644]
libdrm/intel/mm.h [new file with mode: 0644]
libdrm/xf86drm.c
libdrm/xf86drm.h
libdrm/xf86mm.h
linux-core/Makefile
linux-core/Makefile.kernel
linux-core/drm-gem.txt [new file with mode: 0644]
linux-core/drmP.h
linux-core/drm_agpsupport.c
linux-core/drm_bo.c
linux-core/drm_bo_move.c
linux-core/drm_compat.h
linux-core/drm_crtc_helper.c
linux-core/drm_drv.c
linux-core/drm_fops.c
linux-core/drm_gem.c [new file with mode: 0644]
linux-core/drm_irq.c
linux-core/drm_lock.c
linux-core/drm_memory.c
linux-core/drm_memrange.c [moved from linux-core/drm_mm.c with 69% similarity]
linux-core/drm_objects.h
linux-core/drm_proc.c
linux-core/drm_sman.c
linux-core/drm_sman.h
linux-core/drm_stub.c
linux-core/dvo_ch7xxx.c
linux-core/dvo_ivch.c
linux-core/dvo_tfp410.c
linux-core/i915_drv.c
linux-core/i915_gem.c [new file with mode: 0644]
linux-core/intel_crt.c
linux-core/intel_display.c
linux-core/intel_drv.h
linux-core/intel_fb.c
linux-core/intel_lvds.c
linux-core/intel_tv.c
linux-core/nouveau_bo.c
linux-core/nouveau_sgdma.c
linux-core/xgi_cmdlist.c
linux-core/xgi_drv.c
linux-core/xgi_misc.c
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drm.h
shared-core/i915_drv.h
shared-core/i915_init.c
shared-core/i915_irq.c
shared-core/radeon_cp.c
shared-core/radeon_drv.h
tests/Makefile.am
tests/drmtest.c
tests/gem_basic.c [new file with mode: 0644]
tests/gem_mmap.c [new file with mode: 0644]
tests/gem_readwrite.c [new file with mode: 0644]

index 0991da8..c8a22ea 100644
@@ -58,6 +58,9 @@ tests/getclient
 tests/getstats
 tests/getversion
 tests/lock
+tests/gem_basic
+tests/gem_mmap
+tests/gem_readwrite
 tests/openclose
 tests/setversion
 tests/updatedraw
index 7820334..1cf877d 100644
@@ -19,7 +19,7 @@
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 AC_PREREQ(2.57)
-AC_INIT([libdrm], 2.3.1, [dri-devel@lists.sourceforge.net], libdrm)
+AC_INIT([libdrm], 2.4.0, [dri-devel@lists.sourceforge.net], libdrm)
 AC_CONFIG_SRCDIR([Makefile.am])
 AM_INIT_AUTOMAKE([dist-bzip2])
 
@@ -35,9 +35,77 @@ AC_SYS_LARGEFILE
 pkgconfigdir=${libdir}/pkgconfig
 AC_SUBST(pkgconfigdir)
 
+
+dnl ===========================================================================
+dnl check compiler flags
+AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
+  AC_MSG_CHECKING([whether $CC supports $1])
+
+  libdrm_save_CFLAGS="$CFLAGS"
+  CFLAGS="$CFLAGS $1"
+
+  AC_COMPILE_IFELSE([ ], [libdrm_cc_flag=yes], [libdrm_cc_flag=no])
+  CFLAGS="$libdrm_save_CFLAGS"
+
+  if test "x$libdrm_cc_flag" = "xyes"; then
+    ifelse([$2], , :, [$2])
+  else
+    ifelse([$3], , :, [$3])
+  fi
+  AC_MSG_RESULT([$libdrm_cc_flag])
+])
+
+dnl Use lots of warning flags with gcc and compatible compilers
+
+dnl Note: if you change the following variable, the cache is automatically
+dnl skipped and all flags rechecked.  So there's no need to do anything
+dnl else.  If for any reason you need to force a recheck, just change
+dnl MAYBE_WARN in an ignorable way (like adding whitespace)
+
+MAYBE_WARN="-Wall -Wextra \
+-Wsign-compare -Werror-implicit-function-declaration \
+-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
+-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
+-Wpacked -Wswitch-enum -Wmissing-format-attribute \
+-Wstrict-aliasing=2 -Winit-self -Wunsafe-loop-optimizations \
+-Wdeclaration-after-statement -Wold-style-definition \
+-Wno-missing-field-initializers -Wno-unused-parameter \
+-Wno-attributes -Wno-long-long -Winline"
+
+# invalidate cached value if MAYBE_WARN has changed
+if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then
+       unset libdrm_cv_warn_cflags
+fi
+AC_CACHE_CHECK([for supported warning flags], libdrm_cv_warn_cflags, [
+       echo
+       WARN_CFLAGS=""
+
+       # Some warning options are not supported by all versions of
+       # gcc, so test all desired options against the current
+       # compiler.
+       #
+       # Note that there are some order dependencies
+       # here. Specifically, an option that disables a warning will
+       # have no net effect if a later option then enables that
+       # warning (perhaps implicitly). So we put some grouped
+       # options (-Wall and -Wextra) up front and the -Wno options
+       # last.
+
+       for W in $MAYBE_WARN; do
+               LIBDRM_CC_TRY_FLAG([$W], [WARN_CFLAGS="$WARN_CFLAGS $W"])
+       done
+
+       libdrm_cv_warn_cflags=$WARN_CFLAGS
+       libdrm_cv_warn_maybe=$MAYBE_WARN
+
+       AC_MSG_CHECKING([which warning flags were supported])])
+WARN_CFLAGS="$libdrm_cv_warn_cflags"
+
+AC_SUBST(WARN_CFLAGS)
 AC_OUTPUT([
        Makefile
        libdrm/Makefile
+       libdrm/intel/Makefile
        shared-core/Makefile
        tests/Makefile
        libdrm.pc])
index 24c3203..624f6ff 100644
 #  IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
+SUBDIRS = intel
+
 libdrm_la_LTLIBRARIES = libdrm.la
 libdrm_ladir = $(libdir)
 libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined
 
 AM_CFLAGS = -I$(top_srcdir)/shared-core
-libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c xf86drmMode.c
+libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \
+       xf86drmMode.c dri_bufmgr.c
+libdrm_la_LIBADD = intel/libdrm_intel.la
 
 libdrmincludedir = ${includedir}
-libdrminclude_HEADERS = xf86drm.h xf86mm.h xf86drmMode.h
+libdrminclude_HEADERS = xf86drm.h xf86mm.h xf86drmMode.h dri_bufmgr.h
 
 EXTRA_DIST = ChangeLog TODO
diff --git a/libdrm/dri_bufmgr.c b/libdrm/dri_bufmgr.c
new file mode 100644
index 0000000..7657df6
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "dri_bufmgr.h"
+
+/** @file dri_bufmgr.c
+ *
+ * Convenience functions for buffer management methods.
+ */
+
+dri_bo *
+dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
+            unsigned int alignment)
+{
+   return bufmgr->bo_alloc(bufmgr, name, size, alignment);
+}
+
+void
+dri_bo_reference(dri_bo *bo)
+{
+   bo->bufmgr->bo_reference(bo);
+}
+
+void
+dri_bo_unreference(dri_bo *bo)
+{
+   if (bo == NULL)
+      return;
+
+   bo->bufmgr->bo_unreference(bo);
+}
+
+int
+dri_bo_map(dri_bo *buf, int write_enable)
+{
+   return buf->bufmgr->bo_map(buf, write_enable);
+}
+
+int
+dri_bo_unmap(dri_bo *buf)
+{
+   return buf->bufmgr->bo_unmap(buf);
+}
+
+int
+dri_bo_subdata(dri_bo *bo, unsigned long offset,
+              unsigned long size, const void *data)
+{
+   int ret;
+   if (bo->bufmgr->bo_subdata)
+      return bo->bufmgr->bo_subdata(bo, offset, size, data);
+   if (size == 0 || data == NULL)
+      return 0;
+
+   ret = dri_bo_map(bo, 1);
+   if (ret)
+       return ret;
+   memcpy((unsigned char *)bo->virtual + offset, data, size);
+   dri_bo_unmap(bo);
+   return 0;
+}
+
+int
+dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
+                  unsigned long size, void *data)
+{
+   int ret;
+   if (bo->bufmgr->bo_get_subdata)
+      return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
+
+   if (size == 0 || data == NULL)
+      return 0;
+
+   ret = dri_bo_map(bo, 0);
+   if (ret)
+       return ret;
+   memcpy(data, (unsigned char *)bo->virtual + offset, size);
+   dri_bo_unmap(bo);
+   return 0;
+}
+
+void
+dri_bo_wait_rendering(dri_bo *bo)
+{
+   bo->bufmgr->bo_wait_rendering(bo);
+}
+
+void
+dri_bufmgr_destroy(dri_bufmgr *bufmgr)
+{
+   bufmgr->destroy(bufmgr);
+}
+
+void *dri_process_relocs(dri_bo *batch_buf)
+{
+   return batch_buf->bufmgr->process_relocs(batch_buf);
+}
+
+void dri_post_submit(dri_bo *batch_buf)
+{
+   batch_buf->bufmgr->post_submit(batch_buf);
+}
+
+void
+dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug)
+{
+   bufmgr->debug = enable_debug;
+}
+
+int
+dri_bufmgr_check_aperture_space(dri_bo *bo)
+{
+    return bo->bufmgr->check_aperture_space(bo);
+}
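
Taken together, these wrappers give callers a backend-independent BO API.  A
minimal caller-side sketch (hypothetical driver code, assuming a bufmgr
created by one of the backends added later in this commit):

   static int
   upload_example(dri_bufmgr *bufmgr, const void *pixels, unsigned long len)
   {
      /* Allocate a page-aligned buffer object, named for debugging. */
      dri_bo *bo = dri_bo_alloc(bufmgr, "upload example", len, 4096);
      if (bo == NULL)
         return -1;

      /* Let the bufmgr pick the upload path (it falls back to
       * map/memcpy/unmap if the backend provides no bo_subdata hook).
       */
      if (dri_bo_subdata(bo, 0, len, pixels) != 0) {
         dri_bo_unreference(bo);
         return -1;
      }

      /* ... hand bo to the hardware via relocations ... */

      dri_bo_unreference(bo);
      return 0;
   }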
diff --git a/libdrm/dri_bufmgr.h b/libdrm/dri_bufmgr.h
new file mode 100644
index 0000000..a5ae6c0
--- /dev/null
@@ -0,0 +1,174 @@
+/**************************************************************************
+ * 
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * 
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ *         Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _DRI_BUFMGR_H_
+#define _DRI_BUFMGR_H_
+#include <xf86drm.h>
+
+typedef struct _dri_bufmgr dri_bufmgr;
+typedef struct _dri_bo dri_bo;
+
+struct _dri_bo {
+   /**
+    * Size in bytes of the buffer object.
+    *
+    * The size may be larger than the size originally requested for the
+    * allocation, such as being aligned to page size.
+    */
+   unsigned long size;
+   /**
+    * Card virtual address (offset from the beginning of the aperture) for the
+    * object.  Only valid while validated.
+    */
+   unsigned long offset;
+   /**
+    * Virtual address for accessing the buffer data.  Only valid while mapped.
+    */
+   void *virtual;
+   /** Buffer manager context associated with this buffer object */
+   dri_bufmgr *bufmgr;
+};
+
+/**
+ * Context for a buffer manager instance.
+ *
+ * Contains public methods followed by private storage for the buffer manager.
+ */
+struct _dri_bufmgr {
+   /**
+    * Allocate a buffer object.
+    *
+    * Buffer objects are not necessarily initially mapped into CPU virtual
+    * address space or graphics device aperture.  They must be mapped using
+    * bo_map() to be used by the CPU, and validated for use using bo_validate()
+    * to be used from the graphics device.
+    */
+   dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
+                      unsigned long size, unsigned int alignment);
+
+   /** Takes a reference on a buffer object */
+   void (*bo_reference)(dri_bo *bo);
+
+   /**
+    * Releases a reference on a buffer object, freeing the data if
+    * no references remain.
+    */
+   void (*bo_unreference)(dri_bo *bo);
+
+   /**
+    * Maps the buffer into userspace.
+    *
+    * This function will block waiting for any existing execution on the
+    * buffer to complete, first.  The resulting mapping is available at
+    * buf->virtual.
+    */
+   int (*bo_map)(dri_bo *buf, int write_enable);
+
+   /** Reduces the refcount on the userspace mapping of the buffer object. */
+   int (*bo_unmap)(dri_bo *buf);
+
+   /**
+    * Write data into an object.
+    *
+    * This is an optional function; if missing, dri_bo will fall back to
+    * map/memcpy/unmap.
+    */
+   int (*bo_subdata) (dri_bo *buf, unsigned long offset,
+                     unsigned long size, const void *data);
+
+   /**
+    * Read data from an object
+    *
+    * This is an optional function; if missing, dri_bo will fall back to
+    * map/memcpy/unmap.
+    */
+   int (*bo_get_subdata) (dri_bo *bo, unsigned long offset,
+                         unsigned long size, void *data);
+
+   /**
+    * Waits for rendering to an object by the GPU to have completed.
+    *
+    * This is not required for any access to the BO by bo_map, bo_subdata, etc.
+    * It is merely a way for the driver to implement glFinish.
+    */
+   void (*bo_wait_rendering) (dri_bo *bo);
+
+   /**
+    * Tears down the buffer manager instance.
+    */
+   void (*destroy)(dri_bufmgr *bufmgr);
+
+   /**
+    * Processes the relocations, either in userland or by converting the list
+    * for use in batchbuffer submission.
+    *
+    * Kernel-based implementations will return a pointer to the arguments
+    * to be handed with batchbuffer submission to the kernel.  The userland
+    * implementation performs the buffer validation and emits relocations
+    * into the buffers in the appropriate order.
+    *
+    * \param batch_buf buffer at the root of the tree of relocations
+    * \return argument to be completed and passed to the execbuffers ioctl
+    *   (if any).
+    */
+   void *(*process_relocs)(dri_bo *batch_buf);
+
+   void (*post_submit)(dri_bo *batch_buf);
+
+   int (*check_aperture_space)(dri_bo *bo);
+   int debug; /**< Enables verbose debugging printouts */
+};
+
+dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
+                    unsigned int alignment);
+void dri_bo_reference(dri_bo *bo);
+void dri_bo_unreference(dri_bo *bo);
+int dri_bo_map(dri_bo *buf, int write_enable);
+int dri_bo_unmap(dri_bo *buf);
+
+int dri_bo_subdata(dri_bo *bo, unsigned long offset,
+                  unsigned long size, const void *data);
+int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
+                      unsigned long size, void *data);
+void dri_bo_wait_rendering(dri_bo *bo);
+
+void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
+void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
+
+void *dri_process_relocs(dri_bo *batch_buf);
+void dri_post_process_relocs(dri_bo *batch_buf);
+void dri_post_submit(dri_bo *batch_buf);
+int dri_bufmgr_check_aperture_space(dri_bo *bo);
+
+#endif
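
Backends implement this interface by embedding the dri_bufmgr vtable at the
start of a larger private structure and casting back, as both of the
implementations added below do.  A minimal sketch of the pattern (hypothetical
names):

   typedef struct _example_bufmgr {
      dri_bufmgr bufmgr;   /* must come first so the cast below is valid */
      int example_state;   /* backend-private storage follows the vtable */
   } example_bufmgr;

   static dri_bo *
   example_bo_alloc(dri_bufmgr *bufmgr_ctx, const char *name,
                    unsigned long size, unsigned int alignment)
   {
      example_bufmgr *example = (example_bufmgr *)bufmgr_ctx;

      /* ... allocate a BO using example->example_state ... */
      return NULL;
   }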
diff --git a/libdrm/intel/Makefile.am b/libdrm/intel/Makefile.am
new file mode 100644
index 0000000..111204b
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright © 2008 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+#    Eric Anholt <eric@anholt.net>
+
+AM_CFLAGS = \
+       $(WARN_CFLAGS) \
+       -I$(top_srcdir)/shared-core
+
+noinst_LTLIBRARIES = libdrm_intel.la
+
+libdrm_intel_la_SOURCES = \
+       intel_bufmgr_fake.c \
+       intel_bufmgr_gem.c \
+       mm.c \
+       mm.h
+
+libdrm_intelincludedir = ${includedir}
+libdrm_intelinclude_HEADERS = intel_bufmgr.h
diff --git a/libdrm/intel/intel_bufmgr.h b/libdrm/intel/intel_bufmgr.h
new file mode 100644
index 0000000..1cf0d51
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/**
+ * @file intel_bufmgr.h
+ *
+ * Public definitions of Intel-specific bufmgr functions.
+ */
+
+#ifndef INTEL_BUFMGR_GEM_H
+#define INTEL_BUFMGR_GEM_H
+
+#include "dri_bufmgr.h"
+
+/**
+ * Intel-specific bufmgr bits that follow immediately after the
+ * generic bufmgr structure.
+ */
+struct intel_bufmgr {
+    /**
+     * Add a relocation entry in reloc_buf, which will be updated with the
+     * target buffer's real offset on command submission.
+     *
+     * Relocations remain in place for the lifetime of the buffer object.
+     *
+     * \param reloc_buf Buffer to write the relocation into.
+     * \param read_domains GEM read domains which the buffer will be read into
+     *       by the command that this relocation is part of.
+     * \param write_domain GEM write domain which the buffer will be dirtied
+     *       in by the command that this relocation is part of.
+     * \param delta Constant value to be added to the relocation target's
+     *        offset.
+     * \param offset Byte offset within batch_buf of the relocated pointer.
+     * \param target Buffer whose offset should be written into the relocation
+     *      entry.
+     */
+    int (*emit_reloc)(dri_bo *reloc_buf,
+                     uint32_t read_domains, uint32_t write_domain,
+                     uint32_t delta, uint32_t offset, dri_bo *target);
+};
+
+/* intel_bufmgr_gem.c */
+dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
+dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
+                                     unsigned int handle);
+void intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr);
+
+/* intel_bufmgr_fake.c */
+dri_bufmgr *intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
+                                  unsigned long size,
+                                  unsigned int (*fence_emit)(void *private),
+                                  int (*fence_wait)(void *private,
+                                                    unsigned int cookie),
+                                  void *driver_priv);
+dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
+                                  unsigned long offset, unsigned long size,
+                                  void *virtual);
+
+void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
+void intel_bo_fake_disable_backing_store(dri_bo *bo,
+                                        void (*invalidate_cb)(dri_bo *bo,
+                                                              void *ptr),
+                                        void *ptr);
+void intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr);
+
+int intel_bo_emit_reloc(dri_bo *reloc_buf,
+                       uint32_t read_domains, uint32_t write_domain,
+                       uint32_t delta, uint32_t offset, dri_bo *target_buf);
+
+#endif /* INTEL_BUFMGR_GEM_H */
+
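
A sketch of how a driver might use intel_bo_emit_reloc() while building a
batchbuffer (hypothetical buffers and offset; the domain flags are the GEM
domains from i915_drm.h):

   /* Make the dword at reloc_offset within batch_bo point at target_bo,
    * which the command reads through the render cache:
    */
   intel_bo_emit_reloc(batch_bo,
                       I915_GEM_DOMAIN_RENDER, /* read_domains */
                       0,                      /* write_domain: not written */
                       0,                      /* delta added to the offset */
                       reloc_offset,           /* byte offset in batch_bo */
                       target_bo);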
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
new file mode 100644
index 0000000..e988eb5
--- /dev/null
@@ -0,0 +1,1218 @@
+/**************************************************************************
+ * 
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * 
+ **************************************************************************/
+
+/* Originally a fake version of the buffer manager, so that we could
+ * prototype the changes in a driver fairly quickly; it has since been
+ * fleshed out into a fully functional interim solution.
+ *
+ * Basically wraps the old style memory management in the new
+ * programming interface, but is more expressive and avoids many of
+ * the bugs in the old texture manager.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include "dri_bufmgr.h"
+#include "intel_bufmgr.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "mm.h"
+
+#define DBG(...) do {                                  \
+   if (bufmgr_fake->bufmgr.debug)                      \
+      drmMsg(__VA_ARGS__);                             \
+} while (0)
+
+/* Internal flags:
+ */
+#define BM_NO_BACKING_STORE                    0x00000001
+#define BM_NO_FENCE_SUBDATA                    0x00000002
+#define BM_PINNED                              0x00000004
+
+/* Wrapper around mm.c's mem_block, which understands that you must
+ * wait for fences to expire before memory can be freed.  This is
+ * specific to our use of memcpy for uploads - an upload that was
+ * processed through the command queue wouldn't need to care about
+ * fences.
+ */
+#define MAX_RELOCS 4096
+
+struct fake_buffer_reloc
+{
+   /** Buffer object that the relocation points at. */
+   dri_bo *target_buf;
+   /** Offset of the relocation entry within reloc_buf. */
+   uint32_t offset;
+   /** Cached value of the offset when we last performed this relocation. */
+   uint32_t last_target_offset;
+   /** Value added to target_buf's offset to get the relocation entry. */
+   uint32_t delta;
+   /** Cache domains the target buffer is read into. */
+   uint32_t read_domains;
+   /** Cache domain the target buffer will have dirty cachelines in. */
+   uint32_t write_domain;
+};
+
+struct block {
+   struct block *next, *prev;
+   struct mem_block *mem;      /* BM_MEM_AGP */
+
+   /**
+    * Marks that the block is currently in the aperture and has yet to be
+    * fenced.
+    */
+   unsigned on_hardware:1;
+   /**
+    * Marks that the block is currently fenced (being used by rendering) and
+    * can't be freed until @fence is passed.
+    */
+   unsigned fenced:1;
+
+   /** Fence cookie for the block. */
+   unsigned fence; /* Split to read_fence, write_fence */
+
+   dri_bo *bo;
+   void *virtual;
+};
+
+typedef struct _bufmgr_fake {
+   dri_bufmgr bufmgr;
+   struct intel_bufmgr intel_bufmgr;
+
+   unsigned long low_offset;
+   unsigned long size;
+   void *virtual;
+
+   struct mem_block *heap;
+
+   unsigned buf_nr;            /* for generating ids */
+
+   /**
+    * List of blocks which are currently in the GART but haven't been
+    * fenced yet.
+    */
+   struct block on_hardware;
+   /**
+    * List of blocks which are in the GART and have an active fence on them.
+    */
+   struct block fenced;
+   /**
+    * List of blocks which have an expired fence and are ready to be evicted.
+    */
+   struct block lru;
+
+   unsigned int last_fence;
+
+   unsigned fail:1;
+   unsigned need_fence:1;
+   int thrashing;
+
+   /**
+    * Driver callback to emit a fence, returning the cookie.
+    *
+    * Currently, this also requires that a write flush be emitted before
+    * emitting the fence, but this should change.
+    */
+   unsigned int (*fence_emit)(void *private);
+   /** Driver callback to wait for a fence cookie to have passed. */
+   int (*fence_wait)(void *private, unsigned int fence_cookie);
+   /** Driver-supplied argument to driver callbacks */
+   void *driver_priv;
+
+   int debug;
+
+   int performed_rendering;
+
+   /* keep track of the current total size of objects we have relocs for */
+   unsigned long current_total_size;
+} dri_bufmgr_fake;
+
+typedef struct _dri_bo_fake {
+   dri_bo bo;
+
+   unsigned id;                        /* debug only */
+   const char *name;
+
+   unsigned dirty:1;
+   unsigned size_accounted:1; /* this buffer's size has been accounted against the aperture */
+   unsigned card_dirty:1; /* has the card written to this buffer - we may need to copy it back */
+   unsigned int refcount;
+   /* Flags may consist of any of the DRM_BO flags, plus
+    * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two
+    * driver private flags.
+    */
+   uint64_t flags;
+   /** Cache domains the target buffer is read into. */
+   uint32_t read_domains;
+   /** Cache domain the target buffer will have dirty cachelines in. */
+   uint32_t write_domain;
+
+   unsigned int alignment;
+   int is_static, validated;
+   unsigned int map_count;
+
+   /** relocation list */
+   struct fake_buffer_reloc *relocs;
+   int nr_relocs;
+
+   struct block *block;
+   void *backing_store;
+   void (*invalidate_cb)(dri_bo *bo, void *ptr);
+   void *invalidate_ptr;
+} dri_bo_fake;
+
+static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
+                       unsigned int fence_cookie);
+
+static int dri_fake_check_aperture_space(dri_bo *bo);
+
+#define MAXFENCE 0x7fffffff
+
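+/* Returns whether fence cookie a was emitted at or before cookie b,
+ * treating the counter as wrapping at MAXFENCE.  Outstanding cookies are
+ * assumed to span less than 1<<24, so e.g. FENCE_LTE(0x7fffff00, 0x100)
+ * holds across the wrap.
+ */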
+static int FENCE_LTE( unsigned a, unsigned b )
+{
+   if (a == b)
+      return 1;
+
+   if (a < b && b - a < (1<<24))
+      return 1;
+
+   if (a > b && MAXFENCE - a + b < (1<<24))
+      return 1;
+
+   return 0;
+}
+
+static unsigned int
+_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
+{
+   bufmgr_fake->last_fence = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
+   return bufmgr_fake->last_fence;
+}
+
+static void
+_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie)
+{
+   int ret;
+
+   ret = bufmgr_fake->fence_wait(bufmgr_fake->driver_priv, cookie);
+   if (ret != 0) {
+      drmMsg("%s:%d: Error %d waiting for fence.\n", __FILE__, __LINE__, ret);
+      abort();
+   }
+   clear_fenced(bufmgr_fake, cookie);
+}
+
+static int
+_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+   /* Slight problem with wrap-around:
+    */
+   return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
+}
+
+/**
+ * Allocate a memory manager block for the buffer.
+ */
+static int
+alloc_block(dri_bo *bo)
+{
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   struct block *block = (struct block *)calloc(1, sizeof(*block));
+   unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
+   unsigned int sz;
+
+   if (!block)
+      return 0;
+
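+   /* Round the size up to the next multiple of the (power-of-two)
+    * alignment.
+    */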
+   sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
+
+   block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
+   if (!block->mem) {
+      free(block);
+      return 0;
+   }
+
+   DRMINITLISTHEAD(block);
+
+   /* Insert at head or at tail? */
+   DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+
+   block->virtual = (uint8_t *)bufmgr_fake->virtual +
+      block->mem->ofs - bufmgr_fake->low_offset;
+   block->bo = bo;
+
+   bo_fake->block = block;
+
+   return 1;
+}
+
+/* Release the card storage associated with buf:
+ */
+static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
+{
+   dri_bo_fake *bo_fake;
+   DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);
+
+   if (!block)
+      return;
+
+   bo_fake = (dri_bo_fake *)block->bo;
+   if (!(bo_fake->flags & BM_NO_BACKING_STORE) && (bo_fake->card_dirty == 1)) {
+     memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
+     bo_fake->card_dirty = 0;
+     bo_fake->dirty = 1;
+   }
+
+   if (block->on_hardware) {
+      block->bo = NULL;
+   }
+   else if (block->fenced) {
+      block->bo = NULL;
+   }
+   else {
+      DBG("    - free immediately\n");
+      DRMLISTDEL(block);
+
+      mmFreeMem(block->mem);
+      free(block);
+   }
+}
+
+static void
+alloc_backing_store(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+   assert(!bo_fake->backing_store);
+   assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
+
+   bo_fake->backing_store = malloc(bo->size);
+
+   DBG("alloc_backing - buf %d %p %d\n", bo_fake->id, bo_fake->backing_store, bo->size);
+   assert(bo_fake->backing_store);
+}
+
+static void
+free_backing_store(dri_bo *bo)
+{
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   if (bo_fake->backing_store) {
+      assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
+      free(bo_fake->backing_store);
+      bo_fake->backing_store = NULL;
+   }
+}
+
+static void
+set_dirty(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
+      bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
+
+   assert(!(bo_fake->flags & BM_PINNED));
+
+   DBG("set_dirty - buf %d\n", bo_fake->id);
+   bo_fake->dirty = 1;
+}
+
+static int
+evict_lru(dri_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
+{
+   struct block *block, *tmp;
+
+   DBG("%s\n", __FUNCTION__);
+
+   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+      dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+
+      if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+        continue;
+
+      if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence))
+        return 0;
+
+      set_dirty(&bo_fake->bo);
+      bo_fake->block = NULL;
+
+      free_block(bufmgr_fake, block);
+      return 1;
+   }
+
+   return 0;
+}
+
+static int
+evict_mru(dri_bufmgr_fake *bufmgr_fake)
+{
+   struct block *block, *tmp;
+
+   DBG("%s\n", __FUNCTION__);
+
+   DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
+      dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+
+      if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+        continue;
+
+      set_dirty(&bo_fake->bo);
+      bo_fake->block = NULL;
+
+      free_block(bufmgr_fake, block);
+      return 1;
+   }
+
+   return 0;
+}
+
+/**
+ * Removes all objects from the fenced list older than the given fence.
+ */
+static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
+                       unsigned int fence_cookie)
+{
+   struct block *block, *tmp;
+   int ret = 0;
+
+   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
+      assert(block->fenced);
+
+      if (_fence_test(bufmgr_fake, block->fence)) {
+
+        block->fenced = 0;
+
+        if (!block->bo) {
+           DBG("delayed free: offset %x sz %x\n",
+               block->mem->ofs, block->mem->size);
+           DRMLISTDEL(block);
+           mmFreeMem(block->mem);
+           free(block);
+        }
+        else {
+           DBG("return to lru: offset %x sz %x\n",
+               block->mem->ofs, block->mem->size);
+           DRMLISTDEL(block);
+           DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+        }
+
+        ret = 1;
+      }
+      else {
+        /* Blocks are ordered by fence, so if one fails, all from
+         * here will fail also:
+         */
+       DBG("fence not passed: offset %x sz %x %d %d \n",
+           block->mem->ofs, block->mem->size, block->fence, bufmgr_fake->last_fence);
+        break;
+      }
+   }
+
+   DBG("%s: %d\n", __FUNCTION__, ret);
+   return ret;
+}
+
+static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+   struct block *block, *tmp;
+
+   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+      DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block,
+         block->mem->size, block->mem->ofs, block->bo, fence);
+      block->fence = fence;
+
+      block->on_hardware = 0;
+      block->fenced = 1;
+
+      /* Move to tail of pending list here
+       */
+      DRMLISTDEL(block);
+      DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
+   }
+
+   assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+}
+
+static int evict_and_alloc_block(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   assert(bo_fake->block == NULL);
+
+   /* Search for already free memory:
+    */
+   if (alloc_block(bo))
+      return 1;
+
+   /* If we're not thrashing, allow lru eviction to dig deeper into
+    * recently used textures.  We'll probably be thrashing soon:
+    */
+   if (!bufmgr_fake->thrashing) {
+      while (evict_lru(bufmgr_fake, 0))
+        if (alloc_block(bo))
+           return 1;
+   }
+
+   /* Keep thrashing counter alive?
+    */
+   if (bufmgr_fake->thrashing)
+      bufmgr_fake->thrashing = 20;
+
+   /* Wait on any already pending fences - here we are waiting for any
+    * freed memory that has been submitted to hardware and fenced to
+    * become available:
+    */
+   while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+      uint32_t fence = bufmgr_fake->fenced.next->fence;
+      _fence_wait_internal(bufmgr_fake, fence);
+
+      if (alloc_block(bo))
+        return 1;
+   }
+
+   if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
+      while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+        uint32_t fence = bufmgr_fake->fenced.next->fence;
+        _fence_wait_internal(bufmgr_fake, fence);
+      }
+
+      if (!bufmgr_fake->thrashing) {
+        DBG("thrashing\n");
+      }
+      bufmgr_fake->thrashing = 20;
+
+      if (alloc_block(bo))
+        return 1;
+   }
+
+   while (evict_mru(bufmgr_fake))
+      if (alloc_block(bo))
+        return 1;
+
+   DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);
+
+   return 0;
+}
+
+/***********************************************************************
+ * Public functions
+ */
+
+/**
+ * Wait for hardware idle by emitting a fence and waiting for it.
+ */
+static void
+dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
+{
+   unsigned int cookie;
+
+   cookie = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
+   _fence_wait_internal(bufmgr_fake, cookie);
+}
+
+/**
+ * Wait for rendering to a buffer to complete.
+ *
+ * It is assumed that the batchbuffer which performed the rendering included
+ * the necessary flushing.
+ */
+static void
+dri_fake_bo_wait_rendering(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   if (bo_fake->block == NULL || !bo_fake->block->fenced)
+      return;
+
+   _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+}
+
+/* Specifically ignore texture memory sharing.
+ *  -- just evict everything
+ *  -- and wait for idle
+ */
+void
+intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+   struct block *block, *tmp;
+
+   bufmgr_fake->need_fence = 1;
+   bufmgr_fake->fail = 0;
+
+   /* Wait for hardware idle.  We don't know where acceleration has been
+    * happening, so we'll need to wait anyway before letting anything get
+    * put on the card again.
+    */
+   dri_bufmgr_fake_wait_idle(bufmgr_fake);
+
+   /* Check that we hadn't released the lock without having fenced the last
+    * set of buffers.
+    */
+   assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+   assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+      assert(_fence_test(bufmgr_fake, block->fence));
+      set_dirty(block->bo);
+   }
+}
+
+static dri_bo *
+dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
+                 unsigned long size, unsigned int alignment)
+{
+   dri_bufmgr_fake *bufmgr_fake;
+   dri_bo_fake *bo_fake;
+
+   bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+
+   assert(size != 0);
+
+   bo_fake = calloc(1, sizeof(*bo_fake));
+   if (!bo_fake)
+      return NULL;
+
+   bo_fake->bo.size = size;
+   bo_fake->bo.offset = -1;
+   bo_fake->bo.virtual = NULL;
+   bo_fake->bo.bufmgr = bufmgr;
+   bo_fake->refcount = 1;
+
+   /* Alignment must be a power of two */
+   assert((alignment & (alignment - 1)) == 0);
+   if (alignment == 0)
+      alignment = 1;
+   bo_fake->alignment = alignment;
+   bo_fake->id = ++bufmgr_fake->buf_nr;
+   bo_fake->name = name;
+   bo_fake->flags = 0;
+   bo_fake->is_static = 0;
+
+   DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+       bo_fake->bo.size / 1024);
+
+   return &bo_fake->bo;
+}
+
+dri_bo *
+intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
+                          unsigned long offset, unsigned long size,
+                          void *virtual)
+{
+   dri_bufmgr_fake *bufmgr_fake;
+   dri_bo_fake *bo_fake;
+
+   bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+
+   assert(size != 0);
+
+   bo_fake = calloc(1, sizeof(*bo_fake));
+   if (!bo_fake)
+      return NULL;
+
+   bo_fake->bo.size = size;
+   bo_fake->bo.offset = offset;
+   bo_fake->bo.virtual = virtual;
+   bo_fake->bo.bufmgr = bufmgr;
+   bo_fake->refcount = 1;
+   bo_fake->id = ++bufmgr_fake->buf_nr;
+   bo_fake->name = name;
+   bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
+   bo_fake->is_static = 1;
+
+   DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+       bo_fake->bo.size / 1024);
+
+   return &bo_fake->bo;
+}
+
+static void
+dri_fake_bo_reference(dri_bo *bo)
+{
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   bo_fake->refcount++;
+}
+
+static void
+dri_fake_bo_unreference(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+   int i;
+
+   if (!bo)
+      return;
+
+   bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+
+   if (--bo_fake->refcount == 0) {
+      assert(bo_fake->map_count == 0);
+      /* No remaining references, so free it */
+      if (bo_fake->block)
+        free_block(bufmgr_fake, bo_fake->block);
+      free_backing_store(bo);
+
+      for (i = 0; i < bo_fake->nr_relocs; i++)
+        dri_bo_unreference(bo_fake->relocs[i].target_buf);
+
+      DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
+
+      free(bo_fake->relocs);
+      free(bo);
+
+      return;
+   }
+}
+
+/**
+ * Set the buffer as not requiring backing store, and instead get the callback
+ * invoked whenever it would be set dirty.
+ */
+void intel_bo_fake_disable_backing_store(dri_bo *bo,
+                                        void (*invalidate_cb)(dri_bo *bo,
+                                                              void *ptr),
+                                        void *ptr)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   if (bo_fake->backing_store)
+      free_backing_store(bo);
+
+   bo_fake->flags |= BM_NO_BACKING_STORE;
+
+   DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
+   bo_fake->dirty = 1;
+   bo_fake->invalidate_cb = invalidate_cb;
+   bo_fake->invalidate_ptr = ptr;
+
+   /* Note that it is invalid right from the start.  Also note
+    * invalidate_cb is called with the bufmgr locked, so cannot
+    * itself make bufmgr calls.
+    */
+   if (invalidate_cb != NULL)
+      invalidate_cb(bo, ptr);
+}
+
+/**
+ * Map a buffer into bo->virtual, allocating either card memory space (If
+ * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
+ */
+static int
+dri_fake_bo_map(dri_bo *bo, int write_enable)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   /* Static buffers are always mapped. */
+   if (bo_fake->is_static)
+      return 0;
+
+   /* Allow recursive mapping.  Mesa may recursively map buffers with
+    * nested display loops, and it is used internally in bufmgr_fake
+    * for relocation.
+    */
+   if (bo_fake->map_count++ != 0)
+      return 0;
+
+   {
+      DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+         bo_fake->bo.size / 1024);
+
+      if (bo->virtual != NULL) {
+        drmMsg("%s: already mapped\n", __FUNCTION__);
+        abort();
+      }
+      else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) {
+
+        if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+           DBG("%s: alloc failed\n", __FUNCTION__);
+           bufmgr_fake->fail = 1;
+           return 1;
+        }
+        else {
+           assert(bo_fake->block);
+           bo_fake->dirty = 0;
+
+           if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
+               bo_fake->block->fenced) {
+              dri_fake_bo_wait_rendering(bo);
+           }
+
+           bo->virtual = bo_fake->block->virtual;
+        }
+      }
+      else {
+        if (write_enable)
+           set_dirty(bo);
+
+        if (bo_fake->backing_store == 0)
+           alloc_backing_store(bo);
+
+        bo->virtual = bo_fake->backing_store;
+      }
+   }
+
+   return 0;
+}
+
+static int
+dri_fake_bo_unmap(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   /* Static buffers are always mapped. */
+   if (bo_fake->is_static)
+      return 0;
+
+   assert(bo_fake->map_count != 0);
+   if (--bo_fake->map_count != 0)
+      return 0;
+
+   DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+       bo_fake->bo.size / 1024);
+
+   bo->virtual = NULL;
+
+   return 0;
+}
+
+static void
+dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
+{
+   struct block *block, *tmp;
+
+   bufmgr_fake->performed_rendering = 0;
+   /* Kick every BO that is currently on the hardware off, so that all of
+    * them get revalidated (and re-uploaded) on the next submission. */
+   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+      dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+
+      block->on_hardware = 0;
+      free_block(bufmgr_fake, block);
+      bo_fake->block = NULL;
+      bo_fake->validated = 0;
+      if (!(bo_fake->flags & BM_NO_BACKING_STORE))
+         bo_fake->dirty = 1;
+   }
+}
+
+static int
+dri_fake_bo_validate(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   /* XXX: Sanity-check whether we've already validated this one under
+    * different flags.  See drmAddValidateItem().
+    */
+   bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+
+   DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+       bo_fake->bo.size / 1024);
+
+   /* Sanity check: Buffers should be unmapped before being validated.
+    * This is not so much of a problem for bufmgr_fake, but TTM refuses,
+    * and the problem is harder to debug there.
+    */
+   assert(bo_fake->map_count == 0);
+
+   if (bo_fake->is_static) {
+      /* Add it to the needs-fence list */
+      bufmgr_fake->need_fence = 1;
+      return 0;
+   }
+
+   /* reset size accounted */
+   bo_fake->size_accounted = 0;
+
+   /* Allocate the card memory */
+   if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+      bufmgr_fake->fail = 1;
+      DBG("Failed to validate buf %d:%s\n", bo_fake->id, bo_fake->name);
+      return -1;
+   }
+
+   assert(bo_fake->block);
+   assert(bo_fake->block->bo == &bo_fake->bo);
+
+   bo->offset = bo_fake->block->mem->ofs;
+
+   /* Upload the buffer contents if necessary */
+   if (bo_fake->dirty) {
+      DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
+         bo_fake->name, bo->size, bo_fake->block->mem->ofs);
+
+      assert(!(bo_fake->flags &
+              (BM_NO_BACKING_STORE|BM_PINNED)));
+
+      /* Actually, should be able to just wait for a fence on the memory,
+       * which we would be tracking when we free it.  Waiting for idle is
+       * a sufficiently large hammer for now.
+       */
+      dri_bufmgr_fake_wait_idle(bufmgr_fake);
+
+      /* We may never have mapped this BO, so it might not have any backing
+       * store; that should be rare, but zero the card memory in any case.
+       */
+      if (bo_fake->backing_store)
+         memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size);
+      else
+         memset(bo_fake->block->virtual, 0, bo->size);
+
+      bo_fake->dirty = 0;
+   }
+
+   bo_fake->block->fenced = 0;
+   bo_fake->block->on_hardware = 1;
+   DRMLISTDEL(bo_fake->block);
+   DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
+
+   bo_fake->validated = 1;
+   bufmgr_fake->need_fence = 1;
+
+   return 0;
+}
+
+static void
+dri_fake_fence_validated(dri_bufmgr *bufmgr)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+   unsigned int cookie;
+
+   cookie = _fence_emit_internal(bufmgr_fake);
+   fence_blocks(bufmgr_fake, cookie);
+
+   DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
+}
+
+static void
+dri_fake_destroy(dri_bufmgr *bufmgr)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+
+   mmDestroy(bufmgr_fake->heap);
+   free(bufmgr);
+}
+
+static int
+dri_fake_emit_reloc(dri_bo *reloc_buf,
+                   uint32_t read_domains, uint32_t write_domain,
+                   uint32_t delta, uint32_t offset, dri_bo *target_buf)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)reloc_buf->bufmgr;
+   struct fake_buffer_reloc *r;
+   dri_bo_fake *reloc_fake = (dri_bo_fake *)reloc_buf;
+   dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
+   int i;
+
+   assert(reloc_buf);
+   assert(target_buf);
+
+   assert(target_fake->is_static || target_fake->size_accounted);
+
+   if (reloc_fake->relocs == NULL) {
+      reloc_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) *
+                                 MAX_RELOCS);
+   }
+
+   r = &reloc_fake->relocs[reloc_fake->nr_relocs++];
+
+   assert(reloc_fake->nr_relocs <= MAX_RELOCS);
+
+   dri_bo_reference(target_buf);
+
+   r->target_buf = target_buf;
+   r->offset = offset;
+   r->last_target_offset = target_buf->offset;
+   r->delta = delta;
+   r->read_domains = read_domains;
+   r->write_domain = write_domain;
+
+   if (bufmgr_fake->debug) {
+      /* Check that a conflicting relocation hasn't already been emitted. */
+      for (i = 0; i < reloc_fake->nr_relocs - 1; i++) {
+        struct fake_buffer_reloc *r2 = &reloc_fake->relocs[i];
+
+        assert(r->offset != r2->offset);
+      }
+   }
+
+   return 0;
+}
+
+/**
+ * Incorporates the validation flags associated with each relocation into
+ * the combined validation flags for the buffer on this batchbuffer submission.
+ */
+static void
+dri_fake_calculate_domains(dri_bo *bo)
+{
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+   int i;
+
+   for (i = 0; i < bo_fake->nr_relocs; i++) {
+      struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+      dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+
+      /* Do the same for the tree of buffers we depend on */
+      dri_fake_calculate_domains(r->target_buf);
+
+      target_fake->read_domains |= r->read_domains;
+      if (r->write_domain != 0)
+        target_fake->write_domain = r->write_domain;
+   }
+}
+
+
+static int
+dri_fake_reloc_and_validate_buffer(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+   int i, ret;
+
+   assert(bo_fake->map_count == 0);
+
+   for (i = 0; i < bo_fake->nr_relocs; i++) {
+      struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+      dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+      uint32_t reloc_data;
+
+      /* Validate the target buffer if that hasn't been done. */
+      if (!target_fake->validated) {
+         ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
+         if (ret != 0) {
+            if (bo->virtual != NULL)
+                dri_bo_unmap(bo);
+            return ret;
+         }
+      }
+
+      /* Calculate the value of the relocation entry. */
+      if (r->target_buf->offset != r->last_target_offset) {
+        reloc_data = r->target_buf->offset + r->delta;
+
+        if (bo->virtual == NULL)
+           dri_bo_map(bo, 1);
+
+        *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
+
+        r->last_target_offset = r->target_buf->offset;
+      }
+   }
+
+   if (bo->virtual != NULL)
+      dri_bo_unmap(bo);
+
+   if (bo_fake->write_domain != 0) {
+      if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
+         if (bo_fake->backing_store == 0)
+            alloc_backing_store(bo);
+
+         bo_fake->card_dirty = 1;
+      }
+      bufmgr_fake->performed_rendering = 1;
+   }
+
+   return dri_fake_bo_validate(bo);
+}
+
+static void *
+dri_fake_process_relocs(dri_bo *batch_buf)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
+   dri_bo_fake *batch_fake = (dri_bo_fake *)batch_buf;
+   int ret;
+   int retry_count = 0;
+
+   bufmgr_fake->performed_rendering = 0;
+
+   dri_fake_calculate_domains(batch_buf);
+
+   batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
+
+   /* If we run out of aperture space, blow the whole lot away and retry. */
+ restart:
+   ret = dri_fake_reloc_and_validate_buffer(batch_buf);
+   if (bufmgr_fake->fail == 1) {
+      if (retry_count == 0) {
+         retry_count++;
+         dri_fake_kick_all(bufmgr_fake);
+         bufmgr_fake->fail = 0;
+         goto restart;
+      } else /* dump out the memory here */
+         mmDumpMemInfo(bufmgr_fake->heap);
+   }
+
+   assert(ret == 0);
+
+   bufmgr_fake->current_total_size = 0;
+   return NULL;
+}
+
+static void
+dri_bo_fake_post_submit(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+   int i;
+
+   for (i = 0; i < bo_fake->nr_relocs; i++) {
+      struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+      dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+
+      if (target_fake->validated)
+        dri_bo_fake_post_submit(r->target_buf);
+
+      DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
+         bo_fake->name, (uint32_t)bo->offset, r->offset,
+         target_fake->name, (uint32_t)r->target_buf->offset, r->delta);
+   }
+
+   assert(bo_fake->map_count == 0);
+   bo_fake->validated = 0;
+   bo_fake->read_domains = 0;
+   bo_fake->write_domain = 0;
+}
+
+
+static void
+dri_fake_post_submit(dri_bo *batch_buf)
+{
+   dri_fake_fence_validated(batch_buf->bufmgr);
+
+   dri_bo_fake_post_submit(batch_buf);
+}
+
+static int
+dri_fake_check_aperture_space(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+   unsigned int sz;
+
+   sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
+
+   if (bo_fake->size_accounted || bo_fake->is_static)
+      return 0;
+
+   if (bufmgr_fake->current_total_size + sz > bufmgr_fake->size) {
+     DBG("check_space: %s bo %d %d overflowed bufmgr size %d\n", bo_fake->name, bo_fake->id, sz, bufmgr_fake->size);
+      return -1;
+   }
+
+   bufmgr_fake->current_total_size += sz;
+   bo_fake->size_accounted = 1;
+   DBG("drm_check_space: buf %d, %s %d %d\n", bo_fake->id, bo_fake->name, bo->size, bufmgr_fake->current_total_size);
+   return 0;
+}
+
+/**
+ * Evicts all buffers, waiting for fences to pass and copying contents out
+ * as necessary.
+ *
+ * Used by the X Server on LeaveVT, when the card memory is no longer our
+ * own.
+ */
+void
+intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+   struct block *block, *tmp;
+
+   bufmgr_fake->need_fence = 1;
+   bufmgr_fake->fail = 0;
+
+   /* Wait for hardware idle.  We don't know where acceleration has been
+    * happening, so we'll need to wait anyway before letting anything get
+    * put on the card again.
+    */
+   dri_bufmgr_fake_wait_idle(bufmgr_fake);
+
+   /* Check that we hadn't released the lock without having fenced the last
+    * set of buffers.
+    */
+   assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+   assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+      /* Releases the memory, and memcpys dirty contents out if necessary. */
+      free_block(bufmgr_fake, block);
+   }
+}
+
+dri_bufmgr *
+intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
+                      unsigned long size,
+                      unsigned int (*fence_emit)(void *private),
+                      int (*fence_wait)(void *private, unsigned int cookie),
+                      void *driver_priv)
+{
+   dri_bufmgr_fake *bufmgr_fake;
+
+   bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
+
+   /* Initialize allocator */
+   DRMINITLISTHEAD(&bufmgr_fake->fenced);
+   DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
+   DRMINITLISTHEAD(&bufmgr_fake->lru);
+
+   bufmgr_fake->low_offset = low_offset;
+   bufmgr_fake->virtual = low_virtual;
+   bufmgr_fake->size = size;
+   bufmgr_fake->heap = mmInit(low_offset, size);
+
+   /* Hook in methods */
+   bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc;
+   bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference;
+   bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
+   bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
+   bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
+   bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering;
+   bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
+   bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
+   bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit;
+   bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space;
+   bufmgr_fake->bufmgr.debug = 0;
+   bufmgr_fake->intel_bufmgr.emit_reloc = dri_fake_emit_reloc;
+
+   bufmgr_fake->fence_emit = fence_emit;
+   bufmgr_fake->fence_wait = fence_wait;
+   bufmgr_fake->driver_priv = driver_priv;
+
+   return &bufmgr_fake->bufmgr;
+}
+
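+/*
+ * Minimal usage sketch, assuming driver-provided emit_fence()/wait_fence()
+ * callbacks and an aperture range already mapped at aperture_virtual.  On
+ * LeaveVT the X Server would call intel_bufmgr_fake_evict_all() before
+ * giving up the card:
+ *
+ *   dri_bufmgr *bufmgr;
+ *
+ *   bufmgr = intel_bufmgr_fake_init(aperture_offset, aperture_virtual,
+ *                                   aperture_size, emit_fence, wait_fence,
+ *                                   driver_priv);
+ *   ...
+ *   intel_bufmgr_fake_evict_all(bufmgr);
+ *   dri_bufmgr_destroy(bufmgr);
+ */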
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
new file mode 100644 (file)
index 0000000..cdc2a7a
--- /dev/null
@@ -0,0 +1,853 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Red Hat Inc.
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ *         Eric Anholt <eric@anholt.net>
+ *         Dave Airlie <airlied@linux.ie>
+ */
+
+#include <xf86drm.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include "errno.h"
+#include "dri_bufmgr.h"
+#include "intel_bufmgr.h"
+#include "string.h"
+
+#include "i915_drm.h"
+
+#define DBG(...) do {                                  \
+   if (bufmgr_gem->bufmgr.debug)                       \
+      fprintf(stderr, __VA_ARGS__);                    \
+} while (0)
+
+typedef struct _dri_bo_gem dri_bo_gem;
+
+struct dri_gem_bo_bucket {
+   dri_bo_gem *head, **tail;
+   /**
+    * Limit on the number of entries in this bucket.
+    *
+    * 0 means that caching at this bucket size is disabled.
+    * -1 means that there is no limit to caching at this size.
+    */
+   int max_entries;
+   int num_entries;
+};
+
+/* Arbitrarily chosen; 16 buckets means that the largest size we'll cache
+ * for reuse is 1 << 15 pages, or 128MB.
+ */
+#define INTEL_GEM_BO_BUCKETS   16
+typedef struct _dri_bufmgr_gem {
+    dri_bufmgr bufmgr;
+
+    struct intel_bufmgr intel_bufmgr;
+
+    int fd;
+
+    int max_relocs;
+
+    struct drm_i915_gem_exec_object *exec_objects;
+    dri_bo **exec_bos;
+    int exec_size;
+    int exec_count;
+
+    /** Array of lists of cached gem objects of power-of-two sizes */
+    struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];
+
+    struct drm_i915_gem_execbuffer exec_arg;
+} dri_bufmgr_gem;
+
+struct _dri_bo_gem {
+    dri_bo bo;
+
+    int refcount;
+    /** Boolean whether the mmap ioctl has been called for this buffer yet. */
+    int mapped;
+    uint32_t gem_handle;
+    const char *name;
+
+    /**
+     * Index of the buffer within the validation list while preparing a
+     * batchbuffer execution.
+     */
+    int validate_index;
+
+    /**
+     * Boolean whether CPU (swrast) access is set up for this buffer;
+     * set on map, cleared on unmap and after batch submission.
+     */
+    int swrast;
+
+    /** Array passed to the DRM containing relocation information. */
+    struct drm_i915_gem_relocation_entry *relocs;
+    /** Array of bos corresponding to relocs[i].target_handle */
+    dri_bo **reloc_target_bo;
+    /** Number of entries in relocs */
+    int reloc_count;
+    /** Mapped address for the buffer */
+    void *virtual;
+
+    /** free list */
+    dri_bo_gem *next;
+};
+
+static int
+logbase2(int n)
+{
+   int i = 1;
+   int log2 = 0;
+
+   while (n > i) {
+      i *= 2;
+      log2++;
+   }
+
+   return log2;
+}
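+
+/* For example, logbase2(6000) == 13, so a 6000-byte request below rounds up
+ * to an 8192-byte (two-page) allocation.
+ */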
+
+static struct dri_gem_bo_bucket *
+dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
+{
+    int i;
+
+    /* We only bucket power-of-two sizes */
+    if ((size & (size - 1)) != 0)
+       return NULL;
+
+    /* We should only see sizes rounded to pages. */
+    assert((size % 4096) == 0);
+
+    /* We always allocate in units of pages */
+    i = ffs(size / 4096) - 1;
+    if (i >= INTEL_GEM_BO_BUCKETS)
+       return NULL;
+
+    return &bufmgr_gem->cache_bucket[i];
+}
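+
+/* Worked example: a 16KB object is size / 4096 == 4 pages, ffs(4) - 1 == 2,
+ * so it is cached in cache_bucket[2].  Sizes that are not a power-of-two
+ * number of pages bypass the cache entirely.
+ */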
+
+
+static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
+{
+    int i, j;
+
+    for (i = 0; i < bufmgr_gem->exec_count; i++) {
+       dri_bo *bo = bufmgr_gem->exec_bos[i];
+       dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+       if (bo_gem->relocs == NULL) {
+           DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
+           continue;
+       }
+
+       for (j = 0; j < bo_gem->reloc_count; j++) {
+           dri_bo *target_bo = bo_gem->reloc_target_bo[j];
+           dri_bo_gem *target_gem = (dri_bo_gem *)target_bo;
+
+           DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
+               i,
+               bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset,
+               target_gem->gem_handle, target_gem->name, target_bo->offset,
+               bo_gem->relocs[j].delta);
+       }
+    }
+}
+
+/**
+ * Adds the given buffer to the list of buffers to be validated (moved into the
+ * appropriate memory type) with the next batch submission.
+ *
+ * If a buffer is validated multiple times in a batch submission, it ends up
+ * with the intersection of the memory type flags and the union of the
+ * access flags.
+ */
+static void
+intel_add_validate_buffer(dri_bo *bo)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    int index;
+
+    if (bo_gem->validate_index != -1)
+       return;
+
+    /* Extend the array of validation entries as necessary. */
+    if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+       int new_size = bufmgr_gem->exec_size * 2;
+
+       if (new_size == 0)
+           new_size = 5;
+
+       bufmgr_gem->exec_objects =
+           realloc(bufmgr_gem->exec_objects,
+                   sizeof(*bufmgr_gem->exec_objects) * new_size);
+       bufmgr_gem->exec_bos =
+           realloc(bufmgr_gem->exec_bos,
+                   sizeof(*bufmgr_gem->exec_bos) * new_size);
+       bufmgr_gem->exec_size = new_size;
+    }
+
+    index = bufmgr_gem->exec_count;
+    bo_gem->validate_index = index;
+    /* Fill in array entry */
+    bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
+    bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
+    bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
+    bufmgr_gem->exec_objects[index].alignment = 0;
+    bufmgr_gem->exec_objects[index].offset = 0;
+    bufmgr_gem->exec_bos[index] = bo;
+    dri_bo_reference(bo);
+    bufmgr_gem->exec_count++;
+}
+
+
+#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
+       sizeof(uint32_t))
+
+static int
+intel_setup_reloc_list(dri_bo *bo)
+{
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+    bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
+                           sizeof(struct drm_i915_gem_relocation_entry));
+    bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *));
+
+    return 0;
+}
+
+static dri_bo *
+dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
+                unsigned long size, unsigned int alignment)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+    dri_bo_gem *bo_gem;
+    unsigned int page_size = getpagesize();
+    int ret;
+    struct dri_gem_bo_bucket *bucket;
+    int alloc_from_cache = 0;
+    unsigned long bo_size;
+
+    /* Round the allocated size up to a power of two number of pages. */
+    bo_size = 1 << logbase2(size);
+    if (bo_size < page_size)
+       bo_size = page_size;
+    bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
+
+    /* If we don't have caching at this size, don't actually round the
+     * allocation up.
+     */
+    if (bucket == NULL || bucket->max_entries == 0) {
+       bo_size = size;
+       if (bo_size < page_size)
+           bo_size = page_size;
+    }
+
+    /* Get a buffer out of the cache if available */
+    if (bucket != NULL && bucket->num_entries > 0) {
+       struct drm_i915_gem_busy busy;
+       
+       bo_gem = bucket->head;
+        busy.handle = bo_gem->gem_handle;
+
+        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+        alloc_from_cache = (ret == 0 && busy.busy == 0);
+
+       if (alloc_from_cache) {
+           bucket->head = bo_gem->next;
+           if (bo_gem->next == NULL)
+               bucket->tail = &bucket->head;
+           bucket->num_entries--;
+       }
+    }
+
+    if (!alloc_from_cache) {
+       struct drm_i915_gem_create create;
+
+       bo_gem = calloc(1, sizeof(*bo_gem));
+       if (!bo_gem)
+           return NULL;
+
+       bo_gem->bo.size = bo_size;
+       memset(&create, 0, sizeof(create));
+       create.size = bo_size;
+
+       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+       bo_gem->gem_handle = create.handle;
+       if (ret != 0) {
+           free(bo_gem);
+           return NULL;
+       }
+       bo_gem->bo.bufmgr = bufmgr;
+    }
+
+    bo_gem->name = name;
+    bo_gem->refcount = 1;
+    bo_gem->validate_index = -1;
+
+    DBG("bo_create: buf %d (%s) %ldb\n",
+       bo_gem->gem_handle, bo_gem->name, size);
+
+    return &bo_gem->bo;
+}
+
+/**
+ * Returns a dri_bo wrapping the given global buffer object name (as
+ * produced by flink).
+ *
+ * This can be used when one application needs to pass a buffer object
+ * to another.
+ */
+dri_bo *
+intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
+                             unsigned int handle)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+    dri_bo_gem *bo_gem;
+    int ret;
+    struct drm_gem_open open_arg;
+
+    bo_gem = calloc(1, sizeof(*bo_gem));
+    if (!bo_gem)
+       return NULL;
+
+    memset(&open_arg, 0, sizeof(open_arg));
+    open_arg.name = handle;
+    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+    if (ret != 0) {
+       fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
+              name, handle, strerror(-ret));
+       free(bo_gem);
+       return NULL;
+    }
+    bo_gem->bo.size = open_arg.size;
+    bo_gem->bo.offset = 0;
+    bo_gem->bo.virtual = NULL;
+    bo_gem->bo.bufmgr = bufmgr;
+    bo_gem->name = name;
+    bo_gem->refcount = 1;
+    bo_gem->validate_index = -1;
+    bo_gem->gem_handle = open_arg.handle;
+
+    DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
+
+    return &bo_gem->bo;
+}
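+
+/*
+ * Sketch of cross-process sharing (names are illustrative): the exporting
+ * process names its handle with DRM_IOCTL_GEM_FLINK and passes the name out
+ * of band, and the importer wraps it:
+ *
+ *   struct drm_gem_flink flink;
+ *
+ *   flink.handle = bo_gem->gem_handle;
+ *   ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
+ *   ...hand flink.name to the other process, which then calls...
+ *   shared_bo = intel_bo_gem_create_from_name(bufmgr, "shared", flink.name);
+ */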
+
+static void
+dri_gem_bo_reference(dri_bo *bo)
+{
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+    bo_gem->refcount++;
+}
+
+static void
+dri_gem_bo_free(dri_bo *bo)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_gem_close close;
+    int ret;
+
+    if (bo_gem->mapped)
+       munmap (bo_gem->virtual, bo_gem->bo.size);
+
+    /* Close this object */
+    close.handle = bo_gem->gem_handle;
+    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
+    if (ret != 0) {
+       fprintf(stderr,
+               "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+               bo_gem->gem_handle, bo_gem->name, strerror(-ret));
+    }
+    free(bo);
+}
+
+static void
+dri_gem_bo_unreference(dri_bo *bo)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+    if (!bo)
+       return;
+
+    if (--bo_gem->refcount == 0) {
+       struct dri_gem_bo_bucket *bucket;
+
+       if (bo_gem->relocs != NULL) {
+           int i;
+
+           /* Unreference all the target buffers */
+           for (i = 0; i < bo_gem->reloc_count; i++)
+                dri_bo_unreference(bo_gem->reloc_target_bo[i]);
+           free(bo_gem->reloc_target_bo);
+           free(bo_gem->relocs);
+       }
+
+       DBG("bo_unreference final: %d (%s)\n",
+           bo_gem->gem_handle, bo_gem->name);
+
+       bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+       /* Put the buffer into our internal cache for reuse if we can. */
+       if (bucket != NULL &&
+           (bucket->max_entries == -1 ||
+            (bucket->max_entries > 0 &&
+             bucket->num_entries < bucket->max_entries)))
+       {
+           bo_gem->name = 0;
+           bo_gem->validate_index = -1;
+           bo_gem->relocs = NULL;
+           bo_gem->reloc_target_bo = NULL;
+           bo_gem->reloc_count = 0;
+
+           bo_gem->next = NULL;
+           *bucket->tail = bo_gem;
+           bucket->tail = &bo_gem->next;
+           bucket->num_entries++;
+       } else {
+           dri_gem_bo_free(bo);
+       }
+
+       return;
+    }
+}
+
+static int
+dri_gem_bo_map(dri_bo *bo, int write_enable)
+{
+    dri_bufmgr_gem *bufmgr_gem;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_set_domain set_domain;
+    int ret;
+
+    bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+    /* Allow recursive mapping. Mesa may recursively map buffers with
+     * nested display loops.
+     */
+    if (!bo_gem->mapped) {
+    
+       assert(bo->virtual == NULL);
+    
+       DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+    
+       if (bo_gem->virtual == NULL) {
+           struct drm_i915_gem_mmap mmap_arg;
+    
+           memset(&mmap_arg, 0, sizeof(mmap_arg));
+           mmap_arg.handle = bo_gem->gem_handle;
+           mmap_arg.offset = 0;
+           mmap_arg.size = bo->size;
+           ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+           if (ret != 0) {
+               fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
+                       __FILE__, __LINE__,
+                       bo_gem->gem_handle, bo_gem->name, strerror(errno));
+           }
+           bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
+       }
+       bo->virtual = bo_gem->virtual;
+       bo_gem->swrast = 0;
+       bo_gem->mapped = 1;
+       DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual);
+    }
+
+    if (!bo_gem->swrast) {
+       set_domain.handle = bo_gem->gem_handle;
+       set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+       if (write_enable)
+           set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+       else
+           set_domain.write_domain = 0;
+       do {
+           ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+                       &set_domain);
+       } while (ret == -1 && errno == EINTR);
+       if (ret != 0) {
+           fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
+                    __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
+       }
+       bo_gem->swrast = 1;
+    }
+
+    return 0;
+}
+
+static int
+dri_gem_bo_unmap(dri_bo *bo)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_sw_finish sw_finish;
+    int ret;
+
+    if (bo == NULL)
+       return 0;
+
+    assert(bo_gem->mapped);
+
+    if (bo_gem->swrast) {
+       sw_finish.handle = bo_gem->gem_handle;
+       do {
+           ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
+                       &sw_finish);
+       } while (ret == -1 && errno == EINTR);
+       bo_gem->swrast = 0;
+    }
+    return 0;
+}
+
+static int
+dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
+                   unsigned long size, const void *data)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_pwrite pwrite;
+    int ret;
+
+    memset (&pwrite, 0, sizeof (pwrite));
+    pwrite.handle = bo_gem->gem_handle;
+    pwrite.offset = offset;
+    pwrite.size = size;
+    pwrite.data_ptr = (uint64_t) (uintptr_t) data;
+    do {
+       ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+    } while (ret == -1 && errno == EINTR);
+    if (ret != 0) {
+       fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
+                __FILE__, __LINE__,
+                bo_gem->gem_handle, (int) offset, (int) size,
+                strerror (errno));
+    }
+    return 0;
+}
+
+static int
+dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
+                       unsigned long size, void *data)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_pread pread;
+    int ret;
+
+    memset (&pread, 0, sizeof (pread));
+    pread.handle = bo_gem->gem_handle;
+    pread.offset = offset;
+    pread.size = size;
+    pread.data_ptr = (uint64_t) (uintptr_t) data;
+    do {
+       ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+    } while (ret == -1 && errno == EINTR);
+    if (ret != 0) {
+       fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
+                __FILE__, __LINE__,
+                bo_gem->gem_handle, (int) offset, (int) size,
+                strerror (errno));
+    }
+    return 0;
+}
+
+static void
+dri_gem_bo_wait_rendering(dri_bo *bo)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_set_domain set_domain;
+    int ret;
+
+    set_domain.handle = bo_gem->gem_handle;
+    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+    set_domain.write_domain = 0;
+    ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+    if (ret != 0) {
+       fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
+                __FILE__, __LINE__,
+                bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
+                strerror (errno));
+    }
+}
+
+static void
+dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+    int i;
+
+    free(bufmgr_gem->exec_objects);
+    free(bufmgr_gem->exec_bos);
+
+    /* Free any cached buffer objects we were going to reuse */
+    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
+       struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
+       dri_bo_gem *bo_gem;
+
+       while ((bo_gem = bucket->head) != NULL) {
+           bucket->head = bo_gem->next;
+           if (bo_gem->next == NULL)
+               bucket->tail = &bucket->head;
+           bucket->num_entries--;
+
+           dri_gem_bo_free(&bo_gem->bo);
+       }
+    }
+
+    free(bufmgr);
+}
+
+/**
+ * Adds the target buffer to the validation list and adds the relocation
+ * to the reloc_buffer's relocation list.
+ *
+ * The relocation entry at the given offset must already contain the
+ * precomputed relocation value, because the kernel will optimize out
+ * the relocation entry write when the buffer hasn't moved from the
+ * last known offset in target_bo.
+ */
+static int
+dri_gem_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
+                  uint32_t delta, uint32_t offset, dri_bo *target_bo)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
+
+    /* Create a new relocation list if needed */
+    if (bo_gem->relocs == NULL)
+       intel_setup_reloc_list(bo);
+
+    /* Check overflow */
+    assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
+
+    /* Check args */
+    assert (offset <= bo->size - 4);
+    assert ((write_domain & (write_domain-1)) == 0);
+
+    bo_gem->relocs[bo_gem->reloc_count].offset = offset;
+    bo_gem->relocs[bo_gem->reloc_count].delta = delta;
+    bo_gem->relocs[bo_gem->reloc_count].target_handle =
+       target_bo_gem->gem_handle;
+    bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
+    bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
+    bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
+
+    bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
+    dri_bo_reference(target_bo);
+
+    bo_gem->reloc_count++;
+    return 0;
+}
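+
+/*
+ * Sketch of a driver emitting a batch word that points at target_bo: write
+ * the presumed value first, then record the relocation (the domains here
+ * are illustrative):
+ *
+ *   batch[i] = target_bo->offset + delta;
+ *   intel_bo_emit_reloc(batch_bo, I915_GEM_DOMAIN_RENDER, 0,
+ *                       delta, i * 4, target_bo);
+ */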
+
+/**
+ * Walks the tree of relocations rooted at the given BO depth-first,
+ * accumulating the list of buffers that must be validated along with the
+ * batch submission.
+ */
+static void
+dri_gem_bo_process_reloc(dri_bo *bo)
+{
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    int i;
+
+    if (bo_gem->relocs == NULL)
+       return;
+
+    for (i = 0; i < bo_gem->reloc_count; i++) {
+       dri_bo *target_bo = bo_gem->reloc_target_bo[i];
+
+       /* Continue walking the tree depth-first. */
+       dri_gem_bo_process_reloc(target_bo);
+
+       /* Add the target to the validate list */
+       intel_add_validate_buffer(target_bo);
+    }
+}
+
+static void *
+dri_gem_process_reloc(dri_bo *batch_buf)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr;
+
+    /* Update indices and set up the validate list. */
+    dri_gem_bo_process_reloc(batch_buf);
+
+    /* Add the batch buffer to the validation list.  There are no relocations
+     * pointing to it.
+     */
+    intel_add_validate_buffer(batch_buf);
+
+    bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
+    bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count;
+    bufmgr_gem->exec_arg.batch_start_offset = 0;
+    bufmgr_gem->exec_arg.batch_len = 0;        /* written in intel_exec_ioctl */
+
+    return &bufmgr_gem->exec_arg;
+}
+
+static void
+intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
+{
+    int i;
+
+    for (i = 0; i < bufmgr_gem->exec_count; i++) {
+       dri_bo *bo = bufmgr_gem->exec_bos[i];
+       dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+       /* Update the buffer offset */
+       if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
+           DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
+               bo_gem->gem_handle, bo_gem->name, bo->offset,
+               bufmgr_gem->exec_objects[i].offset);
+           bo->offset = bufmgr_gem->exec_objects[i].offset;
+       }
+    }
+}
+
+static void
+dri_gem_post_submit(dri_bo *batch_buf)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
+    int i;
+
+    intel_update_buffer_offsets (bufmgr_gem);
+
+    if (bufmgr_gem->bufmgr.debug)
+       dri_gem_dump_validation_list(bufmgr_gem);
+
+    for (i = 0; i < bufmgr_gem->exec_count; i++) {
+       dri_bo *bo = bufmgr_gem->exec_bos[i];
+       dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+       /* Need to call swrast on next bo_map */
+       bo_gem->swrast = 0;
+
+       /* Disconnect the buffer from the validate list */
+       bo_gem->validate_index = -1;
+       dri_bo_unreference(bo);
+       bufmgr_gem->exec_bos[i] = NULL;
+    }
+    bufmgr_gem->exec_count = 0;
+}
+
+/**
+ * Enables unlimited caching of buffer objects for reuse.
+ *
+ * This is potentially very memory expensive, as the cache at each bucket
+ * size is only bounded by how many buffers of that size we've managed to have
+ * in flight at once.
+ */
+void
+intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+    int i;
+
+    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
+       bufmgr_gem->cache_bucket[i].max_entries = -1;
+    }
+}
+
+/*
+ * With GEM, aperture accounting is handled by the kernel at execbuffer
+ * time, so there is nothing for userspace to check here.
+ */
+static int
+dri_gem_check_aperture_space(dri_bo *bo)
+{
+    return 0;
+}
+
+/**
+ * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
+ * and manage buffer objects.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ * \param batch_size Size in bytes of the batchbuffer, used to size the
+ *       per-buffer relocation lists.
+ */
+dri_bufmgr *
+intel_bufmgr_gem_init(int fd, int batch_size)
+{
+    dri_bufmgr_gem *bufmgr_gem;
+    int i;
+
+    bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+    bufmgr_gem->fd = fd;
+
+    /* Let's go with one relocation for every 2 dwords (but round down a bit
+     * since a power of two will mean an extra page allocation for the reloc
+     * buffer).
+     *
+     * Every 4 was too few for the blender benchmark.
+     */
+    bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
+
+    bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
+    bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
+    bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
+    bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
+    bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
+    bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
+    bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
+    bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
+    bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
+    bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
+    bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
+    bufmgr_gem->bufmgr.debug = 0;
+    bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
+    bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc;
+    /* Initialize the linked lists for BO reuse cache. */
+    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
+       bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
+
+    return &bufmgr_gem->bufmgr;
+}
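+
+/*
+ * Minimal usage sketch, assuming an already-opened DRM fd and a 16KB
+ * batchbuffer:
+ *
+ *   dri_bufmgr *bufmgr = intel_bufmgr_gem_init(fd, 16 * 1024);
+ *
+ *   intel_bufmgr_gem_enable_reuse(bufmgr);
+ *   bo = dri_bo_alloc(bufmgr, "scratch", 4096, 4096);
+ *   dri_bo_map(bo, 1);
+ *   ...write through bo->virtual...
+ *   dri_bo_unmap(bo);
+ *   dri_bo_unreference(bo);
+ */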
+
+int
+intel_bo_emit_reloc(dri_bo *reloc_buf,
+                   uint32_t read_domains, uint32_t write_domain,
+                   uint32_t delta, uint32_t offset, dri_bo *target_buf)
+{
+    struct intel_bufmgr *intel_bufmgr;
+
+    intel_bufmgr = (struct intel_bufmgr *)(reloc_buf->bufmgr + 1);
+
+    return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
+                                   delta, offset, target_buf);
+}
diff --git a/libdrm/intel/mm.c b/libdrm/intel/mm.c
new file mode 100644 (file)
index 0000000..9814640
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "xf86drm.h"
+#include "mm.h"
+
+void
+mmDumpMemInfo(const struct mem_block *heap)
+{
+   drmMsg("Memory heap %p:\n", (void *)heap);
+   if (heap == 0) {
+      drmMsg("  heap == 0\n");
+   } else {
+      const struct mem_block *p;
+
+      for(p = heap->next; p != heap; p = p->next) {
+        drmMsg("  Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
+               p->free ? 'F':'.',
+               p->reserved ? 'R':'.');
+      }
+
+      drmMsg("\nFree list:\n");
+
+      for(p = heap->next_free; p != heap; p = p->next_free) {
+        drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
+               p->free ? 'F':'.',
+               p->reserved ? 'R':'.');
+      }
+
+   }
+   drmMsg("End of memory blocks\n");
+}
+
+struct mem_block *
+mmInit(int ofs, int size)
+{
+   struct mem_block *heap, *block;
+  
+   if (size <= 0) 
+      return NULL;
+
+   heap = (struct mem_block *) calloc(1, sizeof(struct mem_block));
+   if (!heap) 
+      return NULL;
+   
+   block = (struct mem_block *) calloc(1, sizeof(struct mem_block));
+   if (!block) {
+      free(heap);
+      return NULL;
+   }
+
+   heap->next = block;
+   heap->prev = block;
+   heap->next_free = block;
+   heap->prev_free = block;
+
+   block->heap = heap;
+   block->next = heap;
+   block->prev = heap;
+   block->next_free = heap;
+   block->prev_free = heap;
+
+   block->ofs = ofs;
+   block->size = size;
+   block->free = 1;
+
+   return heap;
+}
+
+
+static struct mem_block *
+SliceBlock(struct mem_block *p, 
+           int startofs, int size, 
+           int reserved, int alignment)
+{
+   struct mem_block *newblock;
+
+   /* break left  [p, newblock, p->next], then p = newblock */
+   if (startofs > p->ofs) {
+      newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
+      if (!newblock)
+        return NULL;
+      newblock->ofs = startofs;
+      newblock->size = p->size - (startofs - p->ofs);
+      newblock->free = 1;
+      newblock->heap = p->heap;
+
+      newblock->next = p->next;
+      newblock->prev = p;
+      p->next->prev = newblock;
+      p->next = newblock;
+
+      newblock->next_free = p->next_free;
+      newblock->prev_free = p;
+      p->next_free->prev_free = newblock;
+      p->next_free = newblock;
+
+      p->size -= newblock->size;
+      p = newblock;
+   }
+
+   /* break right, also [p, newblock, p->next] */
+   if (size < p->size) {
+      newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
+      if (!newblock)
+        return NULL;
+      newblock->ofs = startofs + size;
+      newblock->size = p->size - size;
+      newblock->free = 1;
+      newblock->heap = p->heap;
+
+      newblock->next = p->next;
+      newblock->prev = p;
+      p->next->prev = newblock;
+      p->next = newblock;
+
+      newblock->next_free = p->next_free;
+      newblock->prev_free = p;
+      p->next_free->prev_free = newblock;
+      p->next_free = newblock;
+        
+      p->size = size;
+   }
+
+   /* p = middle block */
+   p->free = 0;
+
+   /* Remove p from the free list: 
+    */
+   p->next_free->prev_free = p->prev_free;
+   p->prev_free->next_free = p->next_free;
+
+   p->next_free = 0;
+   p->prev_free = 0;
+
+   p->reserved = reserved;
+   return p;
+}
+
+
+struct mem_block *
+mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
+{
+   struct mem_block *p;
+   const int mask = (1 << align2)-1;
+   int startofs = 0;
+   int endofs;
+
+   if (!heap || align2 < 0 || size <= 0)
+      return NULL;
+
+   for (p = heap->next_free; p != heap; p = p->next_free) {
+      assert(p->free);
+
+      startofs = (p->ofs + mask) & ~mask;
+      if ( startofs < startSearch ) {
+        startofs = startSearch;
+      }
+      endofs = startofs+size;
+      if (endofs <= (p->ofs+p->size))
+        break;
+   }
+
+   if (p == heap) 
+      return NULL;
+
+   assert(p->free);
+   p = SliceBlock(p,startofs,size,0,mask+1);
+
+   return p;
+}
+
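+/*
+ * Usage sketch: carve a 4KB block, aligned to 4KB (align2 == 12), out of a
+ * 1MB heap (offsets and sizes are illustrative):
+ *
+ *   struct mem_block *heap = mmInit(0, 1024 * 1024);
+ *   struct mem_block *b = mmAllocMem(heap, 4096, 12, 0);
+ *   ...
+ *   mmFreeMem(b);
+ *   mmDestroy(heap);
+ */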
+
+struct mem_block *
+mmFindBlock(struct mem_block *heap, int start)
+{
+   struct mem_block *p;
+
+   for (p = heap->next; p != heap; p = p->next) {
+      if (p->ofs == start) 
+        return p;
+   }
+
+   return NULL;
+}
+
+
+static int
+Join2Blocks(struct mem_block *p)
+{
+   /* XXX there should be some assertions here */
+
+   /* NOTE: heap->free == 0 */
+
+   if (p->free && p->next->free) {
+      struct mem_block *q = p->next;
+
+      assert(p->ofs + p->size == q->ofs);
+      p->size += q->size;
+
+      p->next = q->next;
+      q->next->prev = p;
+
+      q->next_free->prev_free = q->prev_free; 
+      q->prev_free->next_free = q->next_free;
+     
+      free(q);
+      return 1;
+   }
+   return 0;
+}
+
+int
+mmFreeMem(struct mem_block *b)
+{
+   if (!b)
+      return 0;
+
+   if (b->free) {
+      drmMsg("block already free\n");
+      return -1;
+   }
+   if (b->reserved) {
+      drmMsg("block is reserved\n");
+      return -1;
+   }
+
+   b->free = 1;
+   b->next_free = b->heap->next_free;
+   b->prev_free = b->heap;
+   b->next_free->prev_free = b;
+   b->prev_free->next_free = b;
+
+   Join2Blocks(b);
+   if (b->prev != b->heap)
+      Join2Blocks(b->prev);
+
+   return 0;
+}
+
+
+void
+mmDestroy(struct mem_block *heap)
+{
+   struct mem_block *p;
+
+   if (!heap)
+      return;
+
+   for (p = heap->next; p != heap; ) {
+      struct mem_block *next = p->next;
+      free(p);
+      p = next;
+   }
+
+   free(heap);
+}
diff --git a/libdrm/intel/mm.h b/libdrm/intel/mm.h
new file mode 100644 (file)
index 0000000..49e3eec
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * Memory manager code.  Primarily used by device drivers to manage texture
+ * heaps, etc.
+ */
+
+
+#ifndef MM_H
+#define MM_H
+
+struct mem_block {
+   struct mem_block *next, *prev;
+   struct mem_block *next_free, *prev_free;
+   struct mem_block *heap;
+   int ofs,size;
+   unsigned int free:1;
+   unsigned int reserved:1;
+};
+
+/* Rename the variables in the drm copy of this code so that it doesn't
+ * conflict with mesa or whoever else has copied it around.
+ */
+#define mmInit drm_mmInit
+#define mmAllocMem drm_mmAllocMem
+#define mmFreeMem drm_mmFreeMem
+#define mmFindBlock drm_mmFindBlock
+#define mmDestroy drm_mmDestroy
+#define mmDumpMemInfo drm_mmDumpMemInfo
+
+/**
+ * input: start offset and total size in bytes
+ * return: a heap pointer if OK, NULL if error
+ */
+extern struct mem_block *mmInit(int ofs, int size);
+
+/**
+ * Allocate 'size' bytes with 2^align2 bytes alignment, restricting the
+ * search to free memory after 'startSearch'.  Depth and back buffers
+ * should be in different 4MB banks to get better page hits, if possible.
+ * input:      size = size of block
+ *             align2 = 2^align2 bytes alignment
+ *             startSearch = linear offset from start of heap to begin search
+ * return: pointer to the allocated block, 0 if error
+ */
+extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
+                                      int align2, int startSearch);
+
+/**
+ * Free a previously allocated block.
+ * input: pointer to a block
+ * return: 0 if OK, -1 if error
+ */
+extern int mmFreeMem(struct mem_block *b);
+
+/**
+ * Find the block that starts at the given offset.
+ * input: pointer to a heap, start offset
+ * return: pointer to a block
+ */
+extern struct mem_block *mmFindBlock(struct mem_block *heap, int start);
+
+/**
+ * Destroy the memory manager, freeing all of its blocks.
+ */
+extern void mmDestroy(struct mem_block *heap);
+
+/**
+ * For debugging purposes.
+ */
+extern void mmDumpMemInfo(const struct mem_block *heap);
+
+#endif
index 7b67813..4b4d009 100644 (file)
@@ -113,7 +113,7 @@ static int drmDebugPrint(const char *format, va_list ap)
 
 static int (*drm_debug_print)(const char *format, va_list ap) = drmDebugPrint;
 
-static void
+void
 drmMsg(const char *format, ...)
 {
     va_list    ap;
@@ -174,6 +174,19 @@ static char *drmStrdup(const char *s)
     return retval;
 }
 
+/**
+ * Call ioctl, restarting if it is interrupted
+ */
+static int
+drmIoctl(int fd, int request, void *arg)
+{
+    int        ret;
+
+    do {
+       ret = ioctl(fd, request, arg);
+    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+    return ret;
+}
 
 static unsigned long drmGetKeyFromFd(int fd)
 {
@@ -675,7 +688,7 @@ drmVersionPtr drmGetVersion(int fd)
     version->desc_len    = 0;
     version->desc        = NULL;
 
-    if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+    if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
        drmFreeKernelVersion(version);
        return NULL;
     }
@@ -687,7 +700,7 @@ drmVersionPtr drmGetVersion(int fd)
     if (version->desc_len)
        version->desc    = drmMalloc(version->desc_len + 1);
 
-    if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+    if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
        drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno));
        drmFreeKernelVersion(version);
        return NULL;
@@ -773,10 +786,10 @@ char *drmGetBusid(int fd)
     u.unique_len = 0;
     u.unique     = NULL;
 
-    if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+    if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
        return NULL;
     u.unique = drmMalloc(u.unique_len + 1);
-    if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+    if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
        return NULL;
     u.unique[u.unique_len] = '\0';
 
@@ -803,7 +816,7 @@ int drmSetBusid(int fd, const char *busid)
     u.unique     = (char *)busid;
     u.unique_len = strlen(busid);
 
-    if (ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
+    if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
        return -errno;
     }
     return 0;
@@ -814,7 +827,7 @@ int drmGetMagic(int fd, drm_magic_t * magic)
     drm_auth_t auth;
 
     *magic = 0;
-    if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
+    if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
        return -errno;
     *magic = auth.magic;
     return 0;
@@ -825,7 +838,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
     drm_auth_t auth;
 
     auth.magic = magic;
-    if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
+    if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
        return -errno;
     return 0;
 }
@@ -890,7 +903,7 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
     map.handle  = 0;
     map.type    = type;
     map.flags   = flags;
-    if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map))
+    if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
        return -errno;
     if (handle)
        *handle = (drm_handle_t)map.handle;
@@ -903,7 +916,7 @@ int drmRmMap(int fd, drm_handle_t handle)
 
     map.handle = (void *)handle;
 
-    if(ioctl(fd, DRM_IOCTL_RM_MAP, &map))
+    if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map))
        return -errno;
     return 0;
 }
@@ -936,7 +949,7 @@ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
     request.flags     = flags;
     request.agp_start = agp_offset;
 
-    if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request))
+    if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request))
        return -errno;
     return request.count;
 }
@@ -949,7 +962,7 @@ int drmMarkBufs(int fd, double low, double high)
     info.count = 0;
     info.list  = NULL;
 
-    if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
+    if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
        return -EINVAL;
 
     if (!info.count)
@@ -958,7 +971,7 @@ int drmMarkBufs(int fd, double low, double high)
     if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
        return -ENOMEM;
 
-    if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
+    if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
        int retval = -errno;
        drmFree(info.list);
        return retval;
@@ -967,7 +980,7 @@ int drmMarkBufs(int fd, double low, double high)
     for (i = 0; i < info.count; i++) {
        info.list[i].low_mark  = low  * info.list[i].count;
        info.list[i].high_mark = high * info.list[i].count;
-       if (ioctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
+       if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
            int retval = -errno;
            drmFree(info.list);
            return retval;
@@ -999,7 +1012,7 @@ int drmFreeBufs(int fd, int count, int *list)
 
     request.count = count;
     request.list  = list;
-    if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request))
+    if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request))
        return -errno;
     return 0;
 }
@@ -1088,14 +1101,14 @@ drmBufInfoPtr drmGetBufInfo(int fd)
     info.count = 0;
     info.list  = NULL;
 
-    if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
+    if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
        return NULL;
 
     if (info.count) {
        if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
            return NULL;
 
-       if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
+       if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
            drmFree(info.list);
            return NULL;
        }
@@ -1139,7 +1152,7 @@ drmBufMapPtr drmMapBufs(int fd)
     bufs.count = 0;
     bufs.list  = NULL;
     bufs.virtual = NULL;
-    if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
+    if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
        return NULL;
 
     if (!bufs.count)
@@ -1148,7 +1161,7 @@ drmBufMapPtr drmMapBufs(int fd)
        if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list))))
            return NULL;
 
-       if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
+       if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
            drmFree(bufs.list);
            return NULL;
        }
@@ -1263,7 +1276,7 @@ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags)
     if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
     if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
 
-    while (ioctl(fd, DRM_IOCTL_LOCK, &lock))
+    while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock))
        ;
     return 0;
 }
@@ -1286,7 +1299,7 @@ int drmUnlock(int fd, drm_context_t context)
 
     lock.context = context;
     lock.flags   = 0;
-    return ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
+    return drmIoctl(fd, DRM_IOCTL_UNLOCK, &lock);
 }
 
 drm_context_t *drmGetReservedContextList(int fd, int *count)
@@ -1298,7 +1311,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
 
     res.count    = 0;
     res.contexts = NULL;
-    if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
+    if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
        return NULL;
 
     if (!res.count)
@@ -1312,7 +1325,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
     }
 
     res.contexts = list;
-    if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
+    if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
        return NULL;
 
     for (i = 0; i < res.count; i++)
@@ -1351,7 +1364,7 @@ int drmCreateContext(int fd, drm_context_t *handle)
     drm_ctx_t ctx;
 
     ctx.flags = 0;     /* Modified with functions below */
-    if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
+    if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
        return -errno;
     *handle = ctx.handle;
     return 0;
@@ -1362,7 +1375,7 @@ int drmSwitchToContext(int fd, drm_context_t context)
     drm_ctx_t ctx;
 
     ctx.handle = context;
-    if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
+    if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
        return -errno;
     return 0;
 }
@@ -1383,7 +1396,7 @@ int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags)
        ctx.flags |= _DRM_CONTEXT_PRESERVED;
     if (flags & DRM_CONTEXT_2DONLY)
        ctx.flags |= _DRM_CONTEXT_2DONLY;
-    if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
+    if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
        return -errno;
     return 0;
 }
@@ -1394,7 +1407,7 @@ int drmGetContextFlags(int fd, drm_context_t context,
     drm_ctx_t ctx;
 
     ctx.handle = context;
-    if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx))
+    if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx))
        return -errno;
     *flags = 0;
     if (ctx.flags & _DRM_CONTEXT_PRESERVED)
@@ -1425,7 +1438,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
 {
     drm_ctx_t ctx;
     ctx.handle = handle;
-    if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx))
+    if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx))
        return -errno;
     return 0;
 }
@@ -1433,7 +1446,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
 int drmCreateDrawable(int fd, drm_drawable_t *handle)
 {
     drm_draw_t draw;
-    if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
+    if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
        return -errno;
     *handle = draw.handle;
     return 0;
@@ -1443,7 +1456,7 @@ int drmDestroyDrawable(int fd, drm_drawable_t handle)
 {
     drm_draw_t draw;
     draw.handle = handle;
-    if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw))
+    if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw))
        return -errno;
     return 0;
 }
@@ -1459,7 +1472,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
     update.num = num;
     update.data = (unsigned long long)(unsigned long)data;
 
-    if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
+    if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
        return -errno;
 
     return 0;
@@ -1479,7 +1492,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
  */
 int drmAgpAcquire(int fd)
 {
-    if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
        return -errno;
     return 0;
 }
@@ -1497,7 +1510,7 @@ int drmAgpAcquire(int fd)
  */
 int drmAgpRelease(int fd)
 {
-    if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
        return -errno;
     return 0;
 }
@@ -1520,7 +1533,7 @@ int drmAgpEnable(int fd, unsigned long mode)
     drm_agp_mode_t m;
 
     m.mode = mode;
-    if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
        return -errno;
     return 0;
 }
@@ -1551,7 +1564,7 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
     b.size   = size;
     b.handle = 0;
     b.type   = type;
-    if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
        return -errno;
     if (address != 0UL)
        *address = b.physical;
@@ -1578,7 +1591,7 @@ int drmAgpFree(int fd, drm_handle_t handle)
 
     b.size   = 0;
     b.handle = handle;
-    if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b))
        return -errno;
     return 0;
 }
@@ -1603,7 +1616,7 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)
 
     b.handle = handle;
     b.offset = offset;
-    if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b))
        return -errno;
     return 0;
 }
@@ -1627,7 +1640,7 @@ int drmAgpUnbind(int fd, drm_handle_t handle)
 
     b.handle = handle;
     b.offset = 0;
-    if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
        return -errno;
     return 0;
 }
@@ -1648,7 +1661,7 @@ int drmAgpVersionMajor(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return -errno;
     return i.agp_version_major;
 }
@@ -1669,7 +1682,7 @@ int drmAgpVersionMinor(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return -errno;
     return i.agp_version_minor;
 }
@@ -1690,7 +1703,7 @@ unsigned long drmAgpGetMode(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
     return i.mode;
 }
@@ -1711,7 +1724,7 @@ unsigned long drmAgpBase(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
     return i.aperture_base;
 }
@@ -1732,7 +1745,7 @@ unsigned long drmAgpSize(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
     return i.aperture_size;
 }
@@ -1753,7 +1766,7 @@ unsigned long drmAgpMemoryUsed(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
     return i.memory_used;
 }
@@ -1774,7 +1787,7 @@ unsigned long drmAgpMemoryAvail(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
     return i.memory_allowed;
 }
@@ -1795,7 +1808,7 @@ unsigned int drmAgpVendorId(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
     return i.id_vendor;
 }
@@ -1816,7 +1829,7 @@ unsigned int drmAgpDeviceId(int fd)
 {
     drm_agp_info_t i;
 
-    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
        return 0;
     return i.id_device;
 }
@@ -1828,7 +1841,7 @@ int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
     *handle = 0;
     sg.size   = size;
     sg.handle = 0;
-    if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
+    if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
        return -errno;
     *handle = sg.handle;
     return 0;
@@ -1840,7 +1853,7 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
 
     sg.size   = 0;
     sg.handle = handle;
-    if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg))
+    if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg))
        return -errno;
     return 0;
 }
@@ -1861,7 +1874,7 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)
     int ret;
 
     do {
-       ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
+       ret = drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
        vbl->request.type &= ~DRM_VBLANK_RELATIVE;
     } while (ret && errno == EINTR);
 
@@ -1911,7 +1924,7 @@ int drmCtlInstHandler(int fd, int irq)
 
     ctl.func  = DRM_INST_HANDLER;
     ctl.irq   = irq;
-    if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
+    if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
        return -errno;
     return 0;
 }
@@ -1934,7 +1947,7 @@ int drmCtlUninstHandler(int fd)
 
     ctl.func  = DRM_UNINST_HANDLER;
     ctl.irq   = 0;
-    if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
+    if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
        return -errno;
     return 0;
 }
@@ -1951,7 +1964,7 @@ int drmFinish(int fd, int context, drmLockFlags flags)
     if (flags & DRM_LOCK_FLUSH_ALL)  lock.flags |= _DRM_LOCK_FLUSH_ALL;
     if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
     if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
-    if (ioctl(fd, DRM_IOCTL_FINISH, &lock))
+    if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock))
        return -errno;
     return 0;
 }
@@ -1977,7 +1990,7 @@ int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
     p.busnum  = busnum;
     p.devnum  = devnum;
     p.funcnum = funcnum;
-    if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
+    if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
        return -errno;
     return p.irq;
 }
@@ -2019,7 +2032,7 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
     map.ctx_id = ctx_id;
     map.handle = (void *)handle;
 
-    if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
+    if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
        return -errno;
     return 0;
 }
@@ -2031,7 +2044,7 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
 
     map.ctx_id = ctx_id;
 
-    if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
+    if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
        return -errno;
     if (handle)
        *handle = (drm_handle_t)map.handle;
@@ -2046,7 +2059,7 @@ int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
     drm_map_t map;
 
     map.offset = idx;
-    if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
+    if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map))
        return -errno;
     *offset = map.offset;
     *size   = map.size;
@@ -2063,7 +2076,7 @@ int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,
     drm_client_t client;
 
     client.idx = idx;
-    if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client))
+    if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client))
        return -errno;
     *auth      = client.auth;
     *pid       = client.pid;
@@ -2078,7 +2091,7 @@ int drmGetStats(int fd, drmStatsT *stats)
     drm_stats_t s;
     int         i;
 
-    if (ioctl(fd, DRM_IOCTL_GET_STATS, &s))
+    if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s))
        return -errno;
 
     stats->count = 0;
@@ -2220,7 +2233,7 @@ int drmSetInterfaceVersion(int fd, drmSetVersion *version)
     sv.drm_dd_major = version->drm_dd_major;
     sv.drm_dd_minor = version->drm_dd_minor;
 
-    if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
+    if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
        retcode = -errno;
     }
 
@@ -2251,7 +2264,7 @@ int drmCommandNone(int fd, unsigned long drmCommandIndex)
 
     request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex);
 
-    if (ioctl(fd, request, data)) {
+    if (drmIoctl(fd, request, data)) {
        return -errno;
     }
     return 0;
@@ -2280,7 +2293,7 @@ int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
     request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE, 
        DRM_COMMAND_BASE + drmCommandIndex, size);
 
-    if (ioctl(fd, request, data)) {
+    if (drmIoctl(fd, request, data)) {
        return -errno;
     }
     return 0;
@@ -2309,7 +2322,7 @@ int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
     request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE, 
        DRM_COMMAND_BASE + drmCommandIndex, size);
 
-    if (ioctl(fd, request, data)) {
+    if (drmIoctl(fd, request, data)) {
        return -errno;
     }
     return 0;
@@ -2338,9 +2351,8 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
     request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE, 
        DRM_COMMAND_BASE + drmCommandIndex, size);
 
-    if (ioctl(fd, request, data)) {
+    if (drmIoctl(fd, request, data))
        return -errno;
-    }
     return 0;
 }
 
@@ -2362,7 +2374,7 @@ int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type,
     arg.type = type;
     arg.fence_class = fence_class;
 
-    if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
        return -errno;
     fence->handle = arg.handle;
     fence->fence_class = arg.fence_class;
@@ -2386,7 +2398,7 @@ int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fenc
     arg.flags = flags;
     arg.fence_class = fence_class;
 
-    if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
        return -errno;
     fence->handle = arg.handle;
     fence->fence_class = arg.fence_class;
@@ -2404,7 +2416,7 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence)
     memset(&arg, 0, sizeof(arg));
     arg.handle = handle;
 
-    if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
        return -errno;
     fence->handle = arg.handle;
     fence->fence_class = arg.fence_class;
@@ -2421,7 +2433,7 @@ int drmFenceUnreference(int fd, const drmFence *fence)
     memset(&arg, 0, sizeof(arg));
     arg.handle = fence->handle;
 
-    if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
        return -errno;
     return 0;
 }
@@ -2434,7 +2446,7 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
     arg.handle = fence->handle;
     arg.type = flush_type;
 
-    if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
        return -errno;
     fence->fence_class = arg.fence_class;
     fence->type = arg.type;
@@ -2449,7 +2461,7 @@ int drmFenceUpdate(int fd, drmFence *fence)
     memset(&arg, 0, sizeof(arg));
     arg.handle = fence->handle;
 
-    if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
        return -errno;
     fence->fence_class = arg.fence_class;
     fence->type = arg.type;
@@ -2489,7 +2501,7 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
     arg.handle = fence->handle;
     arg.type = emit_type;
 
-    if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
        return -errno;
     fence->fence_class = arg.fence_class;
     fence->type = arg.type;
@@ -2527,7 +2539,7 @@ drmIoctlTimeout(int fd, unsigned long request, void *argp)
     int ret;
 
     do {
-       ret = ioctl(fd, request, argp);
+       ret = drmIoctl(fd, request, argp);
        if (ret != 0 && errno == EAGAIN) {
            if (!haveThen) {
                gettimeofday(&then, NULL);
@@ -2637,7 +2649,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
     memset(&arg, 0, sizeof(arg));
     req->handle = handle;
     
-    if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
        return -errno;
 
     drmBOCopyReply(rep, buf);
@@ -2661,7 +2673,7 @@ int drmBOUnreference(int fd, drmBO *buf)
     memset(&arg, 0, sizeof(arg));
     arg.handle = buf->handle;
 
-    if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
        return -errno;
 
     buf->handle = 0;
@@ -2731,7 +2743,7 @@ int drmBOUnmap(int fd, drmBO *buf)
     memset(&arg, 0, sizeof(arg));
     arg.handle = buf->handle;
 
-    if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
+    if (drmIoctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
        return -errno;
     }
     buf->mapCount--;
@@ -2777,7 +2789,7 @@ int drmBOInfo(int fd, drmBO *buf)
     memset(&arg, 0, sizeof(arg));
     req->handle = buf->handle;
 
-    ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
+    ret = drmIoctl(fd, DRM_IOCTL_BO_INFO, &arg);
     if (ret) 
        return -errno;
 
@@ -2832,7 +2844,7 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
     arg.p_size = pSize;
     arg.mem_type = memType;
 
-    if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_MM_INIT, &arg))
        return -errno;
     return 0;  
 }
@@ -2844,7 +2856,7 @@ int drmMMTakedown(int fd, unsigned memType)
     memset(&arg, 0, sizeof(arg));
     arg.mem_type = memType;
 
-    if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
        return -errno;
     return 0;  
 }
@@ -2886,7 +2898,7 @@ int drmMMInfo(int fd, unsigned memType, uint64_t *size)
     
     arg.mem_type = memType;
 
-    if (ioctl(fd, DRM_IOCTL_MM_INFO, &arg))
+    if (drmIoctl(fd, DRM_IOCTL_MM_INFO, &arg))
        return -errno;
 
     *size = arg.p_size;
@@ -2901,7 +2913,7 @@ int drmBOVersion(int fd, unsigned int *major,
     int ret;
 
     memset(&arg, 0, sizeof(arg));
-    ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
+    ret = drmIoctl(fd, DRM_IOCTL_BO_VERSION, &arg);
     if (ret)
        return -errno;
 
index 35780ac..b29b3e5 100644 (file)
@@ -659,6 +659,7 @@ extern int  drmSLLookupNeighbors(void *l, unsigned long key,
 
 extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
 extern void drmCloseOnce(int fd);
+extern void drmMsg(const char *format, ...);
 
 extern int drmSetMaster(int fd);
 extern int drmDropMaster(int fd);
index bb57340..a31de42 100644 (file)
@@ -94,6 +94,18 @@ typedef struct _drmMMListHead
 #define DRMLISTENTRY(__type, __item, __field)   \
     ((__type *)(((char *) (__item)) - offsetof(__type, __field)))
 
+#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
+
+#define DRMLISTFOREACHSAFE(__item, __temp, __list)                     \
+       for ((__item) = (__list)->next, (__temp) = (__item)->next;      \
+            (__item) != (__list);                                      \
+            (__item) = (__temp), (__temp) = (__item)->next)
+
+#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list)              \
+       for ((__item) = (__list)->prev, (__temp) = (__item)->prev;      \
+            (__item) != (__list);                                      \
+            (__item) = (__temp), (__temp) = (__item)->prev)
+
 typedef struct _drmFence
 {
     unsigned handle;
index 9b28885..1790bdb 100644 (file)
@@ -117,7 +117,7 @@ V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \
 
 ifeq ($(V),"$(RUNNING_REL)")
 HEADERFROMBOOT := 1
-GETCONFIG := MAKEFILES=$(shell pwd)/.config
+GETCONFIG := MAKEFILES=$(shell /bin/pwd)/.config
 HAVECONFIG := y
 endif
 
@@ -164,7 +164,7 @@ endif
 all: modules
 
 modules: includes
-       +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+       +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules
 
 ifeq ($(HEADERFROMBOOT),1)
 
@@ -240,7 +240,7 @@ drmstat: drmstat.c
        $(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
 
 install:
-       make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
+       make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules_install
 
 else
 
index 246c0b3..768cd22 100644 (file)
@@ -12,16 +12,16 @@ drm-objs    := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
                drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
                drm_memory_debug.o ati_pcigart.o drm_sman.o \
-               drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
+               drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
                drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_crtc.o \
                drm_edid.o drm_modes.o drm_bo_lock.o drm_regman.o \
-               drm_vm_nopage_compat.o drm_crtc_helper.o
+               drm_vm_nopage_compat.o drm_crtc_helper.o drm_gem.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs   := i810_drv.o i810_dma.o
 i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
-               i915_buffer.o i915_execbuf.o \
+               i915_buffer.o i915_execbuf.o i915_gem.o \
                intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
                intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
                intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \
diff --git a/linux-core/drm-gem.txt b/linux-core/drm-gem.txt
new file mode 100644 (file)
index 0000000..5cda87f
--- /dev/null
@@ -0,0 +1,805 @@
+                  The Graphics Execution Manager
+             Part of the Direct Rendering Manager
+                  ==============================
+                 
+                Keith Packard <keithp@keithp.com>
+                  Eric Anholt <eric@anholt.net>
+                          2008-5-9
+
+Contents:
+
+ 1. GEM Overview
+ 2. API overview and conventions
+ 3. Object Creation/Destruction
+ 4. Reading/writing contents
+ 5. Mapping objects to userspace
+ 6. Memory Domains
+ 7. Execution (Intel specific)
+ 8. Other misc Intel-specific functions
+
+1. Graphics Execution Manager Overview
+
+Gem is designed to manage graphics memory, control access to the graphics
+device execution context and handle the essentially NUMA environment unique
+to modern graphics hardware. Gem allows multiple applications to share
+graphics device resources without the need to constantly reload the entire
+graphics card. Data may be shared between multiple applications with gem
+ensuring that the correct memory synchronization occurs.
+
+Graphics data can consume arbitrary amounts of memory, with 3D applications
+constructing ever larger sets of textures and vertices. With graphics card
+memory space growing larger every year, and graphics APIs growing more
+complex, we can no longer insist that each application save a complete copy
+of its graphics state so that the card can be re-initialized from user
+space at each context switch. Ensuring that graphics data remains persistent
+across context switches allows applications significant new functionality
+while also improving performance for existing APIs.
+
+Modern Linux desktops include significant 3D rendering as a fundamental
+component of the desktop image construction process. 2D and 3D applications
+paint their content to offscreen storage and the central 'compositing
+manager' constructs the final screen image from those window contents.  This
+means that pixel image data from these applications must move within reach
+of the compositing manager and be used as source operands for screen image
+rendering operations.
+
+Gem provides simple mechanisms to manage graphics data and control execution
+flow within the Linux operating system. Using many existing kernel
+subsystems, it does this with a modest amount of code.
+
+2. API Overview and Conventions
+
+All APIs here are defined in terms of ioctls applied to the DRM file
+descriptor. To create and manipulate objects, an application must be
+'authorized' using the DRI or DRI2 protocols with the X server. To relax
+that, we will need to implement some better access control mechanisms within
+the hardware portion of the driver to prevent inappropriate
+cross-application data access.
+
+Any DRM driver which does not support GEM will return -ENODEV for all of
+these ioctls. Invalid object handles return -EINVAL. Invalid object names
+return -ENOENT. Other errors are as documented in the specific API below.
+
+To avoid the need to translate ioctl contents on mixed-size systems (with
+32-bit user space running on a 64-bit kernel), the ioctl data structures
+contain explicitly sized objects, using 64 bits for all size and pointer
+data and 32 bits for identifiers. In addition, the 64-bit objects are all
+carefully aligned on 64-bit boundaries. Because of this, all pointers in the
+ioctl data structures are passed as uint64_t values. Suitable casts will
+be necessary.
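+
+For example, passing a user buffer pointer through one of these structures
+looks like this sketch ('data' is just an illustrative name):
+
+               pread.data_ptr = (uint64_t) (uintptr_t) data;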
+
+One significant operation which is explicitly left out of this API is object
+locking. Applications are expected to perform locking of shared objects
+outside of the GEM API. This kind of locking is not necessary to safely
+manipulate the graphics engine, and with multiple objects interacting in
+unknown ways, per-object locking would likely introduce all kinds of
+lock-order issues. Punting this to the application seems like the only
+sensible plan. Given that DRM already offers a global lock on the hardware,
+this doesn't change the current situation.
+
+3. Object Creation and Destruction
+
+Gem provides explicit memory management primitives. System pages are
+allocated when the object is created, either as the fundamental storage for
+hardware where system memory is used by the graphics processor directly, or
+as backing store for graphics-processor resident memory.
+
+Objects are referenced from user space using handles. These are, for all
+intents and purposes, equivalent to file descriptors. We could simply use
+file descriptors were it not for the small limit (1024) of file descriptors
+available to applications, and for the fact that the X server (a rather
+significant user of this API) uses 'select' and has a limited maximum file
+descriptor for that operation. Given the ability to allocate more file
+descriptors, and given the ability to place these 'higher' in the file
+descriptor space, we'd love to simply use file descriptors.
+
+Objects may be published with a name so that other applications can access
+them. The name remains valid as long as the object exists. Right now, our
+DRI APIs use 32-bit integer names, so that's what we expose here.
+
+ A. Creation
+
+               struct drm_gem_create {
+                       /**
+                        * Requested size for the object.
+                        *
+                        * The (page-aligned) allocated size for the object
+                        * will be returned.
+                        */
+                       uint64_t size;
+                       /**
+                        * Returned handle for the object.
+                        *
+                        * Object handles are nonzero.
+                        */
+                       uint32_t handle;
+                       uint32_t pad;
+               };
+       
+               /* usage */
+               create.size = 16384;
+               ret = ioctl (fd, DRM_IOCTL_GEM_CREATE, &create);
+               if (ret == 0)
+                       return create.handle;
+
+       Note that the size is rounded up to a page boundary, and that
+       the rounded-up size is returned in 'size'. No name is assigned to
+       this object, making it local to this process.
+
+       If insufficient memory is available, -ENOMEM will be returned.
+
+ B. Closing
+
+               struct drm_gem_close {
+                       /** Handle of the object to be closed. */
+                       uint32_t handle;
+                       uint32_t pad;
+               };
+               
+
+               /* usage */
+               close.handle = <handle>;
+               ret = ioctl (fd, DRM_IOCTL_GEM_CLOSE, &close);
+
+       This call makes the specified handle invalid, and if no other
+       applications are using the object, any necessary graphics hardware
+       synchronization is performed and the resources used by the object
+       released.
+
+ C. Naming
+
+               struct drm_gem_flink {
+                       /** Handle for the object being named */
+                       uint32_t handle;
+               
+                       /** Returned global name */
+                       uint32_t name;
+               };
+               
+               /* usage */
+               flink.handle = <handle>;
+               ret = ioctl (fd, DRM_IOCTL_GEM_FLINK, &flink);
+               if (ret == 0)
+                       return flink.name;
+
+       Flink creates a name for the object and returns it to the
+       application. This name can be used by other applications to gain
+       access to the same object.
+
+ D. Opening by name
+
+               struct drm_gem_open {
+                       /** Name of object being opened */
+                       uint32_t name;
+               
+                       /** Returned handle for the object */
+                       uint32_t handle;
+                       
+                       /** Returned size of the object */
+                       uint64_t size;
+               };
+               
+               /* usage */
+               open.name = <name>;
+               ret = ioctl (fd, DRM_IOCTL_GEM_OPEN, &open);
+               if (ret == 0) {
+                       *sizep = open.size;
+                       return open.handle;
+               }
+
+       Open accesses an existing object and returns a handle for it. If the
+       object doesn't exist, -ENOENT is returned. The size of the object is
+       also returned. This handle has all the same capabilities as the
+       handle used to create the object. In particular, the object is not
+       destroyed until all handles are closed.
+
+4. Basic read/write operations
+
+By default, gem objects are not mapped to the application's address space;
+getting data in and out of them is done with I/O operations instead. This
+allows the data to reside in otherwise unmapped pages, including pages in
+video memory on an attached discrete graphics card. In addition, using
+explicit I/O operations allows better control over cache contents: because
+graphics devices are generally not cache coherent with the CPU, mapping
+pages used for graphics into an application address space requires the use
+of expensive cache flushing operations. Providing direct control over
+graphics data access ensures that data are handled in the most efficient
+possible fashion.
+
+ A. Reading
+
+               struct drm_gem_pread {
+                       /** Handle for the object being read. */
+                       uint32_t handle;
+                       uint32_t pad;
+                       /** Offset into the object to read from */
+                       uint64_t offset;
+                       /** Length of data to read */
+                       uint64_t size;
+                       /** Pointer to write the data into. */
+                       uint64_t data_ptr;      /* void * */
+               };
+
+       This copies data out of the specified object, starting at the
+       specified position, into the supplied user memory. Any necessary
+       graphics device synchronization and flushing will be done
+       automatically.
+               
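+       A usage sketch, assuming the ioctl is named DRM_IOCTL_GEM_PREAD
+       in line with the other ioctl names above ('buffer' is an
+       illustrative name):
+
+               /* usage */
+               pread.handle = <handle>;
+               pread.offset = <offset>;
+               pread.size = <size>;
+               pread.data_ptr = (uint64_t) (uintptr_t) buffer;
+               ret = ioctl (fd, DRM_IOCTL_GEM_PREAD, &pread);
+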
+               struct drm_gem_pwrite {
+                       /** Handle for the object being written to. */
+                       uint32_t handle;
+                       uint32_t pad;
+                       /** Offset into the object to write to */
+                       uint64_t offset;
+                       /** Length of data to write */
+                       uint64_t size;
+                       /** Pointer to read the data from. */
+                       uint64_t data_ptr;      /* void * */
+               };
+               
+       This copies data from user memory into the specified object
+       at the specified position. Again, device synchronization will
+       be handled by the kernel to ensure user space sees a
+       consistent view of the graphics device.
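+
+       A matching sketch for writes, again assuming the name
+       DRM_IOCTL_GEM_PWRITE follows the pattern above:
+
+               /* usage */
+               pwrite.handle = <handle>;
+               pwrite.offset = <offset>;
+               pwrite.size = <size>;
+               pwrite.data_ptr = (uint64_t) (uintptr_t) buffer;
+               ret = ioctl (fd, DRM_IOCTL_GEM_PWRITE, &pwrite);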
+
+5. Mapping objects to user space
+
+For most objects, reading/writing is the preferred interaction mode.
+However, when the CPU is involved in rendering to cover deficiencies in
+hardware support for particular operations, the CPU will want to directly
+access the relevant objects. 
+
+Because mmap is fairly heavyweight, we allow applications to retain maps to
+objects persistently and then update how they're using the memory through a
+separate interface. Applications which fail to use this separate interface
+may exhibit unpredictable behaviour as memory consistency will not be
+preserved.
+
+ A. Mapping
+
+               struct drm_gem_mmap {
+                       /** Handle for the object being mapped. */
+                       uint32_t handle;
+                       uint32_t pad;
+                       /** Offset in the object to map. */
+                       uint64_t offset;
+                       /**
+                        * Length of data to map.
+                        *
+                        * The value will be page-aligned.
+                        */
+                       uint64_t size;
+                       /** Returned pointer to where the data was mapped */
+                       uint64_t addr_ptr;      /* void * */
+               };
+               
+               /* usage */
+               mmap.handle = <handle>;
+               mmap.offset = <offset>;
+               mmap.size = <size>;
+               ret = ioctl (fd, DRM_IOCTL_GEM_MMAP, &mmap);
+               if (ret == 0)
+                       return (void *) (uintptr_t) mmap.addr_ptr;
+
+
+ B. Unmapping
+
+               munmap (addr, length);
+
+       Nothing strange here, just use the normal munmap syscall.
+
+6. Memory Domains
+
+Graphics devices remain a strong bastion of non cache-coherent memory. As a
+result, accessing data through one functional unit will end up loading that
+cache with data which then needs to be manually synchronized when that data
+is used with another functional unit.
+
+Tracking where data are resident is done by identifying how functional units
+deal with caches. Each cache is labeled as a separate memory domain. Then,
+each sequence of operations is expected to load data into various read
+domains and leave data in at most one write domain. Gem tracks the read and
+write memory domains of each object and performs the necessary
+synchronization operations when objects move from one domain set to another.
+
+For example, if operation 'A' constructs an image that is immediately used
+by operation 'B', then when the read domain for 'B' is not the same as the
+write domain for 'A', then the write domain must be flushed, and the read
+domain invalidated. If these two operations are both executed in the same
+command queue, then the flush operation can go in between them in the same
+queue, avoiding any kind of CPU-based synchronization and leaving the GPU to
+do the work itself.
+
+6.1 Memory Domains (GPU-independent)
+
+ * DRM_GEM_DOMAIN_CPU.
+
+ Objects in this domain are using caches which are connected to the CPU.
+ Moving objects from non-CPU domains into the CPU domain can involve waiting
+ for the GPU to finish with operations using this object. Moving objects
+ from this domain to a GPU domain can involve flushing CPU caches and chipset
+ buffers.
+
+6.2 GPU-independent memory domain ioctl
+
+This ioctl is independent of the GPU in use. So far, no use other than
+synchronizing objects to the CPU domain has been found; if that turns out
+to be generally true, this ioctl may be simplified further.
+   
+ A. Explicit domain control
+
+               struct drm_gem_set_domain {
+                       /** Handle for the object */
+                       uint32_t handle;
+               
+                       /** New read domains */
+                       uint32_t read_domains;
+               
+                       /** New write domain */
+                       uint32_t write_domain;
+               };
+
+               /* usage */
+               set_domain.handle = <handle>;
+               set_domain.read_domains = <read_domains>;
+               set_domain.write_domain = <write_domain>;
+               ret = ioctl (fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+               
+       When the application wants to explicitly manage memory domains for
+       an object, it can use this function. Usually, this is only used
+       when the application wants to synchronize object contents between
+       the GPU and CPU-based application rendering. In that case,
+       the <read_domains> would be set to DRM_GEM_DOMAIN_CPU, and if the
+       application were going to write to the object, the <write_domain>
+       would also be set to DRM_GEM_DOMAIN_CPU. After the call, gem
+       guarantees that all previous rendering operations involving this
+       object are complete. The application is then free to access the
+       object through the address returned by the mmap call. Afterwards,
+       when the application again uses the object through the GPU, any
+       necessary CPU flushing will occur and the object will be correctly
+       synchronized with the GPU.
+
+       Note that this synchronization is not required for any accesses
+       going through the driver itself. The pread, pwrite and execbuffer
+       ioctls all perform the necessary domain management internally.
+       Explicit synchronization is only necessary when accessing the object
+       through the mmap'd address.
+
+7. Execution (Intel specific)
+
+Managing the command buffers is inherently chip-specific, so the core of gem
+doesn't have any intrinsic functions. Rather, execution is left to the
+device-specific portions of the driver.
+
+The Intel DRM_I915_GEM_EXECBUFFER ioctl takes a list of gem objects, all of
+which are mapped to the graphics device. The last object in the list is the
+command buffer.
+
+7.1 Relocations
+
+Command buffers often refer to other objects, and to allow the kernel driver
+to move objects around, a sequence of relocations is associated with each
+object. Device-specific relocation operations are used to place the
+target-object relative value into the object.
+
+The Intel driver has a single relocation type:
+
+               struct drm_i915_gem_relocation_entry {
+                       /**
+                        * Handle of the buffer being pointed to by this
+                        * relocation entry.
+                        *
+                        * It's appealing to make this be an index into the
+                        * mm_validate_entry list to refer to the buffer,
+                        * but this allows the driver to create a relocation
+                        * list for state buffers and not re-write it per
+                        * exec using the buffer.
+                        */
+                       uint32_t target_handle;
+               
+                       /**
+                        * Value to be added to the offset of the target
+                        * buffer to make up the relocation entry.
+                        */
+                       uint32_t delta;
+               
+                       /**
+                        * Offset in the buffer the relocation entry will be
+                        * written into
+                        */
+                       uint64_t offset;
+               
+                       /**
+                        * Offset value of the target buffer that the
+                        * relocation entry was last written as.
+                        *
+                        * If the buffer has the same offset as last time, we
+                        * can skip syncing and writing the relocation.  This
+                        * value is written back out by the execbuffer ioctl
+                        * when the relocation is written.
+                        */
+                       uint64_t presumed_offset;
+               
+                       /**
+                        * Target memory domains read by this operation.
+                        */
+                       uint32_t read_domains;
+               
+                       /**
+                        * Target memory domains written by this operation.
+                        *
+                        * Note that only one domain may be written by the
+                        * whole execbuffer operation, so that where there are
+                        * conflicts, the application will get -EINVAL back.
+                        */
+                       uint32_t write_domain;
+               };
+               
+       'target_handle', the handle to the target object. This object must
+       be one of the objects listed in the execbuffer request or
+       bad things will happen. The kernel doesn't check for this.
+
+       'offset' is where, in the source object, the relocation data
+       are written. Each relocation value is a 32-bit value consisting
+       of the location of the target object in the GPU memory space plus
+       the 'delta' value included in the relocation.
+
+       'presumed_offset' is where user-space believes the target object
+       lies in GPU memory space. If this value matches where the object
+       actually is, then no relocation data are written, the kernel
+       assumes that user space has set up data in the source object
+       using this presumption. This offers a fairly important optimization
+       as writing relocation data requires mapping of the source object
+       into the kernel memory space.
+
+       'read_domains' and 'write_domain' list the usage by the source
+       object of the target object. The kernel unions all of the domain
+       information from all relocations in the execbuffer request. No more
+       than one write_domain is allowed, otherwise an EINVAL error is
+       returned. read_domains must contain write_domain. This domain
+       information is used to synchronize buffer contents as described
+       above in the section on domains.
+
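+       A sketch of filling a single entry; values in <> are
+       placeholders, and the domains are described in 7.1.1 below:
+
+               reloc.target_handle = <handle of the target object>;
+               reloc.delta = 0;
+               reloc.offset = <byte offset of the pointer in this object>;
+               reloc.presumed_offset = <last known offset of the target>;
+               reloc.read_domains = DRM_GEM_DOMAIN_I915_SAMPLER;
+               reloc.write_domain = 0;
+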
+7.1.1 Memory Domains (Intel specific)
+
+The Intel GPU has several internal caches which are not coherent and hence
+require explicit synchronization. Memory domains provide the necessary data
+to synchronize what is needed while leaving other cache contents intact.
+
+ * DRM_GEM_DOMAIN_I915_RENDER.
+   The GPU 3D and 2D rendering operations use a unified rendering cache, so
+   operations doing 3D painting and 2D blits will use this domain.
+   
+ * DRM_GEM_DOMAIN_I915_SAMPLER
+   Textures are loaded by the sampler through a separate cache, so
+   any texture reading will use this domain. Note that the sampler
+   and renderer use different caches, so moving an object from render target
+   to texture source will require a domain transfer.
+   
+ * DRM_GEM_DOMAIN_I915_COMMAND
+   The command buffer doesn't have an explicit cache (although it does
+   read ahead quite a bit), so this domain just indicates that the object
+   needs to be flushed to the GPU.
+   
+ * DRM_GEM_DOMAIN_I915_INSTRUCTION
+   All of the programs on Gen4 and later chips use an instruction cache to
+   speed program execution. It must be explicitly flushed when new programs
+   are written to memory by the CPU.
+
+ * DRM_GEM_DOMAIN_I915_VERTEX
+   Vertex data uses two different vertex caches, but they're
+   both flushed with the same instruction.
+
+7.2 Execution object list (Intel specific)
+
+               struct drm_i915_gem_exec_object {
+                       /**
+                        * User's handle for a buffer to be bound into the GTT
+                        * for this operation.
+                        */
+                       uint32_t handle;
+                       
+                       /**
+                        * List of relocations to be performed on this buffer
+                        */
+                       uint32_t relocation_count;
+                       /* struct drm_i915_gem_relocation_entry *relocs */
+                       uint64_t relocs_ptr;
+                       
+                       /** 
+                        * Required alignment in graphics aperture 
+                        */
+                       uint64_t alignment;
+               
+                       /**
+                        * Returned value of the updated offset of the object,
+                        * for future presumed_offset writes.
+                        */
+                       uint64_t offset;
+               };
+                       
+       Each object involved in a particular execution operation must be
+       listed using one of these structures.
+
+       'handle' references the object.
+
+       'relocs_ptr' is a user-mode pointer to an array of 'relocation_count'
+       drm_i915_gem_relocation_entry structs (see above) that
+       define the relocations necessary in this buffer. Note that all
+       relocations must reference other exec_object structures in the same
+       execbuffer ioctl and that those other buffers must come earlier in
+       the exec_object array. In other words, the dependencies mapped by the
+       exec_object relocations must form a directed acyclic graph.
+
+       'alignment' is the byte alignment necessary for this buffer. Each
+       object has specific alignment requirements; as the kernel doesn't
+       know what each object is being used for, those requirements must be
+       provided by user mode. If an object is used in two different ways,
+       it's quite possible that the alignment requirements will differ.
+
+       'offset' is a return value, receiving the location of the object
+       during this execbuffer operation. The application should use this
+       as the presumed offset in future operations; if the object does not
+       move, the kernel need not write relocation data.
+
+7.3 Execbuffer ioctl (Intel specific)
+
+               struct drm_i915_gem_execbuffer {
+                       /**
+                        * List of buffers to be validated with their
+                        * relocations to be performed on them.
+                        *
+                        * These buffers must be listed in an order such that
+                        * all relocations a buffer is performing refer to
+                        * buffers that have already appeared in the validate
+                        * list.
+                        */
+                       /* struct drm_i915_gem_validate_entry *buffers */
+                       uint64_t buffers_ptr;
+                       uint32_t buffer_count;
+               
+                       /**
+                        * Offset in the batchbuffer to start execution from.
+                        */
+                       uint32_t batch_start_offset;
+                       
+                       /**
+                        * Bytes used in batchbuffer from batch_start_offset
+                        */
+                       uint32_t batch_len;
+                       uint32_t DR1;
+                       uint32_t DR4;
+                       uint32_t num_cliprects;
+                       uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+               };
+               
+
+       'buffers_ptr' is a user-mode pointer to an array of 'buffer_count'
+       drm_i915_gem_exec_object structures which contains the complete set
+       of objects required for this execbuffer operation. The last entry in
+       this array, the 'batch buffer', is the buffer of commands which will
+       be linked to the ring and executed.
+
+       'batch_start_offset' is the byte offset within the batch buffer which
+       contains the first command to execute. So far, we haven't found a
+       reason to use anything other than '0' here, but the thought was that
+       some space might be allocated for additional initialization which
+       could be skipped in some cases. This must be a multiple of 4.
+
+       'batch_len' is the length, in bytes, of the data to be executed
+       (i.e., the amount of data after batch_start_offset). This must
+       be a multiple of 4.
+
+       'num_cliprects' and 'cliprects_ptr' reference an array of
+       drm_clip_rect structures that is num_cliprects long. The entire
+       batch buffer will be executed multiple times, once for each
+       rectangle in this list. If num_cliprects is 0, then no clipping
+       rectangle will be set.
+
+       'DR1' and 'DR4' are portions of the 3DSTATE_DRAWING_RECTANGLE
+       command which will be queued when this operation is clipped
+       (num_cliprects != 0).
+
+               DR1 bit         definition
+               31              Fast Scissor Clip Disable (debug only).
+                               Disables a hardware optimization that
+                               improves performance. This should have
+                               no visible effect, other than reducing
+                               performance.
+                               
+               30              Depth Buffer Coordinate Offset Disable.
+                               This disables the addition of the
+                               depth buffer offset bits which are used
+                               to change the location of the depth buffer
+                               relative to the front buffer.
+
+               27:26           X Dither Offset. Specifies the X pixel
+                               offset to use when accessing the dither table.
+                               
+               25:24           Y Dither Offset. Specifies the Y pixel
+                               offset to use when accessing the dither
+                               table.
+
+               DR4 bit         definition
+               31:16           Drawing Rectangle Origin Y. Specifies the Y
+                               origin of coordinates relative to the
+                               draw buffer.
+
+               15:0            Drawing Rectangle Origin X. Specifies the X
+                               origin of coordinates relative to the
+                               draw buffer.
+
+       As you can see, these two fields are necessary for correctly
+       offsetting drawing within a buffer which contains multiple surfaces.
+       Note that DR1 is only used on Gen3 and earlier hardware and that
+       newer hardware sticks the dither offset elsewhere.
+
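+       Putting it together, a minimal unclipped submission might look
+       like the sketch below, assuming the ioctl macro is named
+       DRM_IOCTL_I915_GEM_EXECBUFFER in line with the other examples.
+       'exec' is an array of drm_i915_gem_exec_object entries built as
+       in section 7.2, with the batch buffer last:
+
+               /* usage */
+               execbuf.buffers_ptr = (uint64_t) (uintptr_t) exec;
+               execbuf.buffer_count = <count>;
+               execbuf.batch_start_offset = 0;
+               execbuf.batch_len = <bytes of commands, a multiple of 4>;
+               execbuf.num_cliprects = 0;
+               execbuf.cliprects_ptr = 0;
+               execbuf.DR1 = 0;
+               execbuf.DR4 = 0;
+               ret = ioctl (fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
+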
+7.3.1 Detailed Execution Description
+
+       Execution of a single batch buffer requires several preparatory
+       steps to make the objects visible to the graphics engine and resolve
+       relocations to account for their current addresses.
+
+ A. Mapping and Relocation
+
+       Each exec_object structure in the array is examined in turn. 
+       
+       If the object is not already bound to the GTT, it is assigned a
+       location in the graphics address space. If no space is available in
+       the GTT, some other object will be evicted. This may require waiting
+       for previous execbuffer requests to complete before that object can
+       be unmapped. With the location assigned, the pages for the object
+       are pinned in memory using find_or_create_page and the GTT entries
+       updated to point at the relevant pages using drm_agp_bind_pages.
+       
+       Then the array of relocations is traversed. Each relocation record
+       looks up the target object and, if the presumed offset does not
+       match the current offset (remember that this buffer has already been
+       assigned an address as it must have been mapped earlier), the
+       relocation value is computed using the current offset.  If the
+       object is currently in use by the graphics engine, writing the data
+       out must be preceded by a delay while the object is still busy.
+       Once it is idle, then the page containing the relocation is mapped
+       by the CPU and the updated relocation data written out.
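+
+       In pseudo-code, the per-relocation step described above is
+       roughly this (a sketch, not the literal driver code):
+
+               if (reloc->presumed_offset != target->offset) {
+                       value = target->offset + reloc->delta;
+                       /* wait for the source object to go idle, then */
+                       write value into the source object at reloc->offset;
+               }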
+
+       The read_domains and write_domain entries in each relocation are
+       used to compute the new read_domains and write_domain values for the
+       target buffers. The actual execution of the domain changes must wait
+       until all of the exec_object entries have been evaluated as the
+       complete set of domain information will not be available until then.
+       
+ B. Memory Domain Resolution
+
+       After all of the new memory domain data has been pulled out of the
+       relocations and computed for each object, the list of objects is
+       again traversed and the new memory domains compared against the
+       current memory domains. There are two basic operations involved here:
+
+        * Flushing the current write domain. If the new read domains
+          are not equal to the current write domain, then the current
+          write domain must be flushed. Otherwise, reads will not see data
+          present in the write domain cache. In addition, any new read domains
+          other than the current write domain must be invalidated to ensure
+          that the flushed data are re-read into their caches.
+
+        * Invalidating new read domains. Any domains which were not currently
+          used for this object must be invalidated as old objects which
+          were mapped at the same location may have stale data in the new
+          domain caches.
+
+       If the CPU cache is being invalidated and some GPU cache is being
+       flushed, then we'll have to wait for rendering to complete so that
+       any pending GPU writes will be complete before we flush the GPU
+       cache.
+
+       If the CPU cache is being flushed, then we use 'clflush' to get data
+       written from the CPU.
+
+       Because the GPU caches cannot be partially flushed or invalidated,
+       we don't actually flush them during this traversal stage. Rather, we
+       gather the invalidate and flush bits up in the device structure.
+
+       Once all of the object domain changes have been evaluated, then the
+       gathered invalidate and flush bits are examined. For any GPU flush
+       operations, we emit a single MI_FLUSH command that performs all of
+       the necessary flushes. We then look to see if the CPU cache was
+       flushed. If so, we use the chipset flush magic (writing to a special
+       page) to get the data out of the chipset and into memory.
+
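+       As a rough sketch, simplified from the description above and
+       using the invalidate_domains/flush_domains fields kept in the
+       device structure, the per-object gathering step looks like:
+
+               invalidate = obj->pending_read_domains & ~obj->read_domains;
+               if (obj->write_domain != 0 &&
+                   obj->write_domain != obj->pending_read_domains)
+                       dev->flush_domains |= obj->write_domain;
+               dev->invalidate_domains |= invalidate;
+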
+ C. Queuing Batch Buffer to the Ring
+
+       With all of the objects resident in graphics memory space, and all
+       of the caches prepared with appropriate data, the batch buffer
+       object can be queued to the ring. If there are clip rectangles, then
+       the buffer is queued once per rectangle, with suitable clipping
+       inserted into the ring just before the batch buffer.
+
+ D. Creating an IRQ Cookie
+
+       Right after the batch buffer is placed in the ring, a request to
+       generate an IRQ is added to the ring along with a command to write a
+       marker into memory. When the IRQ fires, the driver can look at the
+       memory location to see where in the ring the GPU has passed. This
+       magic cookie value is stored in each object used in this execbuffer
+       command; it is used wherever you saw 'wait for rendering' above in
+       this document.
+
+ E. Writing back the new object offsets
+
+       So that the application has a better idea what to use for
+       'presumed_offset' values later, the current object offsets are
+       written back to the exec_object structures.
+
+
+8. Other misc Intel-specific functions.
+
+To complete the driver, a few other functions were necessary.
+
+8.1 Initialization from the X server
+
+As the X server is currently responsible for apportioning memory between 2D
+and 3D, it must tell the kernel which region of the GTT aperture is
+available for 3D objects to be mapped into.
+
+               struct drm_i915_gem_init {
+                       /**
+                        * Beginning offset in the GTT to be managed by the
+                        * DRM memory manager.
+                        */
+                       uint64_t gtt_start;
+                       /**
+                        * Ending offset in the GTT to be managed by the DRM
+                        * memory manager.
+                        */
+                       uint64_t gtt_end;
+               };
+               /* usage */
+               init.gtt_start = <gtt_start>;
+               init.gtt_end = <gtt_end>;
+               ret = ioctl (fd, DRM_IOCTL_I915_GEM_INIT, &init);
+
+       The GTT aperture between gtt_start and gtt_end will be used to map
+       objects. This also tells the kernel that the ring can be used,
+       pulling the ring addresses from the device registers.
+
+8.2 Pinning objects in the GTT
+
+For scan-out buffers and the current shared depth and back buffers, we need
+to have them always available in the GTT, at least for now. Pinning means to
+lock their pages in memory along with keeping them at a fixed offset in the
+graphics aperture. These operations are available only to root.
+               
+               struct drm_i915_gem_pin {
+                       /** Handle of the buffer to be pinned. */
+                       uint32_t handle;
+                       uint32_t pad;
+                       
+                       /** alignment required within the aperture */
+                       uint64_t alignment;
+               
+                       /** Returned GTT offset of the buffer. */
+                       uint64_t offset;
+               };
+
+               /* usage */
+               pin.handle = <handle>;
+               pin.alignment = <alignment>;
+               ret = ioctl (fd, DRM_IOCTL_I915_GEM_PIN, &pin);
+               if (ret == 0)
+                       return pin.offset;
+
+       Pinning an object ensures that it will not be evicted from the GTT
+       or moved. It will stay resident until destroyed or unpinned.
+               
+               struct drm_i915_gem_unpin {
+                       /** Handle of the buffer to be unpinned. */
+                       uint32_t handle;
+                       uint32_t pad;
+               };
+               
+               /* usage */
+               unpin.handle = <handle>;
+               ret = ioctl (fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
+               
+       Unpinning an object makes it possible to evict this object from the
+       GTT. It doesn't ensure that it will be evicted, just that it may.
+
index 5b2d782..708b4fa 100644 (file)
@@ -54,6 +54,8 @@
 #include <linux/smp_lock.h>    /* For (un)lock_kernel */
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kref.h>
 #include <linux/pagemap.h>
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
 #include <linux/mutex.h>
 struct drm_device;
 struct drm_file;
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+typedef unsigned long uintptr_t;
+#endif
+
 /* If you want the memory alloc debug functionality, change define below */
 /* #define DEBUG_MEMORY */
 
@@ -108,7 +114,7 @@ struct drm_file;
 #define DRIVER_DMA_QUEUE   0x100
 #define DRIVER_FB_DMA      0x200
 #define DRIVER_MODESET     0x400
-
+#define DRIVER_GEM        0x800
 
 /*@}*/
 
@@ -427,6 +433,11 @@ struct drm_file {
 
        struct list_head refd_objects;
 
+       /** Mapping of mm object handles to object pointers. */
+       struct idr object_idr;
+       /** Lock for synchronization of access to object_idr. */
+       spinlock_t table_lock;
+
        struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
        struct file *filp;
        void *driver_priv;
@@ -469,6 +480,11 @@ struct drm_lock_data {
        uint32_t kernel_waiters;
        uint32_t user_waiters;
        int idle_has_lock;
+       /**
+        * Boolean signaling that the lock is held on behalf of the
+        * file_priv client by the kernel in an ioctl handler.
+        */
+       int kernel_held;
 };
 
 /**
@@ -544,17 +560,17 @@ struct drm_sigdata {
  * Generic memory manager structs
  */
 
-struct drm_mm_node {
+struct drm_memrange_node {
        struct list_head fl_entry;
        struct list_head ml_entry;
        int free;
        unsigned long start;
        unsigned long size;
-       struct drm_mm *mm;
+       struct drm_memrange *mm;
        void *private;
 };
 
-struct drm_mm {
+struct drm_memrange {
        struct list_head fl_entry;
        struct list_head ml_entry;
 };
@@ -568,9 +584,9 @@ struct drm_map_list {
        struct drm_hash_item hash;
        struct drm_map *map;                    /**< mapping */
        uint64_t user_token;
-       struct drm_mm_node *file_offset_node;
        struct drm_master *master; /** if this map is associated with a specific
                                       master */
+       struct drm_memrange_node *file_offset_node;
 };
 
 typedef struct drm_map drm_local_map_t;
@@ -618,6 +634,56 @@ struct drm_ati_pcigart_info {
        int table_size;
 };
 
+/**
+ * This structure defines the GEM buffer object, which will be used by the
+ * DRM for its buffer objects.
+ */
+struct drm_gem_object {
+       /** Reference count of this object */
+       struct kref refcount;
+
+       /** Handle count of this object. Each handle also holds a reference */
+       struct kref handlecount;
+
+       /** Related drm device */
+       struct drm_device *dev;
+       
+       /** File representing the shmem storage */
+       struct file *filp;
+
+       /**
+        * Size of the object, in bytes.  Immutable over the object's
+        * lifetime.
+        */
+       size_t size;
+
+       /**
+        * Global name for this object, starts at 1. 0 means unnamed.
+        * Access is covered by the object_name_lock in the related drm_device
+        */
+       int name;
+
+       /**
+        * Memory domains. These monitor which caches contain read/write data
+        * related to the object. When transitioning from one set of domains
+        * to another, the driver is called to ensure that caches are suitably
+        * flushed and invalidated
+        */
+       uint32_t        read_domains;
+       uint32_t        write_domain;
+
+       /**
+        * While validating an exec operation, the
+        * new read/write domain values are computed here.
+        * They will be transferred to the above values
+        * at the point that any cache flushing occurs
+        */
+       uint32_t        pending_read_domains;
+       uint32_t        pending_write_domain;
+
+       void *driver_private;
+};
+
 #include "drm_objects.h"
 #include "drm_crtc.h"
 
@@ -746,6 +812,18 @@ struct drm_driver {
        int (*master_create)(struct drm_device *dev, struct drm_master *master);
        void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
 
+       int (*proc_init)(struct drm_minor *minor);
+       void (*proc_cleanup)(struct drm_minor *minor);
+
+       /**
+        * Driver-specific constructor for drm_gem_objects, to set up
+        * obj->driver_private.
+        *
+        * Returns 0 on success.
+        */
+       int (*gem_init_object) (struct drm_gem_object *obj);
+       void (*gem_free_object) (struct drm_gem_object *obj);
+
        struct drm_fence_driver *fence_driver;
        struct drm_bo_driver *bo_driver;
 
@@ -827,7 +905,7 @@ struct drm_device {
        struct list_head maplist;       /**< Linked list of regions */
        int map_count;                  /**< Number of mappable regions */
        struct drm_open_hash map_hash;       /**< User token hash table for maps */
-       struct drm_mm offset_manager;        /**< User token manager */
+       struct drm_memrange offset_manager;  /**< User token manager */
        struct drm_open_hash object_hash;    /**< User token hash table for objects */
        struct address_space *dev_mapping;  /**< For unmap_mapping_range() */
        struct page *ttm_dummy_page;
@@ -943,6 +1021,21 @@ struct drm_device {
 
        /* DRM mode setting */
        struct drm_mode_config mode_config;
+
+       /** \name GEM information */
+       /*@{ */
+       spinlock_t object_name_lock;
+       struct idr object_name_idr;
+       atomic_t object_count;
+       atomic_t object_memory;
+       atomic_t pin_count;
+       atomic_t pin_memory;
+       atomic_t gtt_count;
+       atomic_t gtt_memory;
+       uint32_t gtt_total;
+       uint32_t invalidate_domains;    /* domains pending invalidation */
+       uint32_t flush_domains;         /* domains pending flush */
+       /*@} */
 };
 
 #if __OS_HAS_AGP
@@ -1069,6 +1162,10 @@ extern void drm_free_pages(unsigned long address, int order, int area);
 extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
 extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+                                             struct page **pages,
+                                             unsigned long num_pages,
+                                             uint32_t gtt_offset);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
 extern void drm_free_memctl(size_t size);
@@ -1273,7 +1370,7 @@ extern void drm_put_master(struct drm_master *master);
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                     struct drm_driver *driver);
 extern int drm_put_dev(struct drm_device *dev);
-extern int drm_put_minor(struct drm_minor **minor);
+extern int drm_put_minor(struct drm_device *dev, struct drm_minor **p);
 extern unsigned int drm_debug; /* 1 to enable debug output */
 
 extern struct class *drm_class;
@@ -1317,27 +1414,94 @@ extern int drm_sysfs_connector_add(struct drm_connector *connector);
 extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 
 /*
- * Basic memory manager support (drm_mm.c)
+ * Basic memory manager support (drm_memrange.c)
  */
 
-extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
-                                              unsigned alignment);
-extern void drm_mm_put_block(struct drm_mm_node *cur);
-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
-                                               unsigned alignment, int best_match);
-extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
-extern void drm_mm_takedown(struct drm_mm *mm);
-extern int drm_mm_clean(struct drm_mm *mm);
-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
-extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
-extern void drm_mm_print(struct drm_mm *mm, const char *name);
-
-static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
+                                                       unsigned long size,
+                                                       unsigned alignment);
+extern void drm_memrange_put_block(struct drm_memrange_node *cur);
+extern struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange *mm,
+                                                         unsigned long size,
+                                                         unsigned alignment, int best_match);
+extern int drm_memrange_init(struct drm_memrange *mm,
+                            unsigned long start, unsigned long size);
+extern void drm_memrange_takedown(struct drm_memrange *mm);
+extern int drm_memrange_clean(struct drm_memrange *mm);
+extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm);
+extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm,
+                                              unsigned long size);
+extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm,
+                                         unsigned long size);
+static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
 {
        return block->mm;
 }
 
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int
+drm_gem_init (struct drm_device *dev);
+
+void
+drm_gem_object_free (struct kref *kref);
+
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size);
+
+void
+drm_gem_object_handle_free (struct kref *kref);
+    
+static inline void drm_gem_object_reference(struct drm_gem_object *obj)
+{
+       kref_get(&obj->refcount);
+}
+
+static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+       if (obj == NULL)
+               return;
+
+       kref_put (&obj->refcount, drm_gem_object_free);
+}
+
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                     struct drm_gem_object *obj,
+                     int *handlep);
+
+static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
+{
+       drm_gem_object_reference (obj);
+       kref_get(&obj->handlecount);
+}
+
+static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj)
+{
+       if (obj == NULL)
+               return;
+       
+       /*
+        * Must bump handle count first as this may be the last
+        * ref, in which case the object would disappear before we
+        * checked for a name
+        */
+       kref_put (&obj->handlecount, drm_gem_object_handle_free);
+       drm_gem_object_unreference (obj);
+}
+
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+                     int handle);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
 
index 0aa94a7..3cc94ff 100644 (file)
@@ -484,7 +484,50 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
        return agp_unbind_memory(handle);
 }
 
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+                  struct page **pages,
+                  unsigned long num_pages,
+                  uint32_t gtt_offset)
+{
+       DRM_AGP_MEM *mem;
+       int ret, i;
 
+       DRM_DEBUG("drm_agp_bind_pages\n");
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+       mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
+#else
+       mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+                                     AGP_USER_MEMORY);
+#endif
+       if (mem == NULL) {
+               DRM_ERROR("Failed to allocate memory for %ld pages\n",
+                         num_pages);
+               return NULL;
+       }
+
+       for (i = 0; i < num_pages; i++)
+               mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
+       mem->page_count = num_pages;
+
+       mem->is_flushed = true;
+       ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+       if (ret != 0) {
+               DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+               agp_free_memory(mem);
+               return NULL;
+       }
+
+       return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
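+
+/*
+ * A sketch of driver-side usage of the helper above (hypothetical
+ * variables; the pages must stay valid for the lifetime of the binding,
+ * since no reference is taken on them):
+ *
+ *	DRM_AGP_MEM *mem;
+ *
+ *	mem = drm_agp_bind_pages(dev, pages, num_pages, gtt_offset);
+ *	if (mem == NULL)
+ *		return -ENOMEM;
+ *	...
+ *	drm_agp_unbind_memory(mem);
+ *	drm_agp_free_memory(mem);
+ */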
 
 /*
  * AGP ttm backend interface.
@@ -554,7 +597,7 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
        int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
 
        DRM_DEBUG("drm_agp_bind_ttm\n");
-       mem->is_flushed = TRUE;
+       mem->is_flushed = true;
        mem->type = AGP_USER_MEMORY;
        /* CACHED MAPPED implies not snooped memory */
        if (snooped)
@@ -653,7 +696,7 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
        agp_be->mem = NULL;
 
        agp_be->bridge = dev->agp->bridge;
-       agp_be->populated = FALSE;
+       agp_be->populated = false;
        agp_be->backend.func = &agp_ttm_backend;
        agp_be->backend.dev = dev;
 
index f2d3ceb..2e0d124 100644 (file)
@@ -418,14 +418,14 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
        if (!bo->fence) {
                list_del_init(&bo->lru);
                if (bo->mem.mm_node) {
-                       drm_mm_put_block(bo->mem.mm_node);
+                       drm_memrange_put_block(bo->mem.mm_node);
                        if (bo->pinned_node == bo->mem.mm_node)
                                bo->pinned_node = NULL;
                        bo->mem.mm_node = NULL;
                }
                list_del_init(&bo->pinned_lru);
                if (bo->pinned_node) {
-                       drm_mm_put_block(bo->pinned_node);
+                       drm_memrange_put_block(bo->pinned_node);
                        bo->pinned_node = NULL;
                }
                list_del_init(&bo->ddestroy);
@@ -790,7 +790,7 @@ out:
        mutex_lock(&dev->struct_mutex);
        if (evict_mem.mm_node) {
                if (evict_mem.mm_node != bo->pinned_node)
-                       drm_mm_put_block(evict_mem.mm_node);
+                       drm_memrange_put_block(evict_mem.mm_node);
                evict_mem.mm_node = NULL;
        }
        drm_bo_add_to_lru(bo);
@@ -809,7 +809,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
                                  struct drm_bo_mem_reg *mem,
                                  uint32_t mem_type, int no_wait)
 {
-       struct drm_mm_node *node;
+       struct drm_memrange_node *node;
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_buffer_object *entry;
        struct drm_mem_type_manager *man = &bm->man[mem_type];
@@ -819,7 +819,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
 
        mutex_lock(&dev->struct_mutex);
        do {
-               node = drm_mm_search_free(&man->manager, num_pages,
+               node = drm_memrange_search_free(&man->manager, num_pages,
                                          mem->page_alignment, 1);
                if (node)
                        break;
@@ -845,7 +845,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
                return -ENOMEM;
        }
 
-       node = drm_mm_get_block(node, num_pages, mem->page_alignment);
+       node = drm_memrange_get_block(node, num_pages, mem->page_alignment);
        if (unlikely(!node)) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOMEM;
@@ -923,7 +923,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
        int type_found = 0;
        int type_ok = 0;
        int has_eagain = 0;
-       struct drm_mm_node *node = NULL;
+       struct drm_memrange_node *node = NULL;
        int ret;
 
        mem->mm_node = NULL;
@@ -951,10 +951,10 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
                mutex_lock(&dev->struct_mutex);
                if (man->has_type && man->use_type) {
                        type_found = 1;
-                       node = drm_mm_search_free(&man->manager, mem->num_pages,
+                       node = drm_memrange_search_free(&man->manager, mem->num_pages,
                                                  mem->page_alignment, 1);
                        if (node)
-                               node = drm_mm_get_block(node, mem->num_pages,
+                               node = drm_memrange_get_block(node, mem->num_pages,
                                                        mem->page_alignment);
                }
                mutex_unlock(&dev->struct_mutex);
@@ -1339,7 +1339,7 @@ out_unlock:
        if (ret || !move_unfenced) {
                if (mem.mm_node) {
                        if (mem.mm_node != bo->pinned_node)
-                               drm_mm_put_block(mem.mm_node);
+                               drm_memrange_put_block(mem.mm_node);
                        mem.mm_node = NULL;
                }
                drm_bo_add_to_lru(bo);
@@ -1431,7 +1431,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
 
                if (bo->pinned_node != bo->mem.mm_node) {
                        if (bo->pinned_node != NULL)
-                               drm_mm_put_block(bo->pinned_node);
+                               drm_memrange_put_block(bo->pinned_node);
                        bo->pinned_node = bo->mem.mm_node;
                }
 
@@ -1442,7 +1442,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
                mutex_lock(&dev->struct_mutex);
 
                if (bo->pinned_node != bo->mem.mm_node)
-                       drm_mm_put_block(bo->pinned_node);
+                       drm_memrange_put_block(bo->pinned_node);
 
                list_del_init(&bo->pinned_lru);
                bo->pinned_node = NULL;
@@ -2081,7 +2081,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo,
                if (bo->pinned_node == bo->mem.mm_node)
                        bo->pinned_node = NULL;
                if (bo->pinned_node != NULL) {
-                       drm_mm_put_block(bo->pinned_node);
+                       drm_memrange_put_block(bo->pinned_node);
                        bo->pinned_node = NULL;
                }
                mutex_unlock(&dev->struct_mutex);
@@ -2222,8 +2222,8 @@ int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
                drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
                drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
 
-               if (drm_mm_clean(&man->manager)) {
-                       drm_mm_takedown(&man->manager);
+               if (drm_memrange_clean(&man->manager)) {
+                       drm_memrange_takedown(&man->manager);
                } else {
                        ret = -EBUSY;
                }
@@ -2294,7 +2294,7 @@ int drm_bo_init_mm(struct drm_device *dev, unsigned type,
                        DRM_ERROR("Zero size memory manager type %d\n", type);
                        return ret;
                }
-               ret = drm_mm_init(&man->manager, p_offset, p_size);
+               ret = drm_memrange_init(&man->manager, p_offset, p_size);
                if (ret)
                        return ret;
        }
@@ -2721,7 +2721,7 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
                list->user_token = 0;
        }
        if (list->file_offset_node) {
-               drm_mm_put_block(list->file_offset_node);
+               drm_memrange_put_block(list->file_offset_node);
                list->file_offset_node = NULL;
        }
 
@@ -2764,7 +2764,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
        atomic_inc(&bo->usage);
        map->handle = (void *)bo;
 
-       list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
+       list->file_offset_node = drm_memrange_search_free(&dev->offset_manager,
                                                    bo->mem.num_pages, 0, 0);
 
        if (unlikely(!list->file_offset_node)) {
@@ -2772,7 +2772,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
                return -ENOMEM;
        }
 
-       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+       list->file_offset_node = drm_memrange_get_block(list->file_offset_node,
                                                  bo->mem.num_pages, 0);
 
        if (unlikely(!list->file_offset_node)) {
index 5c290af..9147a47 100644 (file)
@@ -41,7 +41,7 @@ static void drm_bo_free_old_node(struct drm_buffer_object *bo)
 
        if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
                mutex_lock(&bo->dev->struct_mutex);
-               drm_mm_put_block(old_mem->mm_node);
+               drm_memrange_put_block(old_mem->mm_node);
                mutex_unlock(&bo->dev->struct_mutex);
        }
        old_mem->mm_node = NULL;
index f35821b..cfa4fc6 100644 (file)
@@ -353,7 +353,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
 #endif
 
 
-#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM))
+#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
 #define DRM_KMAP_ATOMIC_PROT_PFN
 extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
                                  pgprot_t protection);
index ec76aa9..6f16dad 100644 (file)
@@ -95,7 +95,7 @@ void drm_helper_probe_single_connector_modes(struct drm_connector *connector, ui
        }
        
        
-       drm_mode_prune_invalid(dev, &connector->modes, TRUE);
+       drm_mode_prune_invalid(dev, &connector->modes, true);
        
        if (list_empty(&connector->modes)) {
                struct drm_display_mode *stdmode;
index 4e7c531..9113fa5 100644 (file)
@@ -175,6 +175,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
 
        DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
 };
 
 #define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
@@ -424,12 +428,13 @@ static void drm_cleanup(struct drm_device * dev)
 
        drm_ctxbitmap_cleanup(dev);
        drm_ht_remove(&dev->map_hash);
-       drm_mm_takedown(&dev->offset_manager);
+       drm_memrange_takedown(&dev->offset_manager);
        drm_ht_remove(&dev->object_hash);
 
-       drm_put_minor(&dev->primary);
+       drm_put_minor(dev, &dev->primary);
        if (drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_put_minor(&dev->control);
+               drm_put_minor(dev, &dev->control);
+
        if (drm_put_dev(dev))
                DRM_ERROR("Cannot unload module\n");
 }
index 03881ee..3b3a0a3 100644 (file)
@@ -262,6 +262,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
                goto out_free;
        }
 
+       if (dev->driver->driver_features & DRIVER_GEM)
+               drm_gem_open(dev, priv);
+
        if (dev->driver->open) {
                ret = dev->driver->open(dev, priv);
                if (ret < 0)
@@ -462,6 +465,9 @@ int drm_release(struct inode *inode, struct file *filp)
                }
        }
 
+       if (dev->driver->driver_features & DRIVER_GEM)
+               drm_gem_release(dev, file_priv);
+
        drm_fasync(-1, filp, 0);
 
        mutex_lock(&dev->ctxlist_mutex);
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
new file mode 100644 (file)
index 0000000..434155b
--- /dev/null
@@ -0,0 +1,420 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and the general API provides only generic operations --
+ * allocating objects, reading/writing data with the CPU, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file.  However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls.  The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
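+/*
+ * For illustration, the userspace-visible flow for sharing an object
+ * between two open DRM file descriptors then looks roughly like this
+ * (hypothetical snippet; the argument structs are the ones added to
+ * drm.h in this series):
+ *
+ *	struct drm_gem_flink flink = { .handle = handle };
+ *	ioctl(fd1, DRM_IOCTL_GEM_FLINK, &flink);
+ *
+ *	struct drm_gem_open op = { .name = flink.name };
+ *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op);
+ *
+ *	struct drm_gem_close close_args = { .handle = op.handle };
+ *	ioctl(fd2, DRM_IOCTL_GEM_CLOSE, &close_args);
+ *
+ * FLINK publishes a global name for a handle, OPEN turns that name back
+ * into a (new) handle plus the object size for another client, and
+ * CLOSE drops the handle reference again.
+ */
+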
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+       spin_lock_init(&dev->object_name_lock);
+       idr_init(&dev->object_name_idr);
+       atomic_set(&dev->object_count, 0);
+       atomic_set(&dev->object_memory, 0);
+       atomic_set(&dev->pin_count, 0);
+       atomic_set(&dev->pin_memory, 0);
+       atomic_set(&dev->gtt_count, 0);
+       atomic_set(&dev->gtt_memory, 0);
+       return 0;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+       struct drm_gem_object *obj;
+
+       BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+       obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+       if (obj == NULL)
+               return NULL;
+
+       obj->dev = dev;
+       obj->filp = shmem_file_setup("drm mm object", size, 0);
+       if (IS_ERR(obj->filp)) {
+               kfree(obj);
+               return NULL;
+       }
+
+       kref_init(&obj->refcount);
+       kref_init(&obj->handlecount);
+       obj->size = size;
+       if (dev->driver->gem_init_object != NULL &&
+           dev->driver->gem_init_object(obj) != 0) {
+               fput(obj->filp);
+               kfree(obj);
+               return NULL;
+       }
+       atomic_inc(&dev->object_count);
+       atomic_add(obj->size, &dev->object_memory);
+       return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+       struct drm_device *dev;
+       struct drm_gem_object *obj;
+
+       /* This is gross. The idr system doesn't let us try a delete and
+        * return an error code.  It just spews if you fail at deleting.
+        * So, we have to grab a lock around finding the object and then
+        * doing the delete on it and dropping the refcount, or the user
+        * could race us to double-decrement the refcount and cause a
+        * use-after-free later.  Given the frequency of our handle lookups,
+        * we may want to use ida for number allocation and a hash table
+        * for the pointers, anyway.
+        */
+       spin_lock(&filp->table_lock);
+
+       /* Check if we currently have a reference on the object */
+       obj = idr_find(&filp->object_idr, handle);
+       if (obj == NULL) {
+               spin_unlock(&filp->table_lock);
+               return -EINVAL;
+       }
+       dev = obj->dev;
+
+       /* Release reference and decrement refcount. */
+       idr_remove(&filp->object_idr, handle);
+       spin_unlock(&filp->table_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_handle_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to unreference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                      struct drm_gem_object *obj,
+                      int *handlep)
+{
+       int     ret;
+
+       /*
+        * Get the user-visible handle using idr.
+        */
+again:
+       /* ensure there is space available to allocate a handle */
+       if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+               return -ENOMEM;
+
+       /* do the allocation under our spinlock */
+       spin_lock(&file_priv->table_lock);
+       ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
+       spin_unlock(&file_priv->table_lock);
+       if (ret == -EAGAIN)
+               goto again;
+
+       if (ret != 0)
+               return ret;
+
+       drm_gem_object_handle_reference(obj);
+       return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+                     int handle)
+{
+       struct drm_gem_object *obj;
+
+       spin_lock(&filp->table_lock);
+
+       /* Check if we currently have a reference on the object */
+       obj = idr_find(&filp->object_idr, handle);
+       if (obj == NULL) {
+               spin_unlock(&filp->table_lock);
+               return NULL;
+       }
+
+       drm_gem_object_reference(obj);
+
+       spin_unlock(&filp->table_lock);
+
+       return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
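+/*
+ * Callers follow a lookup/use/unreference pattern (sketch; the i915
+ * pread ioctl later in this series is a concrete instance):
+ *
+ *	obj = drm_gem_object_lookup(dev, file_priv, handle);
+ *	if (obj == NULL)
+ *		return -EINVAL;
+ *	... use the object ...
+ *	mutex_lock(&dev->struct_mutex);
+ *	drm_gem_object_unreference(obj);
+ *	mutex_unlock(&dev->struct_mutex);
+ */
+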
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_gem_close *args = data;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       ret = drm_gem_handle_delete(file_priv, args->handle);
+
+       return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not keep the object alive on its own: the
+ * name (and the reference the name table holds) is dropped when the
+ * last handle to the object is closed.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_gem_flink *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EINVAL;
+
+again:
+       if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
+               mutex_lock(&dev->struct_mutex);
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return -ENOMEM;
+       }
+
+       spin_lock(&dev->object_name_lock);
+       if (obj->name) {
+               spin_unlock(&dev->object_name_lock);
+               mutex_lock(&dev->struct_mutex);
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return -EEXIST;
+       }
+       ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+                                &obj->name);
+       spin_unlock(&dev->object_name_lock);
+       if (ret == -EAGAIN)
+               goto again;
+
+       if (ret != 0) {
+               mutex_lock(&dev->struct_mutex);
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       /*
+        * Leave the reference from the lookup around as the
+        * name table now holds one
+        */
+       args->name = (uint64_t) obj->name;
+
+       return 0;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+{
+       struct drm_gem_open *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+       int handle;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       spin_lock(&dev->object_name_lock);
+       obj = idr_find(&dev->object_name_idr, (int) args->name);
+       if (obj)
+               drm_gem_object_reference(obj);
+       spin_unlock(&dev->object_name_lock);
+       if (!obj)
+               return -ENOENT;
+
+       ret = drm_gem_handle_create(file_priv, obj, &handle);
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       args->handle = handle;
+       args->size = obj->size;
+
+       return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+       idr_init(&file_private->object_idr);
+       spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+       struct drm_gem_object *obj = ptr;
+
+       drm_gem_object_handle_unreference(obj);
+
+       return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+       mutex_lock(&dev->struct_mutex);
+       idr_for_each(&file_private->object_idr,
+                    &drm_gem_object_release_handle, NULL);
+
+       idr_destroy(&file_private->object_idr);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object.
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+       struct drm_gem_object *obj =
+               container_of(kref, struct drm_gem_object, refcount);
+       struct drm_device *dev = obj->dev;
+
+       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (dev->driver->gem_free_object != NULL)
+               dev->driver->gem_free_object(obj);
+
+       fput(obj->filp);
+       atomic_dec(&dev->object_count);
+       atomic_sub(obj->size, &dev->object_memory);
+       kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed.
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory.
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+       struct drm_gem_object *obj = container_of(kref,
+                                                 struct drm_gem_object,
+                                                 handlecount);
+       struct drm_device *dev = obj->dev;
+
+       /* Remove any name for this object */
+       spin_lock(&dev->object_name_lock);
+       if (obj->name) {
+               idr_remove(&dev->object_name_idr, obj->name);
+               spin_unlock(&dev->object_name_lock);
+               /*
+                * The object name held a reference to this object, drop
+                * that now.
+                */
+               drm_gem_object_unreference(obj);
+       } else
+               spin_unlock(&dev->object_name_lock);
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
index e1c9305..0dfbe57 100644 (file)
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
            p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
                return -EINVAL;
 
-       p->irq = dev->irq;
+       p->irq = dev->pdev->irq;
 
        DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
                  p->irq);
@@ -133,6 +133,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 
        setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
                    (unsigned long)dev);
+       init_timer_deferrable(&dev->vblank_disable_timer);
        spin_lock_init(&dev->vbl_lock);
        atomic_set(&dev->vbl_signal_pending, 0);
        dev->num_crtcs = num_crtcs;
@@ -284,7 +285,7 @@ int drm_irq_install(struct drm_device * dev)
        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;
 
-       if (dev->irq == 0)
+       if (dev->pdev->irq == 0)
                return -EINVAL;
 
        mutex_lock(&dev->struct_mutex);
@@ -302,7 +303,7 @@ int drm_irq_install(struct drm_device * dev)
        dev->irq_enabled = 1;
        mutex_unlock(&dev->struct_mutex);
 
-       DRM_DEBUG("irq=%d\n", dev->irq);
+       DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
        /* Before installing handler */
        dev->driver->irq_preinstall(dev);
@@ -311,7 +312,7 @@ int drm_irq_install(struct drm_device * dev)
        if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
                sh_flags = IRQF_SHARED;
 
-       ret = request_irq(dev->irq, dev->driver->irq_handler,
+       ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
                          sh_flags, dev->devname, dev);
        if (ret < 0) {
                mutex_lock(&dev->struct_mutex);
@@ -319,6 +320,10 @@ int drm_irq_install(struct drm_device * dev)
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
+       /* Expose the device irq to device drivers that want to export it for
+        * whatever reason.
+        */
+       dev->irq = dev->pdev->irq;
 
        /* After installing handler */
        ret = dev->driver->irq_postinstall(dev);
@@ -354,11 +359,11 @@ int drm_irq_uninstall(struct drm_device * dev)
        if (!irq_enabled)
                return -EINVAL;
 
-       DRM_DEBUG("irq=%d\n", dev->irq);
+       DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
        dev->driver->irq_uninstall(dev);
 
-       free_irq(dev->irq, dev);
+       free_irq(dev->pdev->irq, dev);
 
        drm_vblank_cleanup(dev);
 
@@ -396,7 +401,7 @@ int drm_control(struct drm_device *dev, void *data,
                if (drm_core_check_feature(dev, DRIVER_MODESET))
                        return 0;
                if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-                   ctl->irq != dev->irq)
+                   ctl->irq != dev->pdev->irq)
                        return -EINVAL;
                return drm_irq_install(dev);
        case DRM_UNINST_HANDLER:
@@ -579,7 +584,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
        int ret = 0;
        unsigned int flags, seq, crtc;
 
-       if ((!dev->irq) || (!dev->irq_enabled))
+       if ((!dev->pdev->irq) || (!dev->irq_enabled))
                return -EINVAL;
 
        if (vblwait->request.type &
index 6e90e97..59f95e4 100644 (file)
@@ -389,7 +389,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
 }
 EXPORT_SYMBOL(drm_idlelock_release);
 
-
 int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 {
        struct drm_master *master = file_priv->master;
index 75f5b52..4b494f9 100644 (file)
@@ -310,6 +310,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
        return drm_agp_free_memory(handle) ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -322,6 +323,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
        return drm_agp_unbind_memory(handle);
 }
+EXPORT_SYMBOL(drm_unbind_agp);
 
 #else  /* __OS_HAS_AGP*/
 static void *agp_remap(unsigned long offset, unsigned long size,
similarity index 69%
rename from linux-core/drm_mm.c
rename to linux-core/drm_memrange.c
index 28726a6..5921eff 100644 (file)
 #include "drmP.h"
 #include <linux/slab.h>
 
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
+unsigned long drm_memrange_tail_space(struct drm_memrange *mm)
 {
        struct list_head *tail_node;
-       struct drm_mm_node *entry;
+       struct drm_memrange_node *entry;
 
        tail_node = mm->ml_entry.prev;
-       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
        if (!entry->free)
                return 0;
 
        return entry->size;
 }
 
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long size)
 {
        struct list_head *tail_node;
-       struct drm_mm_node *entry;
+       struct drm_memrange_node *entry;
 
        tail_node = mm->ml_entry.prev;
-       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
        if (!entry->free)
                return -ENOMEM;
 
@@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
 }
 
 
-static int drm_mm_create_tail_node(struct drm_mm *mm,
+static int drm_memrange_create_tail_node(struct drm_memrange *mm,
                            unsigned long start,
                            unsigned long size)
 {
-       struct drm_mm_node *child;
+       struct drm_memrange_node *child;
 
-       child = (struct drm_mm_node *)
+       child = (struct drm_memrange_node *)
                drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
        if (!child)
                return -ENOMEM;
@@ -98,26 +98,26 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
 }
 
 
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+int drm_memrange_add_space_to_tail(struct drm_memrange *mm, unsigned long size)
 {
        struct list_head *tail_node;
-       struct drm_mm_node *entry;
+       struct drm_memrange_node *entry;
 
        tail_node = mm->ml_entry.prev;
-       entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+       entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
        if (!entry->free) {
-               return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+               return drm_memrange_create_tail_node(mm, entry->start + entry->size, size);
        }
        entry->size += size;
        return 0;
 }
 
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+static struct drm_memrange_node *drm_memrange_split_at_start(struct drm_memrange_node *parent,
                                            unsigned long size)
 {
-       struct drm_mm_node *child;
+       struct drm_memrange_node *child;
 
-       child = (struct drm_mm_node *)
+       child = (struct drm_memrange_node *)
                drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
        if (!child)
                return NULL;
@@ -137,19 +137,19 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
        return child;
 }
 
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
                                unsigned long size, unsigned alignment)
 {
 
-       struct drm_mm_node *align_splitoff = NULL;
-       struct drm_mm_node *child;
+       struct drm_memrange_node *align_splitoff = NULL;
+       struct drm_memrange_node *child;
        unsigned tmp = 0;
 
        if (alignment)
                tmp = parent->start % alignment;
 
        if (tmp) {
-               align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+               align_splitoff = drm_memrange_split_at_start(parent, alignment - tmp);
                if (!align_splitoff)
                        return NULL;
        }
@@ -159,40 +159,41 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
                parent->free = 0;
                return parent;
        } else {
-               child = drm_mm_split_at_start(parent, size);
+               child = drm_memrange_split_at_start(parent, size);
        }
 
        if (align_splitoff)
-               drm_mm_put_block(align_splitoff);
+               drm_memrange_put_block(align_splitoff);
 
        return child;
 }
+EXPORT_SYMBOL(drm_memrange_get_block);
 
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
  */
 
-void drm_mm_put_block(struct drm_mm_node * cur)
+void drm_memrange_put_block(struct drm_memrange_node * cur)
 {
 
-       struct drm_mm *mm = cur->mm;
+       struct drm_memrange *mm = cur->mm;
        struct list_head *cur_head = &cur->ml_entry;
        struct list_head *root_head = &mm->ml_entry;
-       struct drm_mm_node *prev_node = NULL;
-       struct drm_mm_node *next_node;
+       struct drm_memrange_node *prev_node = NULL;
+       struct drm_memrange_node *next_node;
 
        int merged = 0;
 
        if (cur_head->prev != root_head) {
-               prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+               prev_node = list_entry(cur_head->prev, struct drm_memrange_node, ml_entry);
                if (prev_node->free) {
                        prev_node->size += cur->size;
                        merged = 1;
                }
        }
        if (cur_head->next != root_head) {
-               next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+               next_node = list_entry(cur_head->next, struct drm_memrange_node, ml_entry);
                if (next_node->free) {
                        if (merged) {
                                prev_node->size += next_node->size;
@@ -215,16 +216,16 @@ void drm_mm_put_block(struct drm_mm_node * cur)
                drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
        }
 }
-EXPORT_SYMBOL(drm_mm_put_block);
+EXPORT_SYMBOL(drm_memrange_put_block);
 
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * mm,
                                  unsigned long size,
                                  unsigned alignment, int best_match)
 {
        struct list_head *list;
        const struct list_head *free_stack = &mm->fl_entry;
-       struct drm_mm_node *entry;
-       struct drm_mm_node *best;
+       struct drm_memrange_node *entry;
+       struct drm_memrange_node *best;
        unsigned long best_size;
        unsigned wasted;
 
@@ -232,7 +233,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
        best_size = ~0UL;
 
        list_for_each(list, free_stack) {
-               entry = list_entry(list, struct drm_mm_node, fl_entry);
+               entry = list_entry(list, struct drm_memrange_node, fl_entry);
                wasted = 0;
 
                if (entry->size < size)
@@ -257,30 +258,31 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 
        return best;
 }
+EXPORT_SYMBOL(drm_memrange_search_free);
 
-int drm_mm_clean(struct drm_mm * mm)
+int drm_memrange_clean(struct drm_memrange * mm)
 {
        struct list_head *head = &mm->ml_entry;
 
        return (head->next->next == head);
 }
 
-int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+int drm_memrange_init(struct drm_memrange * mm, unsigned long start, unsigned long size)
 {
        INIT_LIST_HEAD(&mm->ml_entry);
        INIT_LIST_HEAD(&mm->fl_entry);
 
-       return drm_mm_create_tail_node(mm, start, size);
+       return drm_memrange_create_tail_node(mm, start, size);
 }
 
-EXPORT_SYMBOL(drm_mm_init);
+EXPORT_SYMBOL(drm_memrange_init);
 
-void drm_mm_takedown(struct drm_mm * mm)
+void drm_memrange_takedown(struct drm_memrange * mm)
 {
        struct list_head *bnode = mm->fl_entry.next;
-       struct drm_mm_node *entry;
+       struct drm_memrange_node *entry;
 
-       entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+       entry = list_entry(bnode, struct drm_memrange_node, fl_entry);
 
        if (entry->ml_entry.next != &mm->ml_entry ||
            entry->fl_entry.next != &mm->fl_entry) {
@@ -292,20 +294,4 @@ void drm_mm_takedown(struct drm_mm * mm)
        list_del(&entry->ml_entry);
        drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
 }
-
-EXPORT_SYMBOL(drm_mm_takedown);
-
-void drm_mm_print(struct drm_mm *mm, const char *name)
-{
-       struct list_head *list;
-       const struct list_head *mm_stack = &mm->ml_entry;
-       struct drm_mm_node *entry;
-
-       DRM_DEBUG("Memory usage for '%s'\n", name ? name : "unknown");
-       list_for_each(list, mm_stack) {
-               entry = list_entry(list, struct drm_mm_node, ml_entry);
-               DRM_DEBUG("\t0x%08lx %li %s pages\n", entry->start, entry->size,
-                       entry->free ? "free" : "used");
-       }
-}
-EXPORT_SYMBOL(drm_mm_print);
+EXPORT_SYMBOL(drm_memrange_takedown);
index 7feacd3..d0c34ca 100644 (file)
@@ -300,7 +300,12 @@ struct drm_ttm_backend_func {
        void (*destroy) (struct drm_ttm_backend *backend);
 };
 
-
+/**
+ * This structure associates a set of flags and methods with a drm_ttm
+ * object, and will also be subclassed by the particular backend.
+ *
+ * \sa #drm_agp_ttm_backend
+ */
 struct drm_ttm_backend {
        struct drm_device *dev;
        uint32_t flags;
@@ -412,7 +417,7 @@ extern int drm_ttm_destroy(struct drm_ttm *ttm);
  */
 
 struct drm_bo_mem_reg {
-       struct drm_mm_node *mm_node;
+       struct drm_memrange_node *mm_node;
        unsigned long size;
        unsigned long num_pages;
        uint32_t page_alignment;
@@ -493,7 +498,7 @@ struct drm_buffer_object {
        unsigned long num_pages;
 
        /* For pinned buffers */
-       struct drm_mm_node *pinned_node;
+       struct drm_memrange_node *pinned_node;
        uint32_t pinned_mem_type;
        struct list_head pinned_lru;
 
@@ -528,7 +533,7 @@ struct drm_mem_type_manager {
        int has_type;
        int use_type;
        int kern_init_type;
-       struct drm_mm manager;
+       struct drm_memrange manager;
        struct list_head lru;
        struct list_head pinned;
        uint32_t flags;
index 7f18520..127a798 100644 (file)
@@ -51,6 +51,10 @@ static int drm_bufs_info(char *buf, char **start, off_t offset,
                         int request, int *eof, void *data);
 static int drm_objects_info(char *buf, char **start, off_t offset,
                         int request, int *eof, void *data);
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+                            int request, int *eof, void *data);
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+                              int request, int *eof, void *data);
 #if DRM_DEBUG_CODE
 static int drm_vma_info(char *buf, char **start, off_t offset,
                        int request, int *eof, void *data);
@@ -70,6 +74,8 @@ static struct drm_proc_list {
        {"queues", drm_queues_info},
        {"bufs", drm_bufs_info},
        {"objects", drm_objects_info},
+       {"gem_names", drm_gem_name_info},
+       {"gem_objects", drm_gem_object_info},
 #if DRM_DEBUG_CODE
        {"vma", drm_vma_info},
 #endif
@@ -586,6 +592,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
        return ret;
 }
 
+struct drm_gem_name_info_data {
+       int                     len;
+       char                    *buf;
+       int                     eof;
+};
+
+static int drm_gem_one_name_info (int id, void *ptr, void *data)
+{
+       struct drm_gem_object *obj = ptr;
+       struct drm_gem_name_info_data   *nid = data;
+
+       DRM_INFO ("name %d size %d\n", obj->name, obj->size);
+       if (nid->eof)
+               return 0;
+       
+       nid->len += sprintf (&nid->buf[nid->len],
+                            "%6d%9d%8d%9d\n",
+                            obj->name, obj->size,
+                            atomic_read(&obj->handlecount.refcount),
+                            atomic_read(&obj->refcount.refcount));
+       if (nid->len > DRM_PROC_LIMIT) {
+               nid->eof = 1;
+               return 0;
+       }
+       return 0;
+}
+
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+                            int request, int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       struct drm_gem_name_info_data nid;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       nid.len = sprintf (buf, "  name     size handles refcount\n");
+       nid.buf = buf;
+       nid.eof = 0;
+       idr_for_each (&dev->object_name_idr, drm_gem_one_name_info, &nid);
+
+       *start = &buf[offset];
+       *eof = 0;
+       if (nid.len > request + offset)
+               return request;
+       *eof = 1;
+       return nid.len - offset;
+}
+
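+/*
+ * With the fixed-width format above, a hypothetical object with name 1,
+ * size 4096, one open handle and two references would show up in
+ * /proc/dri/<minor>/gem_names as:
+ *
+ *	  name     size handles refcount
+ *	     1     4096       1        2
+ */
+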
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+                              int request, int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("%d objects\n", atomic_read (&dev->object_count));
+       DRM_PROC_PRINT("%d object bytes\n", atomic_read (&dev->object_memory));
+       DRM_PROC_PRINT("%d pinned\n", atomic_read (&dev->pin_count));
+       DRM_PROC_PRINT("%d pin bytes\n", atomic_read (&dev->pin_memory));
+       DRM_PROC_PRINT("%d gtt bytes\n", atomic_read (&dev->gtt_memory));
+       DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
 #if DRM_DEBUG_CODE
 
 static int drm__vma_info(char *buf, char **start, off_t offset, int request,
index 8421a93..7c16f68 100644 (file)
@@ -88,34 +88,34 @@ EXPORT_SYMBOL(drm_sman_init);
 static void *drm_sman_mm_allocate(void *private, unsigned long size,
                                  unsigned alignment)
 {
-       struct drm_mm *mm = (struct drm_mm *) private;
-       struct drm_mm_node *tmp;
+       struct drm_memrange *mm = (struct drm_memrange *) private;
+       struct drm_memrange_node *tmp;
 
-       tmp = drm_mm_search_free(mm, size, alignment, 1);
+       tmp = drm_memrange_search_free(mm, size, alignment, 1);
        if (!tmp) {
                return NULL;
        }
-       tmp = drm_mm_get_block(tmp, size, alignment);
+       tmp = drm_memrange_get_block(tmp, size, alignment);
        return tmp;
 }
 
 static void drm_sman_mm_free(void *private, void *ref)
 {
-       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+       struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
 
-       drm_mm_put_block(node);
+       drm_memrange_put_block(node);
 }
 
 static void drm_sman_mm_destroy(void *private)
 {
-       struct drm_mm *mm = (struct drm_mm *) private;
-       drm_mm_takedown(mm);
+       struct drm_memrange *mm = (struct drm_memrange *) private;
+       drm_memrange_takedown(mm);
        drm_free(mm, sizeof(*mm), DRM_MEM_MM);
 }
 
 static unsigned long drm_sman_mm_offset(void *private, void *ref)
 {
-       struct drm_mm_node *node = (struct drm_mm_node *) ref;
+       struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
        return node->start;
 }
 
@@ -124,7 +124,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
                   unsigned long start, unsigned long size)
 {
        struct drm_sman_mm *sman_mm;
-       struct drm_mm *mm;
+       struct drm_memrange *mm;
        int ret;
 
        BUG_ON(manager >= sman->num_managers);
@@ -135,7 +135,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
                return -ENOMEM;
        }
        sman_mm->private = mm;
-       ret = drm_mm_init(mm, start, size);
+       ret = drm_memrange_init(mm, start, size);
 
        if (ret) {
                drm_free(mm, sizeof(*mm), DRM_MEM_MM);
index 39a39fe..0299776 100644 (file)
@@ -45,7 +45,7 @@
 /*
 * A class that is an abstraction of a simple memory allocator.
  * The sman implementation provides a default such allocator
- * using the drm_mm.c implementation. But the user can replace it.
+ * using the drm_memrange.c implementation. But the user can replace it.
  * See the SiS implementation, which may use the SiS FB kernel module
  * for memory management.
  */
@@ -116,7 +116,7 @@ extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
                         unsigned int user_order, unsigned int owner_order);
 
 /*
- * Initialize a drm_mm.c allocator. Should be called only once for each
+ * Initialize a drm_memrange.c allocator. Should be called only once for each
 * manager unless a customized allocator is used.
  */
 
index 45b8f38..c62b901 100644 (file)
@@ -201,15 +201,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
        if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
                return -ENOMEM;
 
-       if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-                       DRM_FILE_PAGE_OFFSET_SIZE)) {
+       if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+                             DRM_FILE_PAGE_OFFSET_SIZE)) {
                drm_ht_remove(&dev->map_hash);
                return -ENOMEM;
        }
 
        if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
                drm_ht_remove(&dev->map_hash);
-               drm_mm_takedown(&dev->offset_manager);
+               drm_memrange_takedown(&dev->offset_manager);
                return -ENOMEM;
        }
 
@@ -249,7 +249,16 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
                goto error_out_unreg;
        }
 
+       if (driver->driver_features & DRIVER_GEM) {
+               retcode = drm_gem_init (dev);
+               if (retcode) {
+                       DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+                       goto error_out_unreg;
+               }
+       }
+
        drm_fence_manager_init(dev);
+
        return 0;
 
 error_out_unreg:
@@ -300,6 +309,13 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
                        DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
                        goto err_mem;
                }
+               if (dev->driver->proc_init) {
+                       ret = dev->driver->proc_init(new_minor);
+                       if (ret) {
+                               DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n");
+                               goto err_mem;
+                       }
+               }
        } else
                new_minor->dev_root = NULL;
 
@@ -316,8 +332,11 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
 
 
 err_g2:
-       if (new_minor->type == DRM_MINOR_LEGACY)
+       if (new_minor->type == DRM_MINOR_LEGACY) {
+               if (dev->driver->proc_cleanup)
+                       dev->driver->proc_cleanup(new_minor);
                drm_proc_cleanup(new_minor, drm_proc_root);
+       }
 err_mem:
        kfree(new_minor);
 err_idr:
@@ -389,10 +408,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 
        return 0;
 err_g5:
-       drm_put_minor(&dev->primary);
+       drm_put_minor(dev, &dev->primary);
 err_g4:
        if (drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_put_minor(&dev->control);
+               drm_put_minor(dev, &dev->control);
 err_g3:
        if (!drm_fb_loaded)
                pci_disable_device(pdev);
@@ -443,13 +462,16 @@ int drm_put_dev(struct drm_device * dev)
  * last minor released.
  *
  */
-int drm_put_minor(struct drm_minor **minor_p)
+int drm_put_minor(struct drm_device *dev, struct drm_minor **minor_p)
 {
        struct drm_minor *minor = *minor_p;
        DRM_DEBUG("release secondary minor %d\n", minor->index);
 
-       if (minor->type == DRM_MINOR_LEGACY)
+       if (minor->type == DRM_MINOR_LEGACY) {
+               if (dev->driver->proc_cleanup)
+                       dev->driver->proc_cleanup(minor);
                drm_proc_cleanup(minor, drm_proc_root);
+       }
        drm_sysfs_device_remove(minor);
 
        idr_remove(&drm_minors_idr, minor->index);
index ebf54bf..77c8639 100644 (file)
@@ -221,7 +221,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
                goto out;
        }
 
-       ch7xxx->quiet = FALSE;
+       ch7xxx->quiet = false;
        DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
                  name, vendor, device);
        return true;
index 3a29ab6..788b072 100644 (file)
@@ -265,7 +265,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
        dvo->i2c_bus = i2cbus;
        dvo->i2c_bus->slave_addr = dvo->slave_addr;
        dvo->dev_priv = priv;
-       priv->quiet = TRUE;
+       priv->quiet = true;
 
        if (!ivch_read(dvo, VR00, &temp))
                goto out;
index 8e26235..207fda8 100644 (file)
@@ -187,7 +187,7 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
        dvo->i2c_bus = i2cbus;
        dvo->i2c_bus->slave_addr = dvo->slave_addr;
        dvo->dev_priv = tfp;
-       tfp->quiet = TRUE;
+       tfp->quiet = true;
 
        if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
                DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
@@ -200,7 +200,7 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
                          id, i2cbus->adapter.name, i2cbus->slave_addr);
                goto out;
        }
-       tfp->quiet = FALSE;
+       tfp->quiet = false;
        return true;
 out:
        kfree(tfp);
index f755dcd..33a33e6 100644 (file)
@@ -48,11 +48,11 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_rightof = 1;
 module_param_named(i915_rightof, i915_rightof, int, 0400);
 
-#ifdef I915_HAVE_FENCE
+#if defined(I915_HAVE_FENCE) && defined(I915_TTM)
 extern struct drm_fence_driver i915_fence_driver;
 #endif
 
-#ifdef I915_HAVE_BUFFER
+#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
 
 static uint32_t i915_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
 static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
@@ -71,7 +71,7 @@ static struct drm_bo_driver i915_bo_driver = {
        .ttm_cache_flush = i915_flush_ttm,
        .command_stream_barrier = NULL,
 };
-#endif
+#endif /* ttm */
 
 static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 {
@@ -569,18 +569,22 @@ static int i915_resume(struct drm_device *dev)
 }
 
 static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void remove(struct pci_dev *pdev);
+
 static struct drm_driver driver = {
        /* don't use mtrr's here, the Xserver or user space app should
         * deal with them for intel hardware.
         */
        .driver_features =
            DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
-           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
        .load = i915_driver_load,
        .unload = i915_driver_unload,
        .firstopen = i915_driver_firstopen,
+       .open = i915_driver_open,
        .lastclose = i915_driver_lastclose,
        .preclose = i915_driver_preclose,
+       .postclose = i915_driver_postclose,
        .suspend = i915_suspend,
        .resume = i915_resume,
        .device_is_agp = i915_driver_device_is_agp,
@@ -596,7 +600,11 @@ static struct drm_driver driver = {
        .get_reg_ofs = drm_core_get_reg_ofs,
        .master_create = i915_master_create,
        .master_destroy = i915_master_destroy,
+       .proc_init = i915_gem_proc_init,
+       .proc_cleanup = i915_gem_proc_cleanup,
        .ioctls = i915_ioctls,
+       .gem_init_object = i915_gem_init_object,
+       .gem_free_object = i915_gem_free_object,
        .fops = {
                .owner = THIS_MODULE,
                .open = drm_open,
@@ -613,12 +621,12 @@ static struct drm_driver driver = {
                .name = DRIVER_NAME,
                .id_table = pciidlist,
                .probe = probe,
-               .remove = __devexit_p(drm_cleanup_pci),
+               .remove = remove,
                },
-#ifdef I915_HAVE_FENCE
+#if defined(I915_HAVE_FENCE) && defined(I915_TTM)
        .fence_driver = &i915_fence_driver,
 #endif
-#ifdef I915_HAVE_BUFFER
+#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
        .bo_driver = &i915_bo_driver,
 #endif
        .name = DRIVER_NAME,
@@ -631,7 +639,28 @@ static struct drm_driver driver = {
 
 static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-       return drm_get_dev(pdev, ent, &driver);
+       int ret;
+
+       /* On the 945G/GM, the chipset reports the MSI capability on the
+        * integrated graphics even though the support isn't actually there
+        * according to the published specs.  It doesn't appear to function
+        * correctly in testing on 945G.
+        * This may be a side effect of MSI having been made available for PEG
+        * and the registers being closely associated.
+        */
+       if (pdev->device != 0x2772 && pdev->device != 0x27A2)
+               (void)pci_enable_msi(pdev);
+
+       ret = drm_get_dev(pdev, ent, &driver);
+       if (ret && pdev->msi_enabled)
+               pci_disable_msi(pdev);
+       return ret;
+}
+
+static void remove(struct pci_dev *pdev)
+{
+       drm_cleanup_pci(pdev);
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
 }
 
 static int __init i915_init(void)
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
new file mode 100644 (file)
index 0000000..63f4b91
--- /dev/null
@@ -0,0 +1,2710 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_compat.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define WATCH_COHERENCY        0
+#define WATCH_BUF      0
+#define WATCH_EXEC     0
+#define WATCH_LRU      0
+#define WATCH_RELOC    0
+#define WATCH_INACTIVE 0
+#define WATCH_PWRITE   0
+
+#if WATCH_BUF || WATCH_EXEC || WATCH_PWRITE
+static void
+i915_gem_dump_object(struct drm_gem_object *obj, int len,
+                    const char *where, uint32_t mark);
+#endif
+
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+                           uint32_t read_domains,
+                           uint32_t write_domain);
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+                   struct drm_file *file_priv,
+                   uint32_t read_domains,
+                   uint32_t write_domain);
+
+static void
+i915_gem_clflush_object(struct drm_gem_object *obj);
+
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+                    unsigned long end)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (start >= end ||
+           (start & (PAGE_SIZE - 1)) != 0 ||
+           (end & (PAGE_SIZE - 1)) != 0) {
+               return -EINVAL;
+       }
+
+       drm_memrange_init(&dev_priv->mm.gtt_space, start,
+                         end - start);
+
+       dev->gtt_total = (uint32_t) (end - start);
+
+       return 0;
+}
+
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_i915_gem_init *args = data;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
+{
+       struct drm_i915_gem_create *args = data;
+       struct drm_gem_object *obj;
+       int handle, ret;
+
+       args->size = roundup(args->size, PAGE_SIZE);
+
+       /* Allocate the new object */
+       obj = drm_gem_object_alloc(dev, args->size);
+       if (obj == NULL)
+               return -ENOMEM;
+
+       ret = drm_gem_handle_create(file_priv, obj, &handle);
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_handle_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (ret)
+               return ret;
+
+       args->handle = handle;
+
+       return 0;
+}
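
For illustration, a minimal userspace sketch of driving this ioctl -- assuming
the DRM_IOCTL_I915_GEM_CREATE wrapper and the struct drm_i915_gem_create
layout from shared-core/i915_drm.h, with error handling trimmed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include "i915_drm.h"

	int main(void)
	{
		struct drm_i915_gem_create create;
		int fd = open("/dev/dri/card0", O_RDWR);  /* device node assumed */

		if (fd < 0)
			return 1;

		memset(&create, 0, sizeof(create));
		create.size = 16 * 4096;  /* kernel rounds up to whole pages */

		if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
			printf("created object, handle %u\n", create.handle);
		return 0;
	}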
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_i915_gem_pread *args = data;
+       struct drm_gem_object *obj;
+       ssize_t read;
+       loff_t offset;
+       int ret;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_set_domain(obj, file_priv,
+                                 I915_GEM_DOMAIN_CPU, 0);
+       if (ret) {
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+       offset = args->offset;
+
+       read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+                       args->size, &offset);
+       if (read != args->size) {
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               if (read < 0)
+                       return read;
+               else
+                       return -EINVAL;
+       }
+
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+#include "drm_compat.h"
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
+{
+       struct drm_i915_gem_pwrite *args = data;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       ssize_t remain;
+       loff_t offset;
+       char __user *user_data;
+       char *vaddr;
+       int i, o, l;
+       int ret = 0;
+       unsigned long pfn;
+       unsigned long unwritten;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EINVAL;
+
+       /* Bounds check destination.
+        *
+        * XXX: This could use review for overflow issues...
+        */
+       if (args->offset > obj->size || args->size > obj->size ||
+           args->offset + args->size > obj->size)
+               return -EFAULT;
+
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       remain = args->size;
+       if (!access_ok(VERIFY_READ, user_data, remain))
+               return -EFAULT;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_object_pin(obj, 0);
+       if (ret) {
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+       ret = i915_gem_set_domain(obj, file_priv,
+                                 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+       if (ret)
+               goto fail;
+
+       obj_priv = obj->driver_private;
+       offset = obj_priv->gtt_offset + args->offset;
+       obj_priv->dirty = 1;
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * i = page number
+                * o = offset within page
+                * l = bytes to copy
+                */
+               i = offset >> PAGE_SHIFT;
+               o = offset & (PAGE_SIZE-1);
+               l = remain;
+               if ((o + l) > PAGE_SIZE)
+                       l = PAGE_SIZE - o;
+
+               pfn = (dev->agp->base >> PAGE_SHIFT) + i;
+
+#ifdef DRM_KMAP_ATOMIC_PROT_PFN
+               /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
+                */
+               vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
+                                            __pgprot(__PAGE_KERNEL));
+#if WATCH_PWRITE
+               DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
+                        i, o, l, pfn, vaddr);
+#endif
+               unwritten = __copy_from_user_inatomic_nocache(vaddr + o, user_data, l);
+               kunmap_atomic(vaddr, KM_USER0);
+
+               if (unwritten)
+#endif
+               {
+                       vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+#if WATCH_PWRITE
+                       DRM_INFO("pwrite slow i %d o %d l %d pfn %ld vaddr %p\n",
+                                i, o, l, pfn, vaddr);
+#endif
+                       if (vaddr == NULL) {
+                               ret = -EFAULT;
+                               goto fail;
+                       }
+                       unwritten = __copy_from_user(vaddr + o, user_data, l);
+#if WATCH_PWRITE
+                       DRM_INFO("unwritten %ld\n", unwritten);
+#endif
+                       iounmap(vaddr);
+                       if (unwritten) {
+                               ret = -EFAULT;
+                               goto fail;
+                       }
+               }
+
+               remain -= l;
+               user_data += l;
+               offset += l;
+       }
+#if WATCH_PWRITE && 1
+       i915_gem_clflush_object(obj);
+       i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
+       i915_gem_clflush_object(obj);
+#endif
+
+fail:
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+#if WATCH_PWRITE
+       if (ret)
+               DRM_INFO("pwrite failed %d\n", ret);
+#endif
+       return ret;
+}
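
The i/o/l bookkeeping above is just a per-page split of a linear range; a
standalone sketch of that arithmetic alone, with illustrative values rather
than kernel state:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned long offset = 0x1830;	/* starts partway into page 1 */
		unsigned long remain = 8192;

		while (remain > 0) {
			unsigned long i = offset >> PAGE_SHIFT;     /* page number */
			unsigned long o = offset & (PAGE_SIZE - 1); /* offset in page */
			unsigned long l = remain;                   /* bytes this pass */

			if (o + l > PAGE_SIZE)
				l = PAGE_SIZE - o;
			printf("page %lu offset 0x%lx copy %lu\n", i, o, l);
			remain -= l;
			offset += l;
		}
		return 0;
	}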
+
+/**
+ * Called when user space prepares to use an object
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_i915_gem_set_domain *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_set_domain(obj, file_priv,
+                                 args->read_domains, args->write_domain);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ * Called when user space has done writes to this buffer
+ */
+int
+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
+{
+       struct drm_i915_gem_sw_finish *args = data;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret = 0;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       mutex_lock(&dev->struct_mutex);
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+#if WATCH_BUF
+       DRM_INFO("%s: sw_finish %d (%p)\n",
+                __func__, args->handle, obj);
+#endif
+       obj_priv = obj->driver_private;
+
+       /* Pinned buffers may be scanout, so flush the cache */
+       if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
+               i915_gem_clflush_object(obj);
+               drm_agp_chipset_flush(dev);
+       }
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+{
+       struct drm_i915_gem_mmap *args = data;
+       struct drm_gem_object *obj;
+       loff_t offset;
+       unsigned long addr;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EINVAL;
+
+       offset = args->offset;
+
+       down_write(&current->mm->mmap_sem);
+       addr = do_mmap(obj->filp, 0, args->size,
+                      PROT_READ | PROT_WRITE, MAP_SHARED,
+                      args->offset);
+       up_write(&current->mm->mmap_sem);
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR((void *)addr))
+               return addr;
+
+       args->addr_ptr = (uint64_t) addr;
+
+       return 0;
+}
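
A hedged userspace counterpart, assuming the DRM_IOCTL_I915_GEM_MMAP wrapper
and the struct drm_i915_gem_mmap layout from shared-core/i915_drm.h; fd and
handle would come from open() and the create ioctl shown earlier:

	#include <string.h>
	#include <sys/ioctl.h>
	#include "i915_drm.h"

	/* Hypothetical helper: map 'size' bytes of a GEM object for CPU access. */
	static void *gem_mmap(int fd, unsigned int handle, unsigned long size)
	{
		struct drm_i915_gem_mmap arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;
		arg.offset = 0;		/* map from the start of the object */
		arg.size = size;

		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) != 0)
			return NULL;

		/* addr_ptr carries back the address the kernel's do_mmap() picked */
		return (void *)(unsigned long)arg.addr_ptr;
	}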
+
+static void
+i915_gem_object_free_page_list(struct drm_gem_object *obj)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page_count = obj->size / PAGE_SIZE;
+       int i;
+
+       if (obj_priv->page_list == NULL)
+               return;
+
+       for (i = 0; i < page_count; i++)
+               if (obj_priv->page_list[i] != NULL) {
+                       if (obj_priv->dirty)
+                               set_page_dirty(obj_priv->page_list[i]);
+                       mark_page_accessed(obj_priv->page_list[i]);
+                       page_cache_release(obj_priv->page_list[i]);
+               }
+       obj_priv->dirty = 0;
+
+       drm_free(obj_priv->page_list,
+                page_count * sizeof(struct page *),
+                DRM_MEM_DRIVER);
+       obj_priv->page_list = NULL;
+}
+
+static void
+i915_gem_object_move_to_active(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       /* Add a reference if we're newly entering the active list. */
+       if (!obj_priv->active) {
+               drm_gem_object_reference(obj);
+               obj_priv->active = 1;
+       }
+       /* Move from whatever list we were on to the tail of execution. */
+       list_move_tail(&obj_priv->list,
+                      &dev_priv->mm.active_list);
+}
+
+#if WATCH_INACTIVE
+static void
+i915_verify_inactive(struct drm_device *dev, char *file, int line)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+               obj = obj_priv->obj;
+               if (obj_priv->pin_count || obj_priv->active ||
+                   (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+                                          I915_GEM_DOMAIN_GTT)))
+                       DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
+                                 obj, obj_priv->pin_count, obj_priv->active,
+                                 obj->write_domain, file, line);
+       }
+}
+#else
+#define i915_verify_inactive(dev,file,line)
+#endif
+
+static void
+i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+       if (obj_priv->pin_count != 0)
+               list_del_init(&obj_priv->list);
+       else
+               list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+       if (obj_priv->active) {
+               obj_priv->active = 0;
+               drm_gem_object_unreference(obj);
+       }
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_mutex held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static uint32_t
+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *request;
+       uint32_t seqno;
+       int was_empty;
+       RING_LOCALS;
+
+       request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+       if (request == NULL)
+               return 0;
+
+       /* Grab the seqno we're going to make this request be, and bump the
+        * next (skipping 0 so it can be the reserved no-seqno value).
+        */
+       seqno = dev_priv->mm.next_gem_seqno;
+       dev_priv->mm.next_gem_seqno++;
+       if (dev_priv->mm.next_gem_seqno == 0)
+               dev_priv->mm.next_gem_seqno++;
+
+       BEGIN_LP_RING(4);
+       OUT_RING(MI_STORE_DWORD_INDEX);
+       OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       OUT_RING(seqno);
+
+       OUT_RING(GFX_OP_USER_INTERRUPT);
+       ADVANCE_LP_RING();
+
+       DRM_DEBUG("%d\n", seqno);
+
+       request->seqno = seqno;
+       request->emitted_jiffies = jiffies;
+       request->flush_domains = flush_domains;
+       was_empty = list_empty(&dev_priv->mm.request_list);
+       list_add_tail(&request->list, &dev_priv->mm.request_list);
+
+       if (was_empty)
+               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+       return seqno;
+}
+
+/**
+ * Command execution barrier
+ *
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+uint32_t
+i915_retire_commands(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+       uint32_t flush_domains = 0;
+       RING_LOCALS;
+
+       /* The sampler always gets flushed on i965 (sigh) */
+       if (IS_I965G(dev))
+               flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+       BEGIN_LP_RING(2);
+       OUT_RING(cmd);
+       OUT_RING(0); /* noop */
+       ADVANCE_LP_RING();
+       return flush_domains;
+}
+
+/**
+ * Moves buffers associated only with the given active seqno from the active
+ * to inactive list, potentially freeing them.
+ */
+static void
+i915_gem_retire_request(struct drm_device *dev,
+                       struct drm_i915_gem_request *request)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (request->flush_domains != 0) {
+               struct drm_i915_gem_object *obj_priv, *next;
+
+               /* First clear any buffers that were only waiting for a flush
+                * matching the one just retired.
+                */
+               list_for_each_entry_safe(obj_priv, next,
+                                        &dev_priv->mm.flushing_list, list) {
+                       struct drm_gem_object *obj = obj_priv->obj;
+
+                       if (obj->write_domain & request->flush_domains) {
+                               obj->write_domain = 0;
+                               i915_gem_object_move_to_inactive(obj);
+                       }
+               }
+       }
+
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate.
+        */
+       while (!list_empty(&dev_priv->mm.active_list)) {
+               struct drm_gem_object *obj;
+               struct drm_i915_gem_object *obj_priv;
+
+               obj_priv = list_first_entry(&dev_priv->mm.active_list,
+                                           struct drm_i915_gem_object,
+                                           list);
+               obj = obj_priv->obj;
+
+               /* If the seqno being retired doesn't match the oldest in the
+                * list, then the oldest in the list must still be newer than
+                * this seqno.
+                */
+               if (obj_priv->last_rendering_seqno != request->seqno)
+                       return;
+#if WATCH_LRU
+               DRM_INFO("%s: retire %d moves to inactive list %p\n",
+                        __func__, request->seqno, obj);
+#endif
+
+               if (obj->write_domain != 0) {
+                       list_move_tail(&obj_priv->list,
+                                      &dev_priv->mm.flushing_list);
+               } else {
+                       i915_gem_object_move_to_inactive(obj);
+               }
+       }
+}
+
+/**
+ * Returns true if seq1 is later than (or equal to) seq2, using signed
+ * subtraction so the comparison stays correct across 32-bit wraparound.
+ */
+static int
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+       return (int32_t)(seq1 - seq2) >= 0;
+}
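
A standalone check of the wraparound behaviour this signed subtraction buys
(same expression as above, compiled outside the kernel):

	#include <assert.h>
	#include <stdint.h>

	static int seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		assert(seqno_passed(2, 1));		/* ordinary case */
		assert(!seqno_passed(1, 2));
		assert(seqno_passed(1, 0xffffffff));	/* across the wrap */
		return 0;
	}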
+
+uint32_t
+i915_get_gem_seqno(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t seqno;
+
+       seqno = i915_get_gem_seqno(dev);
+
+       while (!list_empty(&dev_priv->mm.request_list)) {
+               struct drm_i915_gem_request *request;
+               uint32_t retiring_seqno;
+
+               request = list_first_entry(&dev_priv->mm.request_list,
+                                          struct drm_i915_gem_request,
+                                          list);
+               retiring_seqno = request->seqno;
+
+               if (i915_seqno_passed(seqno, retiring_seqno) ||
+                   dev_priv->mm.wedged) {
+                       i915_gem_retire_request(dev, request);
+
+                       list_del(&request->list);
+                       drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+               } else {
+                       break;
+               }
+       }
+}
+
+void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv;
+       struct drm_device *dev;
+
+       dev_priv = container_of(work, struct drm_i915_private,
+                               mm.retire_work.work);
+       dev = dev_priv->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_retire_requests(dev);
+       if (!list_empty(&dev_priv->mm.request_list))
+               schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret = 0;
+
+       BUG_ON(seqno == 0);
+
+       if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+               dev_priv->mm.waiting_gem_seqno = seqno;
+               i915_user_irq_on(dev);
+               ret = wait_event_interruptible(dev_priv->irq_queue,
+                                              i915_seqno_passed(i915_get_gem_seqno(dev),
+                                                                seqno) || dev_priv->mm.wedged);
+               i915_user_irq_off(dev);
+               dev_priv->mm.waiting_gem_seqno = 0;
+       }
+       if (dev_priv->mm.wedged)
+               ret = -EIO;
+
+       if (ret)
+               DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+                         __func__, ret, seqno, i915_get_gem_seqno(dev));
+
+       /* Directly dispatch request retiring.  While we have the work queue
+        * to handle this, the waiter on a request often wants an associated
+        * buffer to have made it to the inactive list, and we would need
+        * a separate wait queue to handle that.
+        */
+       if (ret == 0)
+               i915_gem_retire_requests(dev);
+
+       return ret;
+}
+
+static void
+i915_gem_flush(struct drm_device *dev,
+              uint32_t invalidate_domains,
+              uint32_t flush_domains)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t cmd;
+       RING_LOCALS;
+
+#if WATCH_EXEC
+       DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+                 invalidate_domains, flush_domains);
+#endif
+
+       if (flush_domains & I915_GEM_DOMAIN_CPU)
+               drm_agp_chipset_flush(dev);
+
+       if ((invalidate_domains | flush_domains) &
+           ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
+               /*
+                * read/write caches:
+                *
+                * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+                * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+                * also flushed at 2d versus 3d pipeline switches.
+                *
+                * read-only caches:
+                *
+                * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+                * MI_READ_FLUSH is set, and is always flushed on 965.
+                *
+                * I915_GEM_DOMAIN_COMMAND may not exist?
+                *
+                * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+                * invalidated when MI_EXE_FLUSH is set.
+                *
+                * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+                * invalidated with every MI_FLUSH.
+                *
+                * TLBs:
+                *
+                * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+                * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+                * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+                * are flushed at any MI_FLUSH.
+                */
+
+               cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+               if ((invalidate_domains|flush_domains) &
+                   I915_GEM_DOMAIN_RENDER)
+                       cmd &= ~MI_NO_WRITE_FLUSH;
+               if (!IS_I965G(dev)) {
+                       /*
+                        * On the 965, the sampler cache always gets flushed
+                        * and this bit is reserved.
+                        */
+                       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+                               cmd |= MI_READ_FLUSH;
+               }
+               if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+                       cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+               DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+               BEGIN_LP_RING(2);
+               OUT_RING(cmd);
+               OUT_RING(0); /* noop */
+               ADVANCE_LP_RING();
+       }
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static int
+i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int ret;
+
+       /* If there are writes queued to the buffer, flush and
+        * create a new seqno to wait for.
+        */
+       if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
+               uint32_t write_domain = obj->write_domain;
+#if WATCH_BUF
+               DRM_INFO("%s: flushing object %p from write domain %08x\n",
+                         __func__, obj, write_domain);
+#endif
+               i915_gem_flush(dev, 0, write_domain);
+               obj->write_domain = 0;
+
+               i915_gem_object_move_to_active(obj);
+               obj_priv->last_rendering_seqno = i915_add_request(dev,
+                                                                 write_domain);
+               BUG_ON(obj_priv->last_rendering_seqno == 0);
+#if WATCH_LRU
+               DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
+#endif
+       }
+       /* If there is rendering queued on the buffer being evicted, wait for
+        * it.
+        */
+       if (obj_priv->active) {
+#if WATCH_BUF
+               DRM_INFO("%s: object %p wait for seqno %08x\n",
+                         __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+               ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+               if (ret != 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+static int
+i915_gem_object_unbind(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int ret = 0;
+
+#if WATCH_BUF
+       DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
+       DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
+#endif
+       if (obj_priv->gtt_space == NULL)
+               return 0;
+
+       if (obj_priv->pin_count != 0) {
+               DRM_ERROR("Attempting to unbind pinned buffer\n");
+               return -EINVAL;
+       }
+
+       /* Wait for any rendering to complete
+        */
+       ret = i915_gem_object_wait_rendering(obj);
+       if (ret) {
+               DRM_ERROR("wait_rendering failed: %d\n", ret);
+               return ret;
+       }
+
+       /* Move the object to the CPU domain to ensure that
+        * any possible CPU writes while it's not in the GTT
+        * are flushed when we go to remap it. This will
+        * also ensure that all pending GPU writes are finished
+        * before we unbind.
+        */
+       ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
+                                        I915_GEM_DOMAIN_CPU);
+       if (ret) {
+               DRM_ERROR("set_domain failed: %d\n", ret);
+               return ret;
+       }
+
+       if (obj_priv->agp_mem != NULL) {
+               drm_unbind_agp(obj_priv->agp_mem);
+               drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+               obj_priv->agp_mem = NULL;
+       }
+
+       BUG_ON(obj_priv->active);
+
+       i915_gem_object_free_page_list(obj);
+
+       if (obj_priv->gtt_space) {
+               atomic_dec(&dev->gtt_count);
+               atomic_sub(obj->size, &dev->gtt_memory);
+
+               drm_memrange_put_block(obj_priv->gtt_space);
+               obj_priv->gtt_space = NULL;
+       }
+
+       /* Remove ourselves from the LRU list if present. */
+       if (!list_empty(&obj_priv->list))
+               list_del_init(&obj_priv->list);
+
+       return 0;
+}
+
+#if WATCH_BUF || WATCH_EXEC || WATCH_PWRITE
+static void
+i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
+                  uint32_t bias, uint32_t mark)
+{
+       uint32_t *mem = kmap_atomic(page, KM_USER0);
+       int i;
+
+       for (i = start; i < end; i += 4)
+               DRM_INFO("%08x: %08x%s\n",
+                         (int) (bias + i), mem[i / 4],
+                         (bias + i == mark) ? " ********" : "");
+       kunmap_atomic(mem, KM_USER0);
+       /* give syslog time to catch up */
+       msleep(1);
+}
+
+static void
+i915_gem_dump_object(struct drm_gem_object *obj, int len,
+                    const char *where, uint32_t mark)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page;
+
+       DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+       for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
+               int page_len, chunk, chunk_len;
+
+               page_len = len - page * PAGE_SIZE;
+               if (page_len > PAGE_SIZE)
+                       page_len = PAGE_SIZE;
+
+               for (chunk = 0; chunk < page_len; chunk += 128) {
+                       chunk_len = page_len - chunk;
+                       if (chunk_len > 128)
+                               chunk_len = 128;
+                       i915_gem_dump_page(obj_priv->page_list[page],
+                                          chunk, chunk + chunk_len,
+                                          obj_priv->gtt_offset +
+                                          page * PAGE_SIZE,
+                                          mark);
+               }
+       }
+}
+#endif
+
+#if WATCH_LRU
+static void
+i915_dump_lru(struct drm_device *dev, const char *where)
+{
+       struct drm_i915_private         *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object      *obj_priv;
+
+       DRM_INFO("active list %s {\n", where);
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+                           list)
+       {
+               DRM_INFO("    %p: %08x\n", obj_priv,
+                        obj_priv->last_rendering_seqno);
+       }
+       DRM_INFO("}\n");
+       DRM_INFO("flushing list %s {\n", where);
+       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+                           list)
+       {
+               DRM_INFO("    %p: %08x\n", obj_priv,
+                        obj_priv->last_rendering_seqno);
+       }
+       DRM_INFO("}\n");
+       DRM_INFO("inactive %s {\n", where);
+       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+               DRM_INFO("    %p: %08x\n", obj_priv,
+                        obj_priv->last_rendering_seqno);
+       }
+       DRM_INFO("}\n");
+}
+#endif
+
+static int
+i915_gem_evict_something(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret = 0;
+
+       for (;;) {
+               /* If there's an inactive buffer available now, grab it
+                * and be done.
+                */
+               if (!list_empty(&dev_priv->mm.inactive_list)) {
+                       obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
+                                                   struct drm_i915_gem_object,
+                                                   list);
+                       obj = obj_priv->obj;
+                       BUG_ON(obj_priv->pin_count != 0);
+#if WATCH_LRU
+                       DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+                       BUG_ON(obj_priv->active);
+
+                       /* Wait on the rendering and unbind the buffer. */
+                       ret = i915_gem_object_unbind(obj);
+                       break;
+               }
+
+               /* If we didn't get anything, but the ring is still processing
+                * things, wait for one of those things to finish and hopefully
+                * leave us a buffer to evict.
+                */
+               if (!list_empty(&dev_priv->mm.request_list)) {
+                       struct drm_i915_gem_request *request;
+
+                       request = list_first_entry(&dev_priv->mm.request_list,
+                                                  struct drm_i915_gem_request,
+                                                  list);
+
+                       ret = i915_wait_request(dev, request->seqno);
+                       if (ret)
+                               break;
+
+                       /* if waiting caused an object to become inactive,
+                        * then loop around and wait for it. Otherwise, we
+                        * assume that waiting freed and unbound something,
+                        * so there should now be some space in the GTT
+                        */
+                       if (!list_empty(&dev_priv->mm.inactive_list))
+                               continue;
+                       break;
+               }
+
+               /* If we didn't have anything on the request list but there
+                * are buffers awaiting a flush, emit one and try again.
+                * When we wait on it, those buffers waiting for that flush
+                * will get moved to inactive.
+                */
+               if (!list_empty(&dev_priv->mm.flushing_list)) {
+                       obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+                                                   struct drm_i915_gem_object,
+                                                   list);
+                       obj = obj_priv->obj;
+
+                       i915_gem_flush(dev,
+                                      obj->write_domain,
+                                      obj->write_domain);
+                       i915_add_request(dev, obj->write_domain);
+
+                       obj = NULL;
+                       continue;
+               }
+
+               DRM_ERROR("inactive empty %d request empty %d flushing empty %d\n",
+                         list_empty(&dev_priv->mm.inactive_list),
+                         list_empty(&dev_priv->mm.request_list),
+                         list_empty(&dev_priv->mm.flushing_list));
+               /* If we didn't do any of the above, there's nothing to be done
+                * and we just can't fit it in.
+                */
+               return -ENOMEM;
+       }
+       return ret;
+}
+
+static int
+i915_gem_object_get_page_list(struct drm_gem_object *obj)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page_count, i;
+       struct address_space *mapping;
+       struct inode *inode;
+       struct page *page;
+       int ret;
+
+       if (obj_priv->page_list)
+               return 0;
+
+       /* Get the list of pages out of our struct file.  They'll be pinned
+        * at this point until we release them.
+        */
+       page_count = obj->size / PAGE_SIZE;
+       BUG_ON(obj_priv->page_list != NULL);
+       obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+                                        DRM_MEM_DRIVER);
+       if (obj_priv->page_list == NULL) {
+               DRM_ERROR("Faled to allocate page list\n");
+               return -ENOMEM;
+       }
+
+       inode = obj->filp->f_path.dentry->d_inode;
+       mapping = inode->i_mapping;
+       for (i = 0; i < page_count; i++) {
+               page = find_get_page(mapping, i);
+               if (page == NULL || !PageUptodate(page)) {
+                       if (page) {
+                               page_cache_release(page);
+                               page = NULL;
+                       }
+                       ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL);
+                       if (ret) {
+                               DRM_ERROR("shmem_getpage failed: %d\n", ret);
+                               i915_gem_object_free_page_list(obj);
+                               return ret;
+                       }
+                       unlock_page(page);
+               }
+               obj_priv->page_list[i] = page;
+       }
+       return 0;
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_memrange_node *free_space;
+       int page_count, ret;
+
+       if (alignment == 0)
+               alignment = PAGE_SIZE;
+       if (alignment & (PAGE_SIZE - 1)) {
+               DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+               return -EINVAL;
+       }
+
+ search_free:
+       free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
+                                             obj->size,
+                                             alignment, 0);
+       if (free_space != NULL) {
+               obj_priv->gtt_space =
+                       drm_memrange_get_block(free_space, obj->size,
+                                              alignment);
+               if (obj_priv->gtt_space != NULL) {
+                       obj_priv->gtt_space->private = obj;
+                       obj_priv->gtt_offset = obj_priv->gtt_space->start;
+               }
+       }
+       if (obj_priv->gtt_space == NULL) {
+               /* The allocation failed, so the GTT is full.  If the LRU
+                * lists are also empty, there is nothing to evict and the
+                * object simply can't fit.
+                */
+#if WATCH_LRU
+               DRM_INFO("%s: GTT full, evicting something\n", __func__);
+#endif
+               if (list_empty(&dev_priv->mm.inactive_list) &&
+                   list_empty(&dev_priv->mm.flushing_list) &&
+                   list_empty(&dev_priv->mm.active_list)) {
+                       DRM_ERROR("GTT full, but LRU list empty\n");
+                       return -ENOMEM;
+               }
+
+               ret = i915_gem_evict_something(dev);
+               if (ret != 0) {
+                       DRM_ERROR("Failed to evict a buffer %d\n", ret);
+                       return ret;
+               }
+               goto search_free;
+       }
+
+#if WATCH_BUF
+       DRM_INFO("Binding object of size %d at 0x%08x\n",
+                obj->size, obj_priv->gtt_offset);
+#endif
+       ret = i915_gem_object_get_page_list(obj);
+       if (ret) {
+               drm_memrange_put_block(obj_priv->gtt_space);
+               obj_priv->gtt_space = NULL;
+               return ret;
+       }
+
+       page_count = obj->size / PAGE_SIZE;
+       /* Create an AGP memory structure pointing at our pages, and bind it
+        * into the GTT.
+        */
+       obj_priv->agp_mem = drm_agp_bind_pages(dev,
+                                              obj_priv->page_list,
+                                              page_count,
+                                              obj_priv->gtt_offset);
+       if (obj_priv->agp_mem == NULL) {
+               i915_gem_object_free_page_list(obj);
+               drm_memrange_put_block(obj_priv->gtt_space);
+               obj_priv->gtt_space = NULL;
+               return -ENOMEM;
+       }
+       atomic_inc(&dev->gtt_count);
+       atomic_add(obj->size, &dev->gtt_memory);
+
+       /* Assert that the object is not currently in any GPU domain. As it
+        * wasn't in the GTT, there shouldn't be any way it could have been in
+        * a GPU cache
+        */
+       BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+       BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+
+       return 0;
+}
+
+static void
+i915_gem_clflush_object(struct drm_gem_object *obj)
+{
+       struct drm_i915_gem_object      *obj_priv = obj->driver_private;
+
+       /* If we don't have a page list set up, the object isn't backed by
+        * pages yet, so there is nothing to flush; the cache flush will
+        * happen when the object is bound instead.
+        */
+       if (obj_priv->page_list == NULL)
+               return;
+
+       drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
+}
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations.
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate a
+ * pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ *     1. Allocated
+ *     2. Written by CPU
+ *     3. Mapped to GTT
+ *     4. Read by GPU
+ *     5. Unmapped from GTT
+ *     6. Freed
+ *
+ *     Let's take these a step at a time
+ *
+ *     1. Allocated
+ *             Pages allocated from the kernel may still have
+ *             cache contents, so we set them to (CPU, CPU) always.
+ *     2. Written by CPU (using pwrite)
+ *             The pwrite function calls set_domain (CPU, CPU) and
+ *             this function does nothing (as nothing changes)
+ *     3. Mapped to GTT
+ *             This function asserts that the object is not
+ *             currently in any GPU-based read or write domains
+ *     4. Read by GPU
+ *             i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ *             As write_domain is zero, this function adds in the
+ *             current read domains (CPU+COMMAND, 0).
+ *             flush_domains is set to CPU.
+ *             invalidate_domains is set to COMMAND
+ *             clflush is run to get data out of the CPU caches
+ *             then i915_dev_set_domain calls i915_gem_flush to
+ *             emit an MI_FLUSH and drm_agp_chipset_flush
+ *     5. Unmapped from GTT
+ *             i915_gem_object_unbind calls set_domain (CPU, CPU)
+ *             flush_domains and invalidate_domains end up both zero
+ *             so no flushing/invalidating happens
+ *     6. Freed
+ *             yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ *     1. Allocated
+ *     2. Mapped to GTT
+ *     3. Read/written by GPU
+ *     4. set_domain to (CPU,CPU)
+ *     5. Read/written by CPU
+ *     6. Read/written by GPU
+ *
+ *     1. Allocated
+ *             Same as last example, (CPU, CPU)
+ *     2. Mapped to GTT
+ *             Nothing changes (assertions find that it is not in the GPU)
+ *     3. Read/written by GPU
+ *             execbuffer calls set_domain (RENDER, RENDER)
+ *             flush_domains gets CPU
+ *             invalidate_domains gets GPU
+ *             clflush (obj)
+ *             MI_FLUSH and drm_agp_chipset_flush
+ *     4. set_domain (CPU, CPU)
+ *             flush_domains gets GPU
+ *             invalidate_domains gets CPU
+ *             wait_rendering (obj) to make sure all drawing is complete.
+ *             This will include an MI_FLUSH to get the data from GPU
+ *             to memory
+ *             clflush (obj) to invalidate the CPU cache
+ *             Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ *     5. Read/written by CPU
+ *             cache lines are loaded and dirtied
+ *     6. Read/written by GPU
+ *             Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ *     1. Allocated
+ *     2. Written by CPU
+ *     3. Read by GPU
+ *     4. Updated (written) by CPU again
+ *     5. Read by GPU
+ *
+ *     1. Allocated
+ *             (CPU, CPU)
+ *     2. Written by CPU
+ *             (CPU, CPU)
+ *     3. Read by GPU
+ *             (CPU+RENDER, 0)
+ *             flush_domains = CPU
+ *             invalidate_domains = RENDER
+ *             clflush (obj)
+ *             MI_FLUSH
+ *             drm_agp_chipset_flush
+ *     4. Updated (written) by CPU again
+ *             (CPU, CPU)
+ *             flush_domains = 0 (no previous write domain)
+ *             invalidate_domains = 0 (no new read domains)
+ *     5. Read by GPU
+ *             (CPU+RENDER, 0)
+ *             flush_domains = CPU
+ *             invalidate_domains = RENDER
+ *             clflush (obj)
+ *             MI_FLUSH
+ *             drm_agp_chipset_flush
+ */
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+                           uint32_t read_domains,
+                           uint32_t write_domain)
+{
+       struct drm_device               *dev = obj->dev;
+       struct drm_i915_gem_object      *obj_priv = obj->driver_private;
+       uint32_t                        invalidate_domains = 0;
+       uint32_t                        flush_domains = 0;
+       int                             ret;
+
+#if WATCH_BUF
+       DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+                __func__, obj, 
+                obj->read_domains, read_domains, 
+                obj->write_domain, write_domain);
+#endif
+       /*
+        * If the object isn't moving to a new write domain,
+        * let the object stay in multiple read domains
+        */
+       if (write_domain == 0)
+               read_domains |= obj->read_domains;
+       else
+               obj_priv->dirty = 1;
+
+       /*
+        * Flush the current write domain if
+        * the new read domains don't match. Invalidate
+        * any read domains which differ from the old
+        * write domain
+        */
+       if (obj->write_domain && obj->write_domain != read_domains) {
+               flush_domains |= obj->write_domain;
+               invalidate_domains |= read_domains & ~obj->write_domain;
+       }
+       /*
+        * Invalidate any read caches which may have
+        * stale data. That is, any new read domains.
+        */
+       invalidate_domains |= read_domains & ~obj->read_domains;
+       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
+#if WATCH_BUF
+               DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+                        __func__, flush_domains, invalidate_domains);
+#endif
+               /*
+                * If we're invalidating the CPU cache and flushing a GPU cache,
+                * then pause for rendering so that the GPU caches will be
+                * flushed before the CPU cache is invalidated
+                */
+               if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
+                   (flush_domains & ~(I915_GEM_DOMAIN_CPU |
+                                      I915_GEM_DOMAIN_GTT))) {
+                       ret = i915_gem_object_wait_rendering(obj);
+                       if (ret)
+                               return ret;
+               }
+               i915_gem_clflush_object(obj);
+       }
+
+       if ((write_domain | flush_domains) != 0)
+               obj->write_domain = write_domain;
+       obj->read_domains = read_domains;
+       dev->invalidate_domains |= invalidate_domains;
+       dev->flush_domains |= flush_domains;
+#if WATCH_BUF
+       DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
+                __func__,
+                obj->read_domains, obj->write_domain,
+                dev->invalidate_domains, dev->flush_domains);
+#endif
+       return 0;
+}
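
A standalone model of the bookkeeping rules above, replaying Case 2 step 4
from the comment (set_domain to (CPU, CPU) on a buffer last written by the
GPU); the domain bits are illustrative stand-ins, not the kernel's values:

	#include <stdint.h>
	#include <stdio.h>

	#define DOMAIN_CPU	0x1
	#define DOMAIN_RENDER	0x2

	int main(void)
	{
		uint32_t obj_read = DOMAIN_RENDER, obj_write = DOMAIN_RENDER;
		uint32_t read_domains = DOMAIN_CPU, write_domain = DOMAIN_CPU;
		uint32_t invalidate = 0, flush = 0;

		if (write_domain == 0)	/* read-only: keep old read domains */
			read_domains |= obj_read;
		if (obj_write && obj_write != read_domains) {
			flush |= obj_write;	/* flush the stale write domain */
			invalidate |= read_domains & ~obj_write;
		}
		invalidate |= read_domains & ~obj_read;	/* invalidate new readers */

		/* prints "flush 2 invalidate 1": flush RENDER, invalidate CPU */
		printf("flush %x invalidate %x\n", flush, invalidate);
		return 0;
	}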
+
+/**
+ * Once all of the objects have been set in the proper domain,
+ * perform the necessary flush and invalidate operations.
+ *
+ * Returns the write domains flushed, for use in flush tracking.
+ */
+static uint32_t
+i915_gem_dev_set_domain(struct drm_device *dev)
+{
+       uint32_t flush_domains = dev->flush_domains;
+
+       /*
+        * Now that all the buffers are synced to the proper domains,
+        * flush and invalidate the collected domains
+        */
+       if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+               DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+                         __func__,
+                        dev->invalidate_domains,
+                        dev->flush_domains);
+#endif
+               i915_gem_flush(dev,
+                              dev->invalidate_domains,
+                              dev->flush_domains);
+               dev->invalidate_domains = 0;
+               dev->flush_domains = 0;
+       }
+
+       return flush_domains;
+}
+
+#if WATCH_COHERENCY
+static void
+i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page;
+       uint32_t *gtt_mapping;
+       uint32_t *backing_map = NULL;
+       int bad_count = 0;
+
+       DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+                __func__, obj, obj_priv->gtt_offset, handle,
+                obj->size / 1024);
+
+       gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
+                             obj->size);
+       if (gtt_mapping == NULL) {
+               DRM_ERROR("failed to map GTT space\n");
+               return;
+       }
+
+       for (page = 0; page < obj->size / PAGE_SIZE; page++) {
+               int i;
+
+               backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+
+               if (backing_map == NULL) {
+                       DRM_ERROR("failed to map backing page\n");
+                       goto out;
+               }
+
+               for (i = 0; i < PAGE_SIZE / 4; i++) {
+                       uint32_t cpuval = backing_map[i];
+                       uint32_t gttval = readl(gtt_mapping +
+                                               page * 1024 + i);
+
+                       if (cpuval != gttval) {
+                               DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
+                                        "0x%08x vs 0x%08x\n",
+                                        (int)(obj_priv->gtt_offset +
+                                              page * PAGE_SIZE + i * 4),
+                                        cpuval, gttval);
+                               if (bad_count++ >= 8) {
+                                       DRM_INFO("...\n");
+                                       goto out;
+                               }
+                       }
+               }
+               kunmap_atomic(backing_map, KM_USER0);
+               backing_map = NULL;
+       }
+
+ out:
+       if (backing_map != NULL)
+               kunmap_atomic(backing_map, KM_USER0);
+       iounmap(gtt_mapping);
+
+       /* give syslog time to catch up */
+       msleep(1);
+
+       /* Directly flush the object, since we just loaded values with the CPU
+        * from the backing pages and we don't want to disturb the cache
+        * management that we're trying to observe.
+        */
+       i915_gem_clflush_object(obj);
+}
+#endif
+
+/**
+ * Pin an object to the GTT and evaluate the relocations landing in it.
+ */
+static int
+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+                                struct drm_file *file_priv,
+                                struct drm_i915_gem_exec_object *entry)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_relocation_entry reloc;
+       struct drm_i915_gem_relocation_entry __user *relocs;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int i, ret;
+       uint32_t last_reloc_offset = -1;
+       void *reloc_page = NULL;
+
+       /* Choose the GTT offset for our buffer and put it there. */
+       ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+       if (ret)
+               return ret;
+
+       entry->offset = obj_priv->gtt_offset;
+
+       relocs = (struct drm_i915_gem_relocation_entry __user *)
+                (uintptr_t) entry->relocs_ptr;
+       /* Apply the relocations, using the GTT aperture to avoid cache
+        * flushing requirements.
+        */
+       for (i = 0; i < entry->relocation_count; i++) {
+               struct drm_gem_object *target_obj;
+               struct drm_i915_gem_object *target_obj_priv;
+               uint32_t reloc_val, reloc_offset, *reloc_entry;
+
+               ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
+               if (ret != 0) {
+                       i915_gem_object_unpin(obj);
+                       return -EFAULT;
+               }
+
+               target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+                                                  reloc.target_handle);
+               if (target_obj == NULL) {
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+               target_obj_priv = target_obj->driver_private;
+
+               /* The target buffer should have appeared before us in the
+                * exec_object list, so it should have a GTT space bound by now.
+                */
+               if (target_obj_priv->gtt_space == NULL) {
+                       DRM_ERROR("No GTT space found for object %d\n",
+                                 reloc.target_handle);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+
+               if (reloc.offset > obj->size - 4) {
+                       DRM_ERROR("Relocation beyond object bounds: "
+                                 "obj %p target %d offset %d size %d.\n",
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset, (int) obj->size);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+               if (reloc.offset & 3) {
+                       DRM_ERROR("Relocation not 4-byte aligned: "
+                                 "obj %p target %d offset %d.\n",
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+
+               if (reloc.write_domain && target_obj->pending_write_domain &&
+                   reloc.write_domain != target_obj->pending_write_domain) {
+                       DRM_ERROR("Write domain conflict: "
+                                 "obj %p target %d offset %d "
+                                 "new %08x old %08x\n",
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset,
+                                 reloc.write_domain,
+                                 target_obj->pending_write_domain);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EINVAL;
+               }
+
+#if WATCH_RELOC
+               DRM_INFO("%s: obj %p offset %08x target %d "
+                        "read %08x write %08x gtt %08x "
+                        "presumed %08x delta %08x\n",
+                        __func__,
+                        obj,
+                        (int) reloc.offset,
+                        (int) reloc.target_handle,
+                        (int) reloc.read_domains,
+                        (int) reloc.write_domain,
+                        (int) target_obj_priv->gtt_offset,
+                        (int) reloc.presumed_offset,
+                        reloc.delta);
+#endif
+
+               target_obj->pending_read_domains |= reloc.read_domains;
+               target_obj->pending_write_domain |= reloc.write_domain;
+
+               /* If the relocation already has the right value in it, no
+                * more work needs to be done.
+                */
+               if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+                       drm_gem_object_unreference(target_obj);
+                       continue;
+               }
+
+               /* Now that we're going to actually write some data in,
+                * make sure that any rendering using this buffer's contents
+                * is completed.
+                */
+               i915_gem_object_wait_rendering(obj);
+
+               /* As we're writing through the gtt, flush
+                * any CPU writes before we write the relocations
+                */
+               if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+                       i915_gem_clflush_object(obj);
+                       drm_agp_chipset_flush(dev);
+                       obj->write_domain = 0;
+               }
+
+               /* Map the page containing the relocation we're going to
+                * perform.
+                */
+               reloc_offset = obj_priv->gtt_offset + reloc.offset;
+               if (reloc_page == NULL ||
+                   (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
+                   (reloc_offset & ~(PAGE_SIZE - 1))) {
+                       if (reloc_page != NULL)
+                               iounmap(reloc_page);
+
+                       reloc_page = ioremap(dev->agp->base +
+                                            (reloc_offset & ~(PAGE_SIZE - 1)),
+                                            PAGE_SIZE);
+                       last_reloc_offset = reloc_offset;
+                       if (reloc_page == NULL) {
+                               drm_gem_object_unreference(target_obj);
+                               i915_gem_object_unpin(obj);
+                               return -ENOMEM;
+                       }
+               }
+
+               reloc_entry = (uint32_t *)((char *)reloc_page +
+                                          (reloc_offset & (PAGE_SIZE - 1)));
+               reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+
+#if WATCH_BUF
+               DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
+                         obj, (unsigned int) reloc.offset,
+                         readl(reloc_entry), reloc_val);
+#endif
+               writel(reloc_val, reloc_entry);
+
+               /* Write the updated presumed offset for this entry back out
+                * to the user.
+                */
+               reloc.presumed_offset = target_obj_priv->gtt_offset;
+               ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
+               if (ret != 0) {
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
+                       return -EFAULT;
+               }
+
+               drm_gem_object_unreference(target_obj);
+       }
+
+       if (reloc_page != NULL)
+               iounmap(reloc_page);
+
+#if WATCH_BUF
+       if (0)
+               i915_gem_dump_object(obj, 128, __func__, ~0);
+#endif
+       return 0;
+}
+
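For context, userspace hands the kernel an array of these relocation entries
per exec object; after patching the batch, the kernel writes the buffer's real
GTT offset back into presumed_offset so an unchanged relocation can be skipped
on the next submission (the early-continue above). A minimal sketch of filling
one entry, assuming the struct drm_i915_gem_relocation_entry layout from
i915_drm.h in this series (fill_reloc and the delta of 4 are illustrative):

	/* Make dword `reloc_dword` of the batch point at `target_handle` + 4. */
	static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
			       uint32_t target_handle, uint32_t reloc_dword)
	{
		memset(reloc, 0, sizeof(*reloc));
		reloc->target_handle = target_handle;	/* GEM handle of target */
		reloc->offset = reloc_dword * 4;	/* byte offset to patch */
		reloc->delta = 4;			/* added to target's offset */
		reloc->read_domains = I915_GEM_DOMAIN_RENDER;
		reloc->write_domain = 0;
		/* presumed_offset stays 0; the kernel writes the real one back. */
	}
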
+/** Dispatch a batchbuffer to the ring
+ */
+static int
+i915_dispatch_gem_execbuffer(struct drm_device *dev,
+                             struct drm_i915_gem_execbuffer *exec,
+                             uint64_t exec_offset)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+                                            (uintptr_t) exec->cliprects_ptr;
+       int nbox = exec->num_cliprects;
+       int i = 0, count;
+       uint32_t        exec_start, exec_len;
+       RING_LOCALS;
+
+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+       exec_len = (uint32_t) exec->batch_len;
+
+       if ((exec_start | exec_len) & 0x7) {
+               DRM_ERROR("execbuffer start/len not 8-byte aligned\n");
+               return -EINVAL;
+       }
+
+       if (!exec_start)
+               return -EINVAL;
+
+       count = nbox ? nbox : 1;
+
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+                       int ret = i915_emit_box(dev, boxes, i,
+                                               exec->DR1, exec->DR4);
+                       if (ret)
+                               return ret;
+               }
+
+               if (IS_I830(dev) || IS_845G(dev)) {
+                       BEGIN_LP_RING(4);
+                       OUT_RING(MI_BATCH_BUFFER);
+                       OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+                       OUT_RING(exec_start + exec_len - 4);
+                       OUT_RING(0);
+                       ADVANCE_LP_RING();
+               } else {
+                       BEGIN_LP_RING(2);
+                       if (IS_I965G(dev)) {
+                               OUT_RING(MI_BATCH_BUFFER_START |
+                                        (2 << 6) |
+                                        MI_BATCH_NON_SECURE_I965);
+                               OUT_RING(exec_start);
+                       } else {
+                               OUT_RING(MI_BATCH_BUFFER_START |
+                                        (2 << 6));
+                               OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+                       }
+                       ADVANCE_LP_RING();
+               }
+       }
+
+       /* XXX breadcrumb */
+       return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed the requests
+ * that were outstanding as of the previous throttle call.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+       int ret = 0;
+       uint32_t seqno;
+
+       mutex_lock(&dev->struct_mutex);
+       seqno = i915_file_priv->mm.last_gem_throttle_seqno;
+       i915_file_priv->mm.last_gem_throttle_seqno =
+               i915_file_priv->mm.last_gem_seqno;
+       if (seqno)
+               ret = i915_wait_request(dev, seqno);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
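A client pairs this with one ioctl call per frame; a sketch, assuming the
DRM_IOCTL_I915_GEM_THROTTLE request number defined in this series (the
throttle() wrapper is illustrative):

	#include <errno.h>
	#include <sys/ioctl.h>

	/* Cap how far the CPU may run ahead of the GPU; call once per frame. */
	static int throttle(int fd)
	{
		int ret;

		do {
			ret = ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
		} while (ret == -1 && errno == EINTR);
		return ret;
	}
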
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+       struct drm_i915_gem_execbuffer *args = data;
+       struct drm_i915_gem_exec_object *exec_list = NULL;
+       struct drm_gem_object **object_list = NULL;
+       struct drm_gem_object *batch_obj;
+       int ret, i, pinned = 0;
+       uint64_t exec_offset;
+       uint32_t seqno, flush_domains;
+
+#if WATCH_EXEC
+       DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+       /* Copy in the exec list from userland */
+       exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
+                              DRM_MEM_DRIVER);
+       object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
+                                DRM_MEM_DRIVER);
+       if (exec_list == NULL || object_list == NULL) {
+               DRM_ERROR("Failed to allocate exec or object list "
+                         "for %d buffers\n",
+                         args->buffer_count);
+               ret = -ENOMEM;
+               goto pre_mutex_err;
+       }
+       ret = copy_from_user(exec_list,
+                            (struct drm_i915_gem_exec_object __user *)
+                            (uintptr_t) args->buffers_ptr,
+                            sizeof(*exec_list) * args->buffer_count);
+       if (ret != 0) {
+               DRM_ERROR("copy %d exec entries failed %d\n",
+                         args->buffer_count, ret);
+               ret = -EFAULT;
+               goto pre_mutex_err;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+
+       if (dev_priv->mm.wedged) {
+               DRM_ERROR("Execbuf while wedged\n");
+               mutex_unlock(&dev->struct_mutex);
+               ret = -EIO;
+               goto pre_mutex_err;
+       }
+
+       if (dev_priv->mm.suspended) {
+               DRM_ERROR("Execbuf while VT-switched.\n");
+               mutex_unlock(&dev->struct_mutex);
+               ret = -EBUSY;
+               goto pre_mutex_err;
+       }
+
+       /* Zero the global flush/invalidate flags. These
+        * will be modified as each object is bound to the
+        * GTT.
+        */
+       dev->invalidate_domains = 0;
+       dev->flush_domains = 0;
+
+       /* Look up object handles and perform the relocations */
+       for (i = 0; i < args->buffer_count; i++) {
+               object_list[i] = drm_gem_object_lookup(dev, file_priv,
+                                                      exec_list[i].handle);
+               if (object_list[i] == NULL) {
+                       DRM_ERROR("Invalid object handle %d at index %d\n",
+                                  exec_list[i].handle, i);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               object_list[i]->pending_read_domains = 0;
+               object_list[i]->pending_write_domain = 0;
+               ret = i915_gem_object_pin_and_relocate(object_list[i],
+                                                      file_priv,
+                                                      &exec_list[i]);
+               if (ret) {
+                       DRM_ERROR("object bind and relocate failed %d\n", ret);
+                       goto err;
+               }
+               pinned = i + 1;
+       }
+
+       /* Set the pending read domains for the batch buffer to COMMAND */
+       batch_obj = object_list[args->buffer_count-1];
+       batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+       batch_obj->pending_write_domain = 0;
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+
+       for (i = 0; i < args->buffer_count; i++) {
+               struct drm_gem_object *obj = object_list[i];
+               struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+               if (obj_priv->gtt_space == NULL) {
+                       /* We evicted the buffer in the process of
+                        * validating our set of buffers.  We could try to
+                        * recover by kicking everything out and trying
+                        * again from the start.
+                        */
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               /* make sure all previous memory operations have passed */
+               ret = i915_gem_object_set_domain(obj,
+                                                obj->pending_read_domains,
+                                                obj->pending_write_domain);
+               if (ret)
+                       goto err;
+       }
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+
+       /* Flush/invalidate caches and chipset buffer */
+       flush_domains = i915_gem_dev_set_domain(dev);
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+
+#if WATCH_COHERENCY
+       for (i = 0; i < args->buffer_count; i++) {
+               i915_gem_object_check_coherency(object_list[i],
+                                               exec_list[i].handle);
+       }
+#endif
+
+       exec_offset = exec_list[args->buffer_count - 1].offset;
+
+#if WATCH_EXEC
+       i915_gem_dump_object(object_list[args->buffer_count - 1],
+                             args->batch_len,
+                             __func__,
+                             ~0);
+#endif
+
+       /* Exec the batchbuffer */
+       ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+       if (ret) {
+               DRM_ERROR("dispatch failed %d\n", ret);
+               goto err;
+       }
+
+       /*
+        * Ensure that the commands in the batch buffer are
+        * finished before the interrupt fires
+        */
+       flush_domains |= i915_retire_commands(dev);
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+
+       /*
+        * Get a seqno representing the execution of the current buffer,
+        * which we can wait on.  We would like to mitigate these interrupts,
+        * likely by only creating seqnos occasionally (so that we have
+        * *some* interrupts representing completion of buffers that we can
+        * wait on when trying to clear up gtt space).
+        */
+       seqno = i915_add_request(dev, flush_domains);
+       BUG_ON(seqno == 0);
+       i915_file_priv->mm.last_gem_seqno = seqno;
+       for (i = 0; i < args->buffer_count; i++) {
+               struct drm_gem_object *obj = object_list[i];
+               struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+               i915_gem_object_move_to_active(obj);
+               obj_priv->last_rendering_seqno = seqno;
+#if WATCH_LRU
+               DRM_INFO("%s: move to exec list %p\n", __func__, obj);
+#endif
+       }
+#if WATCH_LRU
+       i915_dump_lru(dev, __func__);
+#endif
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+
+       /* Copy the new buffer offsets back to the user's exec list. */
+       ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
+                          (uintptr_t) args->buffers_ptr,
+                          exec_list,
+                          sizeof(*exec_list) * args->buffer_count);
+       if (ret) {
+               DRM_ERROR("failed to copy %d exec entries "
+                         "back to user (%d)\n",
+                         args->buffer_count, ret);
+               ret = -EFAULT;
+       }
+err:
+       if (object_list != NULL) {
+               for (i = 0; i < pinned; i++)
+                       i915_gem_object_unpin(object_list[i]);
+
+               for (i = 0; i < args->buffer_count; i++)
+                       drm_gem_object_unreference(object_list[i]);
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+       drm_free(object_list, sizeof(*object_list) * args->buffer_count,
+                DRM_MEM_DRIVER);
+       drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
+                DRM_MEM_DRIVER);
+
+       return ret;
+}
+
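Note the contract the code above depends on: the batch buffer must be the
last entry in the exec list, and any object a relocation targets must appear
before the object that references it. A rough userspace sketch, using the
struct and ioctl names from i915_drm.h in this series (submit_batch and its
parameters are illustrative):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Submit `batch_handle`, which references `vbo_handle`. */
	static int submit_batch(int fd, uint32_t vbo_handle,
				uint32_t batch_handle,
				struct drm_i915_gem_relocation_entry *relocs,
				uint32_t nreloc, uint32_t batch_bytes)
	{
		struct drm_i915_gem_exec_object exec[2];
		struct drm_i915_gem_execbuffer execbuf;

		memset(exec, 0, sizeof(exec));
		exec[0].handle = vbo_handle;		/* target appears first */
		exec[1].handle = batch_handle;		/* batch goes last */
		exec[1].relocation_count = nreloc;
		exec[1].relocs_ptr = (uintptr_t) relocs;

		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uintptr_t) exec;
		execbuf.buffer_count = 2;
		execbuf.batch_len = batch_bytes;	/* 8-byte aligned */

		return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
	}
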
+int
+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int ret;
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+       if (obj_priv->gtt_space == NULL) {
+               ret = i915_gem_object_bind_to_gtt(obj, alignment);
+               if (ret != 0) {
+                       DRM_ERROR("Failure to bind: %d\n", ret);
+                       return ret;
+               }
+       }
+       obj_priv->pin_count++;
+
+       /* If the object is not active and not pending a flush,
+        * remove it from the inactive list
+        */
+       if (obj_priv->pin_count == 1) {
+               atomic_inc(&dev->pin_count);
+               atomic_add(obj->size, &dev->pin_memory);
+               if (!obj_priv->active &&
+                   (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+                                          I915_GEM_DOMAIN_GTT)) == 0 &&
+                   !list_empty(&obj_priv->list))
+                       list_del_init(&obj_priv->list);
+       }
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+
+       return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+       obj_priv->pin_count--;
+       BUG_ON(obj_priv->pin_count < 0);
+       BUG_ON(obj_priv->gtt_space == NULL);
+
+       /* If the object is no longer pinned, and is
+        * neither active nor being flushed, then stick it on
+        * the inactive list
+        */
+       if (obj_priv->pin_count == 0) {
+               if (!obj_priv->active &&
+                   (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+                                          I915_GEM_DOMAIN_GTT)) == 0)
+                       list_move_tail(&obj_priv->list,
+                                      &dev_priv->mm.inactive_list);
+               atomic_dec(&dev->pin_count);
+               atomic_sub(obj->size, &dev->pin_memory);
+       }
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
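Pin and unpin bracket any in-kernel use of a stable GTT address; the expected
pairing looks roughly like this (program_hw_base is a hypothetical helper,
not part of this change):

	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	ret = i915_gem_object_pin(obj, 4096);	/* bind and hold in the GTT */
	if (ret)
		return ret;

	/* gtt_offset is now stable until the matching unpin. */
	program_hw_base(dev, obj_priv->gtt_offset);

	i915_gem_object_unpin(obj);		/* object is evictable again */
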
+int
+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+{
+       struct drm_i915_gem_pin *args = data;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL) {
+               DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+                         args->handle);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+       obj_priv = obj->driver_private;
+
+       ret = i915_gem_object_pin(obj, args->alignment);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       /* XXX - flush the CPU caches for pinned objects
+        * as the X server doesn't manage domains yet
+        */
+       if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+               i915_gem_clflush_object(obj);
+               drm_agp_chipset_flush(dev);
+               obj->write_domain = 0;
+       }
+       args->offset = obj_priv->gtt_offset;
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+int
+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_i915_gem_pin *args = data;
+       struct drm_gem_object *obj;
+
+       mutex_lock(&dev->struct_mutex);
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL) {
+               DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+                         args->handle);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       i915_gem_object_unpin(obj);
+
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+{
+       struct drm_i915_gem_busy *args = data;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       mutex_lock(&dev->struct_mutex);
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL) {
+               DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+                         args->handle);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       obj_priv = obj->driver_private;
+       args->busy = obj_priv->active;
+
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       return i915_gem_ring_throttle(dev, file_priv);
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+       struct drm_i915_gem_object *obj_priv;
+
+       obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+       if (obj_priv == NULL)
+               return -ENOMEM;
+
+       /*
+        * We've just allocated pages from the kernel,
+        * so they've just been written by the CPU with
+        * zeros. They'll need to be clflushed before we
+        * use them with the GPU.
+        */
+       obj->write_domain = I915_GEM_DOMAIN_CPU;
+       obj->read_domains = I915_GEM_DOMAIN_CPU;
+
+       obj->driver_private = obj_priv;
+       obj_priv->obj = obj;
+       INIT_LIST_HEAD(&obj_priv->list);
+       return 0;
+}
+
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       while (obj_priv->pin_count > 0)
+               i915_gem_object_unpin(obj);
+
+       i915_gem_object_unbind(obj);
+
+       drm_free(obj->driver_private, sizeof(*obj_priv), DRM_MEM_DRIVER);
+}
+
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+                   struct drm_file *file_priv,
+                   uint32_t read_domains,
+                   uint32_t write_domain)
+{
+       struct drm_device *dev = obj->dev;
+       int ret;
+       uint32_t flush_domains;
+
+       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
+       if (ret)
+               return ret;
+       flush_domains = i915_gem_dev_set_domain(dev);
+
+       if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
+               (void) i915_add_request(dev, flush_domains);
+
+       return 0;
+}
+
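Userspace reaches this through the set-domain ioctl before touching a mapping
with the CPU; a sketch, assuming struct drm_i915_gem_set_domain and
DRM_IOCTL_I915_GEM_SET_DOMAIN as defined in this series:

	struct drm_i915_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = I915_GEM_DOMAIN_CPU;	/* about to write via the CPU */
	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
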
+/** Unbinds all objects that are on the given buffer list. */
+static int
+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+{
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       while (!list_empty(head)) {
+               obj_priv = list_first_entry(head,
+                                           struct drm_i915_gem_object,
+                                           list);
+               obj = obj_priv->obj;
+
+               if (obj_priv->pin_count != 0) {
+                       DRM_ERROR("Pinned object in unbind list\n");
+                       return -EINVAL;
+               }
+
+               ret = i915_gem_object_unbind(obj);
+               if (ret != 0) {
+                       DRM_ERROR("Error unbinding object: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int
+i915_gem_idle(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t seqno, cur_seqno, last_seqno;
+       int stuck;
+
+       if (dev_priv->mm.suspended)
+               return 0;
+
+       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
+        * We need to replace this with a semaphore, or something.
+        */
+       dev_priv->mm.suspended = 1;
+
+       i915_kernel_lost_context(dev);
+
+       /* Flush the GPU along with all non-CPU write domains
+        */
+       i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
+                      ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+       seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+
+       if (seqno == 0)
+               return -ENOMEM;
+
+       dev_priv->mm.waiting_gem_seqno = seqno;
+       last_seqno = 0;
+       stuck = 0;
+       for (;;) {
+               cur_seqno = i915_get_gem_seqno(dev);
+               if (i915_seqno_passed(cur_seqno, seqno))
+                       break;
+               if (last_seqno == cur_seqno) {
+                       if (stuck++ > 100) {
+                               DRM_ERROR("hardware wedged\n");
+                               dev_priv->mm.wedged = 1;
+                               DRM_WAKEUP(&dev_priv->irq_queue);
+                               break;
+                       }
+               }
+               msleep(10);
+               last_seqno = cur_seqno;
+       }
+       dev_priv->mm.waiting_gem_seqno = 0;
+
+       i915_gem_retire_requests(dev);
+
+       /* Active and flushing should now be empty as we've
+        * waited for a sequence higher than any pending execbuffer
+        */
+       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+       /* Request should now be empty as we've also waited
+        * for the last request in the list
+        */
+       BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+       /* Move all buffers out of the GTT. */
+       i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+
+       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+       BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+       BUG_ON(!list_empty(&dev_priv->mm.request_list));
+       return 0;
+}
+
+int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       obj = drm_gem_object_alloc(dev, 128 * 1024);
+       if (obj == NULL) {
+               DRM_ERROR("Failed to allocate ringbuffer\n");
+               return -ENOMEM;
+       }
+       obj_priv = obj->driver_private;
+
+       ret = i915_gem_object_pin(obj, 4096);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               return ret;
+       }
+
+       /* Set up the kernel mapping for the ring. */
+       dev_priv->ring.Size = obj->size;
+       dev_priv->ring.tail_mask = obj->size - 1;
+
+       dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
+       dev_priv->ring.map.size = obj->size;
+       dev_priv->ring.map.type = 0;
+       dev_priv->ring.map.flags = 0;
+       dev_priv->ring.map.mtrr = 0;
+
+       drm_core_ioremap(&dev_priv->ring.map, dev);
+       if (dev_priv->ring.map.handle == NULL) {
+               DRM_ERROR("Failed to map ringbuffer.\n");
+               memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+               drm_gem_object_unreference(obj);
+               return -EINVAL;
+       }
+       dev_priv->ring.ring_obj = obj;
+       dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+       /* Stop the ring if it's running. */
+       I915_WRITE(PRB0_CTL, 0);
+       I915_WRITE(PRB0_HEAD, 0);
+       I915_WRITE(PRB0_TAIL, 0);
+       I915_WRITE(PRB0_START, 0);
+
+       /* Initialize the ring. */
+       I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+       I915_WRITE(PRB0_CTL, (((obj->size - 4096) & RING_NR_PAGES) |
+                             RING_NO_REPORT |
+                             RING_VALID));
+
+       /* Update our cache of the ring state */
+       i915_kernel_lost_context(dev);
+
+       return 0;
+}
+
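The tail_mask assignment above assumes the ring size is a power of two (the
128KB object is), so advancing the tail becomes a mask rather than a modulo.
Roughly what the ring emit path does with it (illustrative only; the real
bookkeeping lives in the LP_RING macros):

	dev_priv->ring.tail = (dev_priv->ring.tail + bytes_emitted) &
			      dev_priv->ring.tail_mask;
	I915_WRITE(PRB0_TAIL, dev_priv->ring.tail);
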
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->ring.ring_obj == NULL)
+               return;
+
+       drm_core_ioremapfree(&dev_priv->ring.map, dev);
+
+       i915_gem_object_unpin(dev_priv->ring.ring_obj);
+       drm_gem_object_unreference(dev_priv->ring.ring_obj);
+
+       memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+}
+
+int
+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       if (dev_priv->mm.wedged) {
+               DRM_ERROR("Re-enabling wedged hardware, good luck\n");
+               dev_priv->mm.wedged = 0;
+       }
+
+       ret = i915_gem_init_ringbuffer(dev);
+       if (ret != 0)
+               return ret;
+
+       mutex_lock(&dev->struct_mutex);
+       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+       BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+       BUG_ON(!list_empty(&dev_priv->mm.request_list));
+       dev_priv->mm.suspended = 0;
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+int
+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_idle(dev);
+       if (ret == 0)
+               i915_gem_cleanup_ringbuffer(dev);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+static int i915_gem_active_info(char *buf, char **start, off_t offset,
+                               int request, int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Active:\n");
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+               struct drm_gem_object *obj = obj_priv->obj;
+               if (obj->name) {
+                       DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+                                      obj, obj->name,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               } else {
+                       DRM_PROC_PRINT("       %p: %08x %08x %d\n",
+                                      obj,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               }
+       }
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
+                                 int request, int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Flushing:\n");
+       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+               struct drm_gem_object *obj = obj_priv->obj;
+               if (obj->name) {
+                       DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+                                      obj, obj->name,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               } else {
+                       DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               }
+       }
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
+                                 int request, int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Inactive:\n");
+       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+               struct drm_gem_object *obj = obj_priv->obj;
+               if (obj->name) {
+                       DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+                                      obj, obj->name,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               } else {
+                       DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               }
+       }
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+static int i915_gem_request_info(char *buf, char **start, off_t offset,
+                                int request, int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *gem_request;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Request:\n");
+       list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+               DRM_PROC_PRINT("    %d @ %d %08x\n",
+                               gem_request->seqno,
+                               (int) (jiffies - gem_request->emitted_jiffies),
+                               gem_request->flush_domains);
+       }
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
+                              int request, int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+       DRM_PROC_PRINT("Waiter sequence:  %d\n", dev_priv->mm.waiting_gem_seqno);
+       DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+
+static int i915_interrupt_info(char *buf, char **start, off_t offset,
+                              int request, int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int len = 0;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Interrupt enable:    %08x\n",
+                      I915_READ(IER));
+       DRM_PROC_PRINT("Interrupt identity:  %08x\n",
+                      I915_READ(IIR));
+       DRM_PROC_PRINT("Interrupt mask:      %08x\n",
+                      I915_READ(IMR));
+       DRM_PROC_PRINT("Pipe A stat:         %08x\n",
+                      I915_READ(PIPEASTAT));
+       DRM_PROC_PRINT("Pipe B stat:         %08x\n",
+                      I915_READ(PIPEBSTAT));
+       DRM_PROC_PRINT("Interrupts received: %d\n",
+                      atomic_read(&dev_priv->irq_received));
+       DRM_PROC_PRINT("Current sequence:    %d\n",
+                      i915_get_gem_seqno(dev));
+       DRM_PROC_PRINT("Waiter sequence:     %d\n",
+                      dev_priv->mm.waiting_gem_seqno);
+       DRM_PROC_PRINT("IRQ sequence:        %d\n",
+                      dev_priv->mm.irq_gem_seqno);
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+static struct drm_proc_list {
+       const char *name;       /**< file name */
+       int (*f) (char *, char **, off_t, int, int *, void *);         /**< proc callback */
+} i915_gem_proc_list[] = {
+       {"i915_gem_active", i915_gem_active_info},
+       {"i915_gem_flushing", i915_gem_flushing_info},
+       {"i915_gem_inactive", i915_gem_inactive_info},
+       {"i915_gem_request", i915_gem_request_info},
+       {"i915_gem_seqno", i915_gem_seqno_info},
+       {"i915_gem_interrupt", i915_interrupt_info},
+};
+
+#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
+
+int i915_gem_proc_init(struct drm_minor *minor)
+{
+       struct proc_dir_entry *ent;
+       int i, j;
+
+       for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
+               ent = create_proc_entry(i915_gem_proc_list[i].name,
+                                       S_IFREG | S_IRUGO, minor->dev_root);
+               if (!ent) {
+                       DRM_ERROR("Cannot create /proc/dri/.../%s\n",
+                                 i915_gem_proc_list[i].name);
+                       for (j = 0; j < i; j++)
+                               remove_proc_entry(i915_gem_proc_list[j].name,
+                                                 minor->dev_root);
+                       return -1;
+               }
+               ent->read_proc = i915_gem_proc_list[i].f;
+               ent->data = minor;
+       }
+       return 0;
+}
+
+void i915_gem_proc_cleanup(struct drm_minor *minor)
+{
+       int i;
+
+       if (!minor->dev_root)
+               return;
+
+       for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
+               remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
+}
+
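These entries land under the device's /proc/dri/<minor>/ directory, so the
object lists and seqno state above can be inspected with plain reads, e.g.
(minor 0 assumed):

	#include <stdio.h>

	/* Dump the GEM seqno state for DRI minor 0. */
	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/dri/0/i915_gem_seqno", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
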
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+       int ret;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (dev_priv->ring.ring_obj != NULL) {
+               ret = i915_gem_idle(dev);
+               if (ret)
+                       DRM_ERROR("failed to idle hardware: %d\n", ret);
+
+               i915_gem_cleanup_ringbuffer(dev);
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+}
index 8e1b833..2505d98 100644 (file)
@@ -136,8 +136,8 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
  *
  * Not for i915G/i915GM
  *
- * \return TRUE if CRT is connected.
- * \return FALSE if CRT is disconnected.
+ * \return true if CRT is connected.
+ * \return false if CRT is disconnected.
  */
 static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 {
index b8077bd..0236bbc 100644 (file)
@@ -369,6 +369,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y)
        struct drm_i915_master_private *master_priv;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_framebuffer *intel_fb;
+       struct drm_i915_gem_object *obj_priv;
        int pipe = intel_crtc->pipe;
        unsigned long Start, Offset;
        int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
@@ -385,7 +386,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y)
 
        intel_fb = to_intel_framebuffer(crtc->fb);
 
-       Start = intel_fb->bo->offset;
+       obj_priv = intel_fb->obj->driver_private;
+
+       Start = obj_priv->gtt_offset;
        Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
 
        I915_WRITE(dspstride, crtc->fb->pitch);
@@ -507,7 +510,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
                intel_crtc_load_lut(crtc);
                
                /* Give the overlay scaler a chance to enable if it's on this pipe */
-               //intel_crtc_dpms_video(crtc, TRUE); TODO
+               //intel_crtc_dpms_video(crtc, true); TODO
        break;
        case DRM_MODE_DPMS_OFF:
                /* Give the overlay scaler a chance to disable if it's on this pipe */
@@ -734,19 +737,19 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 
                switch (intel_output->type) {
                case INTEL_OUTPUT_LVDS:
-                       is_lvds = TRUE;
+                       is_lvds = true;
                        break;
                case INTEL_OUTPUT_SDVO:
-                       is_sdvo = TRUE;
+                       is_sdvo = true;
                        break;
                case INTEL_OUTPUT_DVO:
-                       is_dvo = TRUE;
+                       is_dvo = true;
                        break;
                case INTEL_OUTPUT_TVOUT:
-                       is_tv = TRUE;
+                       is_tv = true;
                        break;
                case INTEL_OUTPUT_ANALOG:
-                       is_crt = TRUE;
+                       is_crt = true;
                        break;
                }
        }
@@ -1175,7 +1178,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
        }
 
        encoder->crtc = crtc;
-       intel_output->load_detect_temp = TRUE;
+       intel_output->load_detect_temp = true;
     
        intel_crtc = to_intel_crtc(crtc);
        *dpms_mode = intel_crtc->dpms_mode;
@@ -1210,7 +1213,7 @@ void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_
     
        if (intel_output->load_detect_temp) {
                encoder->crtc = NULL;
-               intel_output->load_detect_temp = FALSE;
+               intel_output->load_detect_temp = false;
                crtc->enabled = drm_helper_crtc_in_use(crtc);
                drm_helper_disable_unused_functions(dev);
        }
@@ -1494,7 +1497,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
 };
 
 struct drm_framebuffer *intel_user_framebuffer_create(struct drm_device *dev,
-                                                     struct drm_file *file_priv,
+                                                     struct drm_file *filp,
                                                      struct drm_mode_fb_cmd *mode_cmd)
 {
        struct intel_framebuffer *intel_fb;
@@ -1506,15 +1509,15 @@ struct drm_framebuffer *intel_user_framebuffer_create(struct drm_device *dev,
        drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
 
-       if (file_priv) {
-               mutex_lock(&dev->struct_mutex);
-               intel_fb->bo = drm_lookup_buffer_object(file_priv, intel_fb->base.mm_handle, 0);
-               mutex_unlock(&dev->struct_mutex);
-               if (!intel_fb->bo) {
+       if (filp) {
+               intel_fb->obj = drm_gem_object_lookup(dev, filp,
+                                                     mode_cmd->handle);
+               if (!intel_fb->obj) {
                        kfree(intel_fb);
                        return NULL;
                }
        }
        return &intel_fb->base;
 }
 
@@ -1522,22 +1525,25 @@ static int intel_insert_new_fb(struct drm_device *dev, struct drm_file *file_pri
                                struct drm_framebuffer *fb, struct drm_mode_fb_cmd *mode_cmd)
 {
        struct intel_framebuffer *intel_fb;
-       struct drm_buffer_object *bo;
+       struct drm_gem_object *obj;
        struct drm_crtc *crtc;
 
        intel_fb = to_intel_framebuffer(fb);
 
        mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, mode_cmd->handle, 0);
-       mutex_unlock(&dev->struct_mutex);
+       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
        
-       if (!bo)
+       if (!obj) {
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
+       }
        drm_helper_mode_fill_fb_struct(fb, mode_cmd);
-       
-       drm_bo_usage_deref_unlocked(&intel_fb->bo);
 
-       intel_fb->bo = bo;
+       drm_gem_object_unreference(intel_fb->obj);
+       intel_fb->obj = obj;
+       mutex_unlock(&dev->struct_mutex);
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                if (crtc->fb == fb) {
index 1008e27..bffbeef 100644 (file)
@@ -50,8 +50,7 @@ struct intel_i2c_chan {
 
 struct intel_framebuffer {
        struct drm_framebuffer base;
-       struct drm_buffer_object *bo;
-       struct drm_bo_kmap_obj kmap;
+       struct drm_gem_object *obj;
 };
 
 
index 5637ea2..bc056bc 100644 (file)
@@ -525,6 +525,92 @@ static int intelfb_pan_display(struct fb_var_screeninfo *var,
        return ret;
 }
 
+static void intelfb_on(struct fb_info *info)
+{
+       struct intelfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int i;
+
+       /*
+        * For each CRTC bound to this fb, turn the CRTC back on, then
+        * turn on all of its associated encoders.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+               for (i = 0; i < par->crtc_count; i++)
+                       if (crtc->base.id == par->crtc_ids[i])
+                               break;
+
+               /* Skip CRTCs that aren't part of this fb. */
+               if (i == par->crtc_count)
+                       continue;
+
+               crtc_funcs->dpms(crtc, DPMSModeOn);
+
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
+                               encoder_funcs = encoder->helper_private;
+                               encoder_funcs->dpms(encoder, DPMSModeOn);
+                       }
+               }
+       }
+}
+
+static void intelfb_off(struct fb_info *info, int dpms_mode)
+{
+       struct intelfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int i;
+
+       /*
+        * For each CRTC in this fb, find all associated encoders
+        * and turn them off, then turn off the CRTC.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+               for (i = 0; i < par->crtc_count; i++)
+                       if (crtc->base.id == par->crtc_ids[i])
+                               break;
+
+               /* Skip CRTCs that aren't part of this fb. */
+               if (i == par->crtc_count)
+                       continue;
+
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
+                               encoder_funcs = encoder->helper_private;
+                               encoder_funcs->dpms(encoder, dpms_mode);
+                       }
+               }
+               if (dpms_mode == DPMSModeOff)
+                       crtc_funcs->dpms(crtc, dpms_mode);
+       }
+}
+
+static int intelfb_blank(int blank, struct fb_info *info)
+{
+       switch (blank) {
+       case FB_BLANK_UNBLANK:
+               intelfb_on(info);
+               break;
+       case FB_BLANK_NORMAL:
+               intelfb_off(info, DPMSModeStandby);
+               break;
+       case FB_BLANK_HSYNC_SUSPEND:
+               intelfb_off(info, DPMSModeStandby);
+               break;
+       case FB_BLANK_VSYNC_SUSPEND:
+               intelfb_off(info, DPMSModeSuspend);
+               break;
+       case FB_BLANK_POWERDOWN:
+               intelfb_off(info, DPMSModeOff);
+               break;
+       }
+       return 0;
+}
+
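fbdev userspace exercises this hook through the standard FBIOBLANK ioctl; a
minimal sketch (assumes the console framebuffer is /dev/fb0):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fb.h>

	/* Power the panel down through the new fb_blank hook. */
	int main(void)
	{
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0)
			return 1;
		ioctl(fd, FBIOBLANK, FB_BLANK_POWERDOWN);
		close(fd);
		return 0;
	}
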
 static struct fb_ops intelfb_ops = {
        .owner = THIS_MODULE,
        //.fb_open = intelfb_open,
@@ -539,6 +625,7 @@ static struct fb_ops intelfb_ops = {
        .fb_copyarea = cfb_copyarea, //intelfb_copyarea,
        .fb_imageblit = cfb_imageblit, //intelfb_imageblit,
        .fb_pan_display = intelfb_pan_display,
+       .fb_blank = intelfb_blank,
 };
 
 /**
@@ -606,9 +693,10 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_mode_fb_cmd mode_cmd;
-       struct drm_buffer_object *fbo = NULL;
+       struct drm_gem_object *fbo = NULL;
+       struct drm_i915_gem_object *obj_priv;
        struct device *device = &dev->pdev->dev; 
-       int ret;
+       int size, aligned_size, ret;
 
        mode_cmd.width = surface_width;/* crtc->desired_mode->hdisplay; */
        mode_cmd.height = surface_height;/* crtc->desired_mode->vdisplay; */
@@ -617,26 +705,28 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height
        mode_cmd.pitch = mode_cmd.width * ((mode_cmd.bpp + 1) / 8);
        mode_cmd.depth = 24;
 
-       ret = drm_buffer_object_create(dev, mode_cmd.pitch * mode_cmd.height, 
-                                       drm_bo_type_kernel,
-                                       DRM_BO_FLAG_READ |
-                                       DRM_BO_FLAG_WRITE |
-                                       DRM_BO_FLAG_MEM_TT |
-                                       DRM_BO_FLAG_MEM_VRAM |
-                                       DRM_BO_FLAG_NO_EVICT,
-                                       DRM_BO_HINT_DONT_FENCE, 0, 0,
-                                       &fbo);
-       if (ret || !fbo) {
+       size = mode_cmd.pitch * mode_cmd.height;
+       aligned_size = ALIGN(size, PAGE_SIZE);
+       fbo = drm_gem_object_alloc(dev, aligned_size);
+       if (!fbo) {
                printk(KERN_ERR "failed to allocate framebuffer\n");
-               return -EINVAL;
+               ret = -ENOMEM;
+               goto out;
+       }
+       obj_priv = fbo->driver_private;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+       if (ret) {
+               DRM_ERROR("failed to pin fb: %d\n", ret);
+               goto out_unref;
        }
-       
 
        fb = intel_user_framebuffer_create(dev, NULL, &mode_cmd);
        if (!fb) {
-               drm_bo_usage_deref_unlocked(&fbo);
                DRM_ERROR("failed to allocate fb.\n");
-               return -EINVAL;
+               ret = -ENOMEM;
+               goto out_unref;
        }
 
        list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
@@ -644,11 +734,13 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height
        intel_fb = to_intel_framebuffer(fb);
        *intel_fb_p = intel_fb;
 
-       intel_fb->bo = fbo;
+       intel_fb->obj = fbo;
 
        info = framebuffer_alloc(sizeof(struct intelfb_par), device);
-       if (!info)
-               return -EINVAL;
+       if (!info) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
 
        par = info->par;
 
@@ -667,19 +759,20 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height
        info->fbops = &intelfb_ops;
 
        info->fix.line_length = fb->pitch;
-       info->fix.smem_start = intel_fb->bo->offset + dev->mode_config.fb_base;
-       info->fix.smem_len = info->fix.line_length * fb->height;
+       info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+       info->fix.smem_len = size;
 
        info->flags = FBINFO_DEFAULT;
 
-       ret = drm_bo_kmap(intel_fb->bo, 0, intel_fb->bo->num_pages, &intel_fb->kmap);
-       if (ret)
-               DRM_ERROR("error mapping fb: %d\n", ret);
-
-       info->screen_base = intel_fb->kmap.virtual;
-       info->screen_size = info->fix.smem_len; /* FIXME */
+       info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
+                                      size);
+       if (!info->screen_base) {
+               ret = -ENOSPC;
+               goto out_unref;
+       }
+       info->screen_size = size;
 
-       memset(intel_fb->kmap.virtual, 0, info->screen_size);
+       memset(info->screen_base, 0, size);
 
        info->pseudo_palette = fb->pseudo_palette;
        info->var.xres_virtual = fb->width;
@@ -770,10 +863,17 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height
        par->dev = dev;
 
        /* To allow resizing without swapping buffers */
-       printk("allocated %dx%d fb: 0x%08lx, bo %p\n", intel_fb->base.width,
-              intel_fb->base.height, intel_fb->bo->offset, fbo);
+       printk(KERN_INFO "allocated %dx%d fb: 0x%08x, bo %p\n",
+              intel_fb->base.width,
+              intel_fb->base.height, obj_priv->gtt_offset, fbo);
 
+       mutex_unlock(&dev->struct_mutex);
        return 0;
+
+out_unref:
+       drm_gem_object_unreference(fbo);
+       mutex_unlock(&dev->struct_mutex);
+out:
+       return ret;
 }
 
 static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc)
@@ -1029,8 +1129,10 @@ int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
        
        if (info) {
                unregister_framebuffer(info);
-               drm_bo_kunmap(&intel_fb->kmap);
-               drm_bo_usage_deref_unlocked(&intel_fb->bo);
+               iounmap(info->screen_base);
+               mutex_lock(&dev->struct_mutex);
+               drm_gem_object_unreference(intel_fb->obj);
+               mutex_unlock(&dev->struct_mutex);
                framebuffer_release(info);
        }
 
index 06b9986..fa8209c 100644 (file)
@@ -407,8 +407,8 @@ void intel_lvds_init(struct drm_device *dev)
        drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
        drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-       connector->interlace_allowed = FALSE;
-       connector->doublescan_allowed = FALSE;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
 
 
        /*
index 29cfc03..3efa9d2 100644 (file)
@@ -422,18 +422,18 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 64,               .hblank_end         = 124,
                .hblank_start   = 836,              .htotal             = 857,
 
-               .progressive    = FALSE,            .trilevel_sync = FALSE,
+               .progressive    = false,            .trilevel_sync = false,
 
                .vsync_start_f1 = 6,                .vsync_start_f2     = 7,
                .vsync_len      = 6,
 
-               .veq_ena        = TRUE,             .veq_start_f1       = 0,
+               .veq_ena        = true,             .veq_start_f1       = 0,
                .veq_start_f2   = 1,                .veq_len            = 18,
 
                .vi_end_f1      = 20,               .vi_end_f2          = 21,
                .nbr_end        = 240,
 
-               .burst_ena      = TRUE,
+               .burst_ena      = true,
                .hburst_start   = 72,               .hburst_len         = 34,
                .vburst_start_f1 = 9,               .vburst_end_f1      = 240,
                .vburst_start_f2 = 10,              .vburst_end_f2      = 240,
@@ -445,7 +445,7 @@ const static struct tv_mode tv_modes[] = {
                .dda2_inc       =   7624,           .dda2_size          =  20013,
                .dda3_inc       =      0,           .dda3_size          =      0,
                .sc_reset       = TV_SC_RESET_EVERY_4,
-               .pal_burst      = FALSE,
+               .pal_burst      = false,
 
                .composite_levels = &ntsc_m_levels_composite,
                .composite_color = &ntsc_m_csc_composite,
@@ -464,12 +464,12 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 64,               .hblank_end         = 124,
                .hblank_start   = 836,              .htotal             = 857,
 
-               .progressive    = FALSE,            .trilevel_sync = FALSE,
+               .progressive    = false,            .trilevel_sync = false,
 
                .vsync_start_f1 = 6,                .vsync_start_f2     = 7,
                .vsync_len      = 6,
 
-               .veq_ena        = TRUE,             .veq_start_f1       = 0,
+               .veq_ena        = true,             .veq_start_f1       = 0,
                .veq_start_f2   = 1,                .veq_len            = 18,
 
                .vi_end_f1      = 20,               .vi_end_f2          = 21,
@@ -487,7 +487,7 @@ const static struct tv_mode tv_modes[] = {
                .dda2_inc       =  18557,       .dda2_size      =  20625,
                .dda3_inc       =      0,       .dda3_size      =      0,
                .sc_reset   = TV_SC_RESET_EVERY_8,
-               .pal_burst  = TRUE,
+               .pal_burst  = true,
 
                .composite_levels = &ntsc_m_levels_composite,
                .composite_color = &ntsc_m_csc_composite,
@@ -507,18 +507,18 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 64,               .hblank_end         = 124,
                .hblank_start = 836,        .htotal             = 857,
 
-               .progressive    = FALSE,    .trilevel_sync = FALSE,
+               .progressive    = false,    .trilevel_sync = false,
 
                .vsync_start_f1 = 6,        .vsync_start_f2     = 7,
                .vsync_len      = 6,
 
-               .veq_ena        = TRUE,             .veq_start_f1       = 0,
+               .veq_ena        = true,             .veq_start_f1       = 0,
                .veq_start_f2 = 1,          .veq_len            = 18,
 
                .vi_end_f1      = 20,               .vi_end_f2          = 21,
                .nbr_end        = 240,
 
-               .burst_ena      = TRUE,
+               .burst_ena      = true,
                .hburst_start   = 72,               .hburst_len         = 34,
                .vburst_start_f1 = 9,               .vburst_end_f1      = 240,
                .vburst_start_f2 = 10,              .vburst_end_f2      = 240,
@@ -530,7 +530,7 @@ const static struct tv_mode tv_modes[] = {
                .dda2_inc       =   7624,           .dda2_size          =  20013,
                .dda3_inc       =      0,           .dda3_size          =      0,
                .sc_reset       = TV_SC_RESET_EVERY_4,
-               .pal_burst      = FALSE,
+               .pal_burst      = false,
 
                .composite_levels = &ntsc_j_levels_composite,
                .composite_color = &ntsc_j_csc_composite,
@@ -550,18 +550,18 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 64,             .hblank_end           = 124,
                .hblank_start = 836,      .htotal               = 857,
 
-               .progressive    = FALSE,            .trilevel_sync = FALSE,
+               .progressive    = false,            .trilevel_sync = false,
 
                .vsync_start_f1 = 6,                .vsync_start_f2     = 7,
                .vsync_len      = 6,
 
-               .veq_ena        = TRUE,             .veq_start_f1       = 0,
+               .veq_ena        = true,             .veq_start_f1       = 0,
                .veq_start_f2   = 1,                .veq_len            = 18,
 
                .vi_end_f1      = 20,               .vi_end_f2          = 21,
                .nbr_end        = 240,
 
-               .burst_ena      = TRUE,
+               .burst_ena      = true,
                .hburst_start   = 72,               .hburst_len         = 34,
                .vburst_start_f1 = 9,               .vburst_end_f1      = 240,
                .vburst_start_f2 = 10,              .vburst_end_f2      = 240,
@@ -573,7 +573,7 @@ const static struct tv_mode tv_modes[] = {
                .dda2_inc       =    7624,          .dda2_size          =  20013,
                .dda3_inc       =      0,           .dda3_size          =      0,
                .sc_reset       = TV_SC_RESET_EVERY_4,
-               .pal_burst  = FALSE,
+               .pal_burst  = false,
 
                .composite_levels = &pal_m_levels_composite,
                .composite_color = &pal_m_csc_composite,
@@ -593,19 +593,19 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 64,               .hblank_end         = 128,
                .hblank_start = 844,        .htotal             = 863,
 
-               .progressive  = FALSE,    .trilevel_sync = FALSE,
+               .progressive  = false,    .trilevel_sync = false,
 
 
                .vsync_start_f1 = 6,       .vsync_start_f2      = 7,
                .vsync_len      = 6,
 
-               .veq_ena        = TRUE,             .veq_start_f1       = 0,
+               .veq_ena        = true,             .veq_start_f1       = 0,
                .veq_start_f2   = 1,                .veq_len            = 18,
 
                .vi_end_f1      = 24,               .vi_end_f2          = 25,
                .nbr_end        = 286,
 
-               .burst_ena      = TRUE,
+               .burst_ena      = true,
                .hburst_start = 73,                 .hburst_len         = 34,
                .vburst_start_f1 = 8,       .vburst_end_f1      = 285,
                .vburst_start_f2 = 8,       .vburst_end_f2      = 286,
@@ -618,7 +618,7 @@ const static struct tv_mode tv_modes[] = {
                .dda2_inc       =  18557,       .dda2_size      =  20625,
                .dda3_inc       =      0,       .dda3_size      =      0,
                .sc_reset   = TV_SC_RESET_EVERY_8,
-               .pal_burst  = TRUE,
+               .pal_burst  = true,
 
                .composite_levels = &pal_n_levels_composite,
                .composite_color = &pal_n_csc_composite,
@@ -638,18 +638,18 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 64,               .hblank_end         = 128,
                .hblank_start   = 844,      .htotal             = 863,
 
-               .progressive    = FALSE,    .trilevel_sync = FALSE,
+               .progressive    = false,    .trilevel_sync = false,
 
                .vsync_start_f1 = 5,        .vsync_start_f2     = 6,
                .vsync_len      = 5,
 
-               .veq_ena        = TRUE,             .veq_start_f1       = 0,
+               .veq_ena        = true,             .veq_start_f1       = 0,
                .veq_start_f2   = 1,        .veq_len            = 15,
 
                .vi_end_f1      = 24,               .vi_end_f2          = 25,
                .nbr_end        = 286,
 
-               .burst_ena      = TRUE,
+               .burst_ena      = true,
                .hburst_start   = 73,               .hburst_len         = 32,
                .vburst_start_f1 = 8,               .vburst_end_f1      = 285,
                .vburst_start_f2 = 8,               .vburst_end_f2      = 286,
@@ -661,7 +661,7 @@ const static struct tv_mode tv_modes[] = {
                .dda2_inc       =  18557,       .dda2_size      =  20625,
                .dda3_inc       =      0,       .dda3_size      =      0,
                .sc_reset   = TV_SC_RESET_EVERY_8,
-               .pal_burst  = TRUE,
+               .pal_burst  = true,
 
                .composite_levels = &pal_levels_composite,
                .composite_color = &pal_csc_composite,
@@ -680,17 +680,17 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 64,               .hblank_end         = 122,
                .hblank_start   = 842,              .htotal             = 857,
 
-               .progressive    = TRUE,.trilevel_sync = FALSE,
+               .progressive    = true,.trilevel_sync = false,
 
                .vsync_start_f1 = 12,               .vsync_start_f2     = 12,
                .vsync_len      = 12,
 
-               .veq_ena        = FALSE,
+               .veq_ena        = false,
 
                .vi_end_f1      = 44,               .vi_end_f2          = 44,
                .nbr_end        = 496,
 
-               .burst_ena      = FALSE,
+               .burst_ena      = false,
 
                .filter_table = filter_table,
        },
@@ -704,17 +704,17 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 64,               .hblank_end         = 122,
                .hblank_start   = 842,              .htotal             = 856,
 
-               .progressive    = TRUE,.trilevel_sync = FALSE,
+               .progressive    = true,.trilevel_sync = false,
 
                .vsync_start_f1 = 12,               .vsync_start_f2     = 12,
                .vsync_len      = 12,
 
-               .veq_ena        = FALSE,
+               .veq_ena        = false,
 
                .vi_end_f1      = 44,               .vi_end_f2          = 44,
                .nbr_end        = 496,
 
-               .burst_ena      = FALSE,
+               .burst_ena      = false,
 
                .filter_table = filter_table,
        },
@@ -728,17 +728,17 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 64,               .hblank_end         = 139,
                .hblank_start   = 859,              .htotal             = 863,
 
-               .progressive    = TRUE,         .trilevel_sync = FALSE,
+               .progressive    = true,         .trilevel_sync = false,
 
                .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
                .vsync_len      = 10,
 
-               .veq_ena        = FALSE,
+               .veq_ena        = false,
 
                .vi_end_f1      = 48,               .vi_end_f2          = 48,
                .nbr_end        = 575,
 
-               .burst_ena      = FALSE,
+               .burst_ena      = false,
 
                .filter_table = filter_table,
        },
@@ -752,17 +752,17 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 80,               .hblank_end         = 300,
                .hblank_start   = 1580,             .htotal             = 1649,
 
-               .progressive    = TRUE,             .trilevel_sync = TRUE,
+               .progressive    = true,             .trilevel_sync = true,
 
                .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
                .vsync_len      = 10,
 
-               .veq_ena        = FALSE,
+               .veq_ena        = false,
 
                .vi_end_f1      = 29,               .vi_end_f2          = 29,
                .nbr_end        = 719,
 
-               .burst_ena      = FALSE,
+               .burst_ena      = false,
 
                .filter_table = filter_table,
        },
@@ -776,17 +776,17 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 80,               .hblank_end         = 300,
                .hblank_start   = 1580,             .htotal             = 1651,
 
-               .progressive    = TRUE,             .trilevel_sync = TRUE,
+               .progressive    = true,             .trilevel_sync = true,
 
                .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
                .vsync_len      = 10,
 
-               .veq_ena        = FALSE,
+               .veq_ena        = false,
 
                .vi_end_f1      = 29,               .vi_end_f2          = 29,
                .nbr_end        = 719,
 
-               .burst_ena      = FALSE,
+               .burst_ena      = false,
 
                .filter_table = filter_table,
        },
@@ -800,17 +800,17 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 80,               .hblank_end         = 300,
                .hblank_start   = 1580,             .htotal             = 1979,
 
-               .progressive    = TRUE,                 .trilevel_sync = TRUE,
+               .progressive    = true,                 .trilevel_sync = true,
 
                .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
                .vsync_len      = 10,
 
-               .veq_ena        = FALSE,
+               .veq_ena        = false,
 
                .vi_end_f1      = 29,               .vi_end_f2          = 29,
                .nbr_end        = 719,
 
-               .burst_ena      = FALSE,
+               .burst_ena      = false,
 
                .filter_table = filter_table,
                .max_srcw = 800
@@ -825,19 +825,19 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 88,               .hblank_end         = 235,
                .hblank_start   = 2155,             .htotal             = 2639,
 
-               .progressive    = FALSE,            .trilevel_sync = TRUE,
+               .progressive    = false,            .trilevel_sync = true,
 
                .vsync_start_f1 = 4,              .vsync_start_f2     = 5,
                .vsync_len      = 10,
 
-               .veq_ena        = TRUE,             .veq_start_f1       = 4,
+               .veq_ena        = true,             .veq_start_f1       = 4,
                .veq_start_f2   = 4,        .veq_len            = 10,
 
 
                .vi_end_f1      = 21,           .vi_end_f2          = 22,
                .nbr_end        = 539,
 
-               .burst_ena      = FALSE,
+               .burst_ena      = false,
 
                .filter_table = filter_table,
        },
@@ -851,19 +851,19 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 88,               .hblank_end         = 235,
                .hblank_start   = 2155,             .htotal             = 2199,
 
-               .progressive    = FALSE,            .trilevel_sync = TRUE,
+               .progressive    = false,            .trilevel_sync = true,
 
                .vsync_start_f1 = 4,               .vsync_start_f2     = 5,
                .vsync_len      = 10,
 
-               .veq_ena        = TRUE,             .veq_start_f1       = 4,
+               .veq_ena        = true,             .veq_start_f1       = 4,
                .veq_start_f2   = 4,                .veq_len            = 10,
 
 
                .vi_end_f1      = 21,               .vi_end_f2          = 22,
                .nbr_end        = 539,
 
-               .burst_ena      = FALSE,
+               .burst_ena      = false,
 
                .filter_table = filter_table,
        },
@@ -877,19 +877,19 @@ const static struct tv_mode tv_modes[] = {
                .hsync_end      = 88,               .hblank_end         = 235,
                .hblank_start   = 2155,             .htotal             = 2200,
 
-               .progressive    = FALSE,            .trilevel_sync = TRUE,
+               .progressive    = false,            .trilevel_sync = true,
 
                .vsync_start_f1 = 4,            .vsync_start_f2    = 5,
                .vsync_len      = 10,
 
-               .veq_ena        = TRUE,             .veq_start_f1       = 4,
+               .veq_ena        = true,             .veq_start_f1       = 4,
                .veq_start_f2 = 4,                  .veq_len = 10,
 
 
                .vi_end_f1      = 21,           .vi_end_f2              = 22,
                .nbr_end        = 539,
 
-               .burst_ena      = FALSE,
+               .burst_ena      = false,
 
                .filter_table = filter_table,
        },
@@ -1098,17 +1098,17 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
        struct drm_encoder *other_encoder;
 
        if (!tv_mode)
-               return FALSE;
+               return false;
     
        /* FIXME: lock encoder list */
        list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
                if (other_encoder != encoder &&
                    other_encoder->crtc == encoder->crtc)
-                       return FALSE;
+                       return false;
        }
 
        adjusted_mode->clock = tv_mode->clock;
-       return TRUE;
+       return true;
 }
 
 static void
@@ -1152,7 +1152,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                        color_conversion = &sdtv_csc_yprpb;
                else
                        color_conversion = &hdtv_csc_yprpb;
-               burst_ena = FALSE;
+               burst_ena = false;
                break;
        case DRM_MODE_CONNECTOR_SVIDEO:
                tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
@@ -1352,8 +1352,8 @@ static const struct drm_display_mode reported_modes[] = {
  *
  * Requires that the current pipe's DPLL is active.
 
- * \return TRUE if TV is connected.
- * \return FALSE if TV is disconnected.
+ * \return true if TV is connected.
+ * \return false if TV is disconnected.
  */
 static int
 intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
@@ -1703,8 +1703,8 @@ intel_tv_init(struct drm_device *dev)
     
        drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs);
        drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
-       connector->interlace_allowed = FALSE;
-       connector->doublescan_allowed = FALSE;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
 
        /* Create TV properties then attach current values */
        tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES,
index ab3b23a..86347e0 100644 (file)
@@ -229,7 +229,7 @@ out_cleanup:
         if (tmp_mem.mm_node) {
                 mutex_lock(&dev->struct_mutex);
                 if (tmp_mem.mm_node != bo->pinned_node)
-                        drm_mm_put_block(tmp_mem.mm_node);
+                        drm_memrange_put_block(tmp_mem.mm_node);
                 tmp_mem.mm_node = NULL;
                 mutex_unlock(&dev->struct_mutex);
         }
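
Here and in the next hunk the drm_mm_* range-allocator identifiers become
drm_memrange_*; the allocator API is otherwise unchanged, presumably to
free the old prefix for GEM's memory management.  A hedged caller sketch,
with the helper name invented for illustration:

	/* was: struct drm_mm_node and drm_mm_put_block() */
	static void release_node(struct drm_memrange_node *node)
	{
		drm_memrange_put_block(node);
	}
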
index cc4d5a9..81704ea 100644 (file)
@@ -280,7 +280,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_ttm_backend *be;
        struct drm_scatter_gather sgreq;
-       struct drm_mm_node mm_node;
+       struct drm_memrange_node mm_node;
        struct drm_bo_mem_reg mem;
        int ret;
 
index 64401ae..b31ac2d 100644 (file)
@@ -135,7 +135,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
                DRM_DEBUG("info->cmdring.last_ptr != NULL\n");
 
                if (pCmdInfo->type == BTYPE_3D) {
-                       xgi_emit_flush(info, FALSE);
+                       xgi_emit_flush(info, false);
                }
 
                info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]);
@@ -214,7 +214,7 @@ void xgi_cmdlist_cleanup(struct xgi_info * info)
                 * list chain with a flush command.
                 */
                if (info->cmdring.last_ptr != NULL) {
-                       xgi_emit_flush(info, FALSE);
+                       xgi_emit_flush(info, false);
                        xgi_emit_nop(info);
                }
 
@@ -322,5 +322,5 @@ void xgi_emit_irq(struct xgi_info * info)
        if (info->cmdring.last_ptr == NULL)
                return;
 
-       xgi_emit_flush(info, TRUE);
+       xgi_emit_flush(info, true);
 }
index f0225f8..f8ed7de 100644 (file)
@@ -307,8 +307,8 @@ void xgi_driver_lastclose(struct drm_device * dev)
                    || info->pcie_heap_initialized) {
                        drm_sman_cleanup(&info->sman);
 
-                       info->fb_heap_initialized = FALSE;
-                       info->pcie_heap_initialized = FALSE;
+                       info->fb_heap_initialized = false;
+                       info->pcie_heap_initialized = false;
                }
        }
 }
index 2b3a178..2a9632f 100644 (file)
@@ -46,41 +46,41 @@ static bool xgi_validate_signal(struct drm_map * map)
                check = le16_to_cpu(DRM_READ16(map, 0x2360));
 
                if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
-                       return FALSE;
+                       return false;
                }
 
                /* Check RO channel */
                DRM_WRITE8(map, 0x235c, 0x83);
                check = le16_to_cpu(DRM_READ16(map, 0x2360));
                if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
-                       return FALSE;
+                       return false;
                }
 
                /* Check RW channel */
                DRM_WRITE8(map, 0x235c, 0x88);
                check = le16_to_cpu(DRM_READ16(map, 0x2360));
                if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
-                       return FALSE;
+                       return false;
                }
 
                /* Check RO channel outstanding */
                DRM_WRITE8(map, 0x235c, 0x8f);
                check = le16_to_cpu(DRM_READ16(map, 0x2360));
                if (0 != (check & 0x3ff)) {
-                       return FALSE;
+                       return false;
                }
 
                /* Check RW channel outstanding */
                DRM_WRITE8(map, 0x235c, 0x90);
                check = le16_to_cpu(DRM_READ16(map, 0x2360));
                if (0 != (check & 0x3ff)) {
-                       return FALSE;
+                       return false;
                }
 
                /* No pending PCIE request. GE stall. */
        }
 
-       return TRUE;
+       return true;
 }
 
 
@@ -138,7 +138,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
 bool xgi_ge_irq_handler(struct xgi_info * info)
 {
        const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
-       bool is_support_auto_reset = FALSE;
+       bool is_support_auto_reset = false;
 
        /* Check GE on/off */
        if (0 == (0xffffc0f0 & int_status)) {
@@ -179,15 +179,15 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
                                    cpu_to_le32((int_status & ~0x01) | 0x04000000));
                }
 
-               return TRUE;
+               return true;
        }
 
-       return FALSE;
+       return false;
 }
 
 bool xgi_crt_irq_handler(struct xgi_info * info)
 {
-       bool ret = FALSE;
+       bool ret = false;
        u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
 
        /* CRT1 interrupt just happened
@@ -205,7 +205,7 @@ bool xgi_crt_irq_handler(struct xgi_info * info)
                op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
                OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
                OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
-               ret = TRUE;
+               ret = true;
        }
        DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
 
@@ -214,7 +214,7 @@ bool xgi_crt_irq_handler(struct xgi_info * info)
 
 bool xgi_dvi_irq_handler(struct xgi_info * info)
 {
-       bool ret = FALSE;
+       bool ret = false;
        const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
 
        /* DVI interrupt just happened
@@ -242,7 +242,7 @@ bool xgi_dvi_irq_handler(struct xgi_info * info)
                OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));
                OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));
 
-               ret = TRUE;
+               ret = true;
        }
        DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
 
index 4119064..900bffc 100644 (file)
@@ -993,6 +993,30 @@ struct drm_mm_info_arg {
        uint64_t p_size;
 };
 
+struct drm_gem_close {
+       /** Handle of the object to be closed. */
+       uint32_t handle;
+       uint32_t pad;
+};
+
+struct drm_gem_flink {
+       /** Handle for the object being named */
+       uint32_t handle;
+
+       /** Returned global name */
+       uint32_t name;
+};
+
+struct drm_gem_open {
+       /** Name of object being opened */
+       uint32_t name;
+
+       /** Returned handle for the object */
+       uint32_t handle;
+       
+       /** Returned size of the object */
+       uint64_t size;
+};
 
 /*
  * Drm mode setting
@@ -1257,7 +1281,11 @@ struct drm_mode_crtc_lut {
 #define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
 #define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
 #define DRM_IOCTL_SET_VERSION          DRM_IOWR(0x07, struct drm_set_version)
-#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08,  struct drm_modeset_ctl)
+
+#define DRM_IOCTL_GEM_CLOSE            DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK            DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN             DRM_IOWR(0x0b, struct drm_gem_open)
 
 #define DRM_IOCTL_SET_UNIQUE           DRM_IOW( 0x10, struct drm_unique)
 #define DRM_IOCTL_AUTH_MAGIC           DRM_IOW( 0x11, struct drm_auth)
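
The three GEM ioctls above cover object lifetime and cross-process
sharing: FLINK publishes a per-fd handle under a global name, OPEN turns
a name back into a local handle (and reports the object's size), and
CLOSE drops a handle.  A hedged userspace sketch of the round trip,
assuming an open, authenticated DRM fd and an existing GEM handle, with
minimal error handling:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include "drm.h"	/* the header patched above */

	static int share_and_reopen(int fd, uint32_t handle)
	{
		struct drm_gem_flink flink = { .handle = handle };
		struct drm_gem_open open_arg = { 0 };
		struct drm_gem_close close_arg = { 0 };

		if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) < 0)
			return -1;	/* flink.name now holds the global name */

		open_arg.name = flink.name;	/* usable from any authenticated fd */
		if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg) < 0)
			return -1;	/* yields a new handle plus the object size */

		close_arg.handle = open_arg.handle;	/* drop the duplicate handle */
		return ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
	}
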
index db857fb..09c5367 100644 (file)
@@ -41,10 +41,14 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
        u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+       u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
+       u32 last_acthd = I915_READ(acthd_reg);
+       u32 acthd;
        int i;
 
        for (i = 0; i < 10000; i++) {
                ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+               acthd = I915_READ(acthd_reg);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->Size;
@@ -54,13 +58,41 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
                if (ring->head != last_head)
                        i = 0;
 
+               if (acthd != last_acthd)
+                       i = 0;
+
                last_head = ring->head;
-               DRM_UDELAY(1);
+               last_acthd = acthd;
+               msleep_interruptible (10);
        }
 
        return -EBUSY;
 }
 
+#if I915_RING_VALIDATE
+/**
+ * Validate the cached ring tail value
+ *
+ * If the X server writes to the ring and DRM doesn't
+ * reload the head and tail pointers, it will end up writing
+ * data to the wrong place in the ring, causing havoc.
+ */
+void i915_ring_validate(struct drm_device *dev, const char *func, int line)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+       u32     tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
+       u32     head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;
+
+       if (tail != ring->tail) {
+               DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
+                         func, line,
+                         ring->head, head, ring->tail, tail);
+               BUG_ON(1);
+       }
+}
+#endif
+
 void i915_kernel_lost_context(struct drm_device * dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -89,7 +121,7 @@ int i915_dma_cleanup(struct drm_device * dev)
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
-       if (dev->irq)
+       if (dev->irq_enabled)
                drm_irq_uninstall(dev);
 
         if (dev_priv->ring.virtual_start) {
@@ -107,17 +139,16 @@ int i915_dma_cleanup(struct drm_device * dev)
                 I915_WRITE(0x02080, 0x1ffff000);
         }
 
-        if (dev_priv->status_gfx_addr) {
-                dev_priv->status_gfx_addr = 0;
+        if (dev_priv->hws_agpoffset) {
+                dev_priv->hws_agpoffset = 0;
                 drm_core_ioremapfree(&dev_priv->hws_map, dev);
                 I915_WRITE(0x02080, 0x1ffff000);
         }
 
-
        return 0;
 }
 
-#if defined(I915_HAVE_BUFFER)
+#if defined(I915_HAVE_BUFFER) && defined(DRI2)
 #define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
 #define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
 #define DRI2_SAREA_BLOCK_NEXT(p)                               \
@@ -195,27 +226,22 @@ static int i915_initialize(struct drm_device * dev,
                }
        }
 
-
 #ifdef I915_HAVE_BUFFER
        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
                dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
        }
 #endif
 
-       if (!dev_priv->ring.Size) {
-               dev_priv->ring.Start = init->ring_start;
-               dev_priv->ring.End = init->ring_end;
+       if (init->ring_size != 0) {
                dev_priv->ring.Size = init->ring_size;
                dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-               
                dev_priv->ring.map.offset = init->ring_start;
                dev_priv->ring.map.size = init->ring_size;
                dev_priv->ring.map.type = 0;
                dev_priv->ring.map.flags = 0;
                dev_priv->ring.map.mtrr = 0;
-               
                drm_core_ioremap(&dev_priv->ring.map, dev);
-               
+
                if (dev_priv->ring.map.handle == NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("can not ioremap virtual address for"
@@ -225,7 +251,6 @@ static int i915_initialize(struct drm_device * dev,
                dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
        }
 
-
        dev_priv->cpp = init->cpp;
        master_priv->sarea_priv->pf_current_page = 0;
 
@@ -251,10 +276,10 @@ static int i915_initialize(struct drm_device * dev,
                        DRM_ERROR("Can not allocate hardware status page\n");
                        return -ENOMEM;
                }
-               dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+               dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
                dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-               memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+               memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
 
                I915_WRITE(0x02080, dev_priv->dma_status_page);
        }
@@ -264,8 +289,7 @@ static int i915_initialize(struct drm_device * dev,
        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
                mutex_init(&dev_priv->cmdbuf_mutex);
        }
-#endif
-#if defined(I915_HAVE_BUFFER)
+#ifdef DRI2
        if (init->func == I915_INIT_DMA2) {
                int ret = setup_dri2_sarea(dev, file_priv, init);
                if (ret) {
@@ -274,7 +298,8 @@ static int i915_initialize(struct drm_device * dev,
                        return ret;
                }
        }
-#endif
+#endif /* DRI2 */
+#endif /* I915_HAVE_BUFFER */
 
        return 0;
 }
@@ -288,11 +313,6 @@ static int i915_dma_resume(struct drm_device * dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       if (!dev_priv->mmio_map) {
-               DRM_ERROR("can not find mmio map!\n");
-               return -EINVAL;
-       }
-
        if (dev_priv->ring.map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
@@ -300,16 +320,16 @@ static int i915_dma_resume(struct drm_device * dev)
        }
 
        /* Program Hardware Status Page */
-       if (!dev_priv->hw_status_page) {
+       if (!dev_priv->hws_vaddr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
-       DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+       DRM_DEBUG("hw status page @ %p\n", dev_priv->hws_vaddr);
 
-       if (dev_priv->status_gfx_addr != 0)
-               I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+       if (dev_priv->hws_agpoffset != 0)
+               I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
        else
-               I915_WRITE(0x02080, dev_priv->dma_status_page);
+               I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");
 
        return 0;
@@ -456,9 +476,9 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
        return 0;
 }
 
-static int i915_emit_box(struct drm_device * dev,
-                        struct drm_clip_rect __user * boxes,
-                        int i, int DR1, int DR4)
+int i915_emit_box(struct drm_device * dev,
+                 struct drm_clip_rect __user * boxes,
+                 int i, int DR1, int DR4)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_clip_rect box;
@@ -514,7 +534,7 @@ void i915_emit_breadcrumb(struct drm_device *dev)
 
        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
-       OUT_RING(20);
+       OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();
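
A quick check of the breadcrumb arithmetic, assuming
MI_STORE_DWORD_INDEX_SHIFT is 2 (the index field counts DWORDs, so the
shift converts an index into a byte offset):

	5 << MI_STORE_DWORD_INDEX_SHIFT  ==  5 << 2  ==  20

so the symbolic form emits the same value as the old literal, and the
breadcrumb still lands in DWORD 5 of the hardware status page.
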
@@ -713,9 +733,19 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
 int i915_quiescent(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
 
        i915_kernel_lost_context(dev);
-       return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+       ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+       if (ret)
+       {
+               i915_kernel_lost_context (dev);
+               DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
+                          dev_priv->ring.head,
+                          dev_priv->ring.tail,
+                          dev_priv->ring.space);
+       }
+       return ret;
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -855,7 +885,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 
        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
-               value = dev->irq ? 1 : 0;
+               value = dev->irq_enabled ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -1003,7 +1033,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
        DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
 
-       dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+       dev_priv->hws_agpoffset = hws->addr & (0x1ffff<<12);
 
        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
@@ -1014,18 +1044,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
        drm_core_ioremap(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.handle == NULL) {
                i915_dma_cleanup(dev);
-               dev_priv->status_gfx_addr = 0;
+               dev_priv->hws_agpoffset = 0;
                DRM_ERROR("can not ioremap virtual address for"
                                " G33 hw status page\n");
                return -ENOMEM;
        }
-       dev_priv->hw_status_page = dev_priv->hws_map.handle;
+       dev_priv->hws_vaddr = dev_priv->hws_map.handle;
+
+       memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
+       I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
+       DRM_DEBUG("load hws at %p\n", dev_priv->hws_vaddr);
 
-       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-       I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-       DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
-                       dev_priv->status_gfx_addr);
-       DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
        return 0;
 }
 
@@ -1051,6 +1080,20 @@ struct drm_ioctl_desc i915_ioctls[] = {
 #ifdef I915_HAVE_BUFFER
        DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
 #endif
+       DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
index bdcac9a..8ba7168 100644 (file)
@@ -176,6 +176,20 @@ typedef struct drm_i915_sarea {
 #define DRM_I915_MMIO          0x10
 #define DRM_I915_HWS_ADDR      0x11
 #define DRM_I915_EXECBUFFER    0x12
+#define DRM_I915_GEM_INIT      0x13
+#define DRM_I915_GEM_EXECBUFFER        0x14
+#define DRM_I915_GEM_PIN       0x15
+#define DRM_I915_GEM_UNPIN     0x16
+#define DRM_I915_GEM_BUSY      0x17
+#define DRM_I915_GEM_THROTTLE  0x18
+#define DRM_I915_GEM_ENTERVT   0x19
+#define DRM_I915_GEM_LEAVEVT   0x1a
+#define DRM_I915_GEM_CREATE    0x1b
+#define DRM_I915_GEM_PREAD     0x1c
+#define DRM_I915_GEM_PWRITE    0x1d
+#define DRM_I915_GEM_MMAP      0x1e
+#define DRM_I915_GEM_SET_DOMAIN        0x1f
+#define DRM_I915_GEM_SW_FINISH 0x20
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -195,6 +209,20 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_VBLANK_SWAP     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
 #define DRM_IOCTL_I915_MMIO             DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
 #define DRM_IOCTL_I915_EXECBUFFER      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
+#define DRM_IOCTL_I915_GEM_INIT                DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER  DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_PIN         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE    DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT     DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT     DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD       DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 
 /* Asynchronous page flipping:
  */
@@ -399,4 +427,232 @@ struct drm_i915_execbuffer {
        struct drm_fence_arg fence_arg;
 };
 
+struct drm_i915_gem_init {
+       /**
+        * Beginning offset in the GTT to be managed by the DRM memory
+        * manager.
+        */
+       uint64_t gtt_start;
+       /**
+        * Ending offset in the GTT to be managed by the DRM memory
+        * manager.
+        */
+       uint64_t gtt_end;
+};
+
+struct drm_i915_gem_create {
+       /**
+        * Requested size for the object.
+        *
+        * The (page-aligned) allocated size for the object will be returned.
+        */
+       uint64_t size;
+       /**
+        * Returned handle for the object.
+        *
+        * Object handles are nonzero.
+        */
+       uint32_t handle;
+       uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+       /** Handle for the object being read. */
+       uint32_t handle;
+       uint32_t pad;
+       /** Offset into the object to read from */
+       uint64_t offset;
+       /** Length of data to read */
+       uint64_t size;
+       /** Pointer to write the data into. */
+       uint64_t data_ptr;      /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_pwrite {
+       /** Handle for the object being written to. */
+       uint32_t handle;
+       uint32_t pad;
+       /** Offset into the object to write to */
+       uint64_t offset;
+       /** Length of data to write */
+       uint64_t size;
+       /** Pointer to read the data from. */
+       uint64_t data_ptr;      /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_mmap {
+       /** Handle for the object being mapped. */
+       uint32_t handle;
+       uint32_t pad;
+       /** Offset in the object to map. */
+       uint64_t offset;
+       /**
+        * Length of data to map.
+        *
+        * The value will be page-aligned.
+        */
+       uint64_t size;
+       /** Returned pointer the data was mapped at */
+       uint64_t addr_ptr;      /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_set_domain {
+       /** Handle for the object */
+       uint32_t handle;
+
+       /** New read domains */
+       uint32_t read_domains;
+
+       /** New write domain */
+       uint32_t write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+       /** Handle for the object */
+       uint32_t handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+       /**
+        * Handle of the buffer being pointed to by this relocation entry.
+        *
+        * It's appealing to make this be an index into the mm_validate_entry
+        * list to refer to the buffer, but this allows the driver to create
+        * a relocation list for state buffers and not re-write it per
+        * exec using the buffer.
+        */
+       uint32_t target_handle;
+
+       /**
+        * Value to be added to the offset of the target buffer to make up
+        * the relocation entry.
+        */
+       uint32_t delta;
+
+       /** Offset in the buffer the relocation entry will be written into */
+       uint64_t offset;
+
+       /**
+        * Offset value of the target buffer that the relocation entry was last
+        * written as.
+        *
+        * If the buffer has the same offset as last time, we can skip syncing
+        * and writing the relocation.  This value is written back out by
+        * the execbuffer ioctl when the relocation is written.
+        */
+       uint64_t presumed_offset;
+
+       /**
+        * Target memory domains read by this operation.
+        */
+       uint32_t read_domains;
+
+       /**
+        * Target memory domains written by this operation.
+        *
+        * Note that only one domain may be written by the whole
+        * execbuffer operation, so that where there are conflicts,
+        * the application will get -EINVAL back.
+        */
+       uint32_t write_domain;
+};
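
As a concrete reading of the fields above, a hedged sketch of userspace
filling in one relocation entry; the helper and its values are purely
illustrative:

	static struct drm_i915_gem_relocation_entry
	make_reloc(uint32_t target_handle, uint64_t byte_offset)
	{
		struct drm_i915_gem_relocation_entry reloc = {
			.target_handle   = target_handle,	/* buffer pointed at */
			.delta           = 0,			/* point at its start */
			.offset          = byte_offset,		/* where to patch */
			.presumed_offset = 0,			/* last-seen target offset;
								 * rewritten only if stale */
			.read_domains    = I915_GEM_DOMAIN_SAMPLER,
			.write_domain    = 0,
		};
		return reloc;
	}
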
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU            0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER         0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER                0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND                0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION    0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX         0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT            0x00000040
+/** @} */
+
+struct drm_i915_gem_exec_object {
+       /**
+        * User's handle for a buffer to be bound into the GTT for this
+        * operation.
+        */
+       uint32_t handle;
+
+       /** Number of relocations to be performed on this buffer */
+       uint32_t relocation_count;
+       /**
+        * Pointer to array of struct drm_i915_gem_relocation_entry containing
+        * the relocations to be performed in this buffer.
+        */
+       uint64_t relocs_ptr;
+
+       /** Required alignment in graphics aperture */
+       uint64_t alignment;
+
+       /**
+        * Returned value of the updated offset of the object, for future
+        * presumed_offset writes.
+        */
+       uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+       /**
+        * List of buffers to be validated with their relocations to be
+        * performed on them.
+        *
+        * This is a pointer to an array of struct drm_i915_gem_exec_object.
+        *
+        * These buffers must be listed in an order such that all relocations
+        * a buffer is performing refer to buffers that have already appeared
+        * in the validate list.
+        */
+       uint64_t buffers_ptr;
+       uint32_t buffer_count;
+
+       /** Offset in the batchbuffer to start execution from. */
+       uint32_t batch_start_offset;
+       /** Bytes used in batchbuffer from batch_start_offset */
+       uint32_t batch_len;
+       uint32_t DR1;
+       uint32_t DR4;
+       uint32_t num_cliprects;
+       uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+};
+
+struct drm_i915_gem_pin {
+       /** Handle of the buffer to be pinned. */
+       uint32_t handle;
+       uint32_t pad;
+
+       /** alignment required within the aperture */
+       uint64_t alignment;
+
+       /** Returned GTT offset of the buffer. */
+       uint64_t offset;
+};
+
+struct drm_i915_gem_unpin {
+       /** Handle of the buffer to be unpinned. */
+       uint32_t handle;
+       uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+       /** Handle of the buffer to check for busy */
+       uint32_t handle;
+
+       /** Return busy status (1 if busy, 0 if idle) */
+       uint32_t busy;
+};
+
 #endif                         /* _I915_DRM_H_ */
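
Taken together, the structs above describe a simple userspace flow:
create an object, push data into it with pwrite, then map it for direct
access.  A hedged sketch under the same assumptions as the earlier
example (open, authenticated fd; no error recovery); the helper name is
illustrative:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include "i915_drm.h"	/* the header patched above */

	static void *create_and_fill(int fd, const void *data, uint64_t len)
	{
		struct drm_i915_gem_create create = { .size = len };
		struct drm_i915_gem_pwrite pwrite = { 0 };
		struct drm_i915_gem_mmap mmap_arg = { 0 };

		if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) < 0)
			return NULL;	/* create.size comes back page-aligned */

		pwrite.handle = create.handle;
		pwrite.size = len;
		pwrite.data_ptr = (uint64_t)(uintptr_t)data;	/* 32/64-bit safe */
		if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite) < 0)
			return NULL;

		mmap_arg.handle = create.handle;
		mmap_arg.size = len;
		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) < 0)
			return NULL;
		return (void *)(uintptr_t)mmap_arg.addr_ptr;
	}
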
index 6d72c05..17829a5 100644 (file)
@@ -39,7 +39,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20080312"
+#define DRIVER_DATE            "20080611"
 
 #if defined(__linux__)
 #define I915_HAVE_FENCE
@@ -63,7 +63,7 @@
  */
 #define DRIVER_MAJOR           1
 #if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-#define DRIVER_MINOR           13
+#define DRIVER_MINOR           14
 #else
 #define DRIVER_MINOR           6
 #endif
@@ -81,14 +81,13 @@ struct drm_i915_validate_buffer;
 
 struct drm_i915_ring_buffer {
        int tail_mask;
-       unsigned long Start;
-       unsigned long End;
        unsigned long Size;
        u8 *virtual_start;
        int head;
        int tail;
        int space;
        drm_local_map_t map;
+       struct drm_gem_object *ring_obj;
 };
 
 struct mem_block {
@@ -114,7 +113,7 @@ struct drm_i915_master_private {
 };
        
 struct drm_i915_private {
-       struct drm_buffer_object *ring_buffer;
+        struct drm_device *dev;
 
        drm_local_map_t *mmio_map;
 
@@ -124,18 +123,17 @@ struct drm_i915_private {
        struct drm_i915_ring_buffer ring;
 
        struct drm_dma_handle *status_page_dmah;
-       void *hw_status_page;
        dma_addr_t dma_status_page;
        uint32_t counter;
-       unsigned int status_gfx_addr;
+       uint32_t hws_agpoffset;
        drm_local_map_t hws_map;
-       struct drm_buffer_object *hws_bo;
+       void *hws_vaddr;
+       struct drm_memrange_node *hws;
 
        unsigned int cpp;
 
        wait_queue_head_t irq_queue;
        atomic_t irq_received;
-       atomic_t irq_emitted;
 
        int tex_lru_log_granularity;
        int allow_batchbuffer;
@@ -145,12 +143,14 @@ struct drm_i915_private {
        DRM_SPINTYPE user_irq_lock;
        int user_irq_refcount;
        int fence_irq_on;
-       uint32_t irq_enable_reg;
+       uint32_t irq_mask_reg;
        int irq_enabled;
        struct workqueue_struct *wq;
 
        bool cursor_needs_physical;
 
+       struct drm_memrange vram;
+
 #ifdef I915_HAVE_FENCE
        uint32_t flush_sequence;
        uint32_t flush_flags;
@@ -161,7 +161,7 @@ struct drm_i915_private {
        void *agp_iomap;
        unsigned int max_validate_buffers;
        struct mutex cmdbuf_mutex;
-       size_t stolen_base;
+       u32 stolen_base;
        struct drm_i915_validate_buffer *val_bufs;
 #endif
 
@@ -175,17 +175,96 @@ struct drm_i915_private {
        struct drm_display_mode *panel_fixed_mode;
        struct drm_display_mode *vbt_mode; /* if any */
 
-#if defined(I915_HAVE_BUFFER)
+#if defined(I915_HAVE_BUFFER) && defined(DRI2)
        /* DRI2 sarea */
-       struct drm_buffer_object *sarea_bo;
-       struct drm_bo_kmap_obj sarea_kmap;
+       struct drm_gem_object *sarea_object;
+        struct drm_bo_kmap_obj sarea_kmap;
+#endif
 
        /* Feature bits from the VBIOS */
        int int_tv_support:1;
        int lvds_dither:1;
        int lvds_vbt:1;
        int int_crt_support:1;
-#endif
+
+       struct {
+               struct drm_memrange gtt_space;
+
+               /**
+                * List of objects currently involved in rendering from the
+                * ringbuffer.
+                *
+                * A reference is held on the buffer while on this list.
+                */
+               struct list_head active_list;
+
+               /**
+                * List of objects which are not in the ringbuffer but which
+                * still have a write_domain which needs to be flushed before
+                * unbinding.
+                *
+                * A reference is held on the buffer while on this list.
+                */
+               struct list_head flushing_list;
+
+               /**
+                * LRU list of objects which are not in the ringbuffer and
+                * are ready to unbind, but are still in the GTT.
+                *
+                * A reference is not held on the buffer while on this list,
+                * as merely being GTT-bound shouldn't prevent its being
+                * freed, and we'll pull it off the list in the free path.
+                */
+               struct list_head inactive_list;
+
+               /**
+                * List of breadcrumbs associated with GPU requests currently
+                * outstanding.
+                */
+               struct list_head request_list;
+
+               /**
+                * We leave the user IRQ off as much as possible,
+                * but this means that requests will finish and never
+                * be retired once the system goes idle. Set a timer to
+                * fire periodically while the ring is running. When it
+                * fires, go retire requests.
+                */
+               struct delayed_work retire_work;
+               
+               uint32_t next_gem_seqno;
+
+               /**
+                * Waiting sequence number, if any
+                */
+               uint32_t waiting_gem_seqno;
+       
+               /**
+                * Last seq seen at irq time
+                */
+               uint32_t irq_gem_seqno;
+
+               /**
+                * Flag if the X Server, and thus DRM, is not currently in
+                * control of the device.
+                *
+                * This is set between LeaveVT and EnterVT.  It needs to be
+                * replaced with a semaphore.  It also needs to be
+                * transitioned away from for kernel modesetting.
+                */
+               int suspended;
+
+               /**
+                * Flag if the hardware appears to be wedged.
+                *
+                * This is set when attempts to idle the device time out.
+                * It prevents command submission from occurring and makes
+                * every pending request fail.
+                */
+               int wedged;
+       } mm;
+
+       struct work_struct user_interrupt_task;
 
        /* Register state */
        u8 saveLBB;
@@ -277,6 +356,13 @@ struct drm_i915_private {
        u8 saveCR[37];
 };
 
+struct drm_i915_file_private {
+       struct {
+               uint32_t last_gem_seqno;
+               uint32_t last_gem_throttle_seqno;
+       } mm;
+};
+
 enum intel_chip_family {
        CHIP_I8XX = 0x01,
        CHIP_I9XX = 0x02,
@@ -284,6 +370,74 @@ enum intel_chip_family {
        CHIP_I965 = 0x08,
 };
 
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+       struct drm_gem_object *obj;
+
+       /** Current space allocated to this object in the GTT, if any. */
+       struct drm_memrange_node *gtt_space;
+
+       /** This object's place on the active/flushing/inactive lists */
+       struct list_head list;
+
+       /**
+        * This is set if the object is on the active or flushing lists
+        * (has pending rendering), and is not set if it's on inactive (ready
+        * to be unbound).
+        */
+       int active;
+
+       /**
+        * This is set if the object has been written to since last bound
+        * to the GTT
+        */
+       int dirty;
+
+       /** AGP memory structure for our GTT binding. */
+       DRM_AGP_MEM *agp_mem;
+
+       struct page **page_list;
+
+       /**
+        * Current offset of the object in GTT space.
+        *
+        * This is the same as gtt_space->start
+        */
+       uint32_t gtt_offset;
+
+       /** Boolean whether this object has a valid gtt offset. */
+       int gtt_bound;
+
+       /** How many users have pinned this object in GTT space */
+       int pin_count;
+
+       /** Breadcrumb of last rendering to the buffer. */
+       uint32_t last_rendering_seqno;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+       /** GEM sequence number associated with this request. */
+       uint32_t seqno;
+
+       /** Time at which this request was emitted, in jiffies. */
+       unsigned long emitted_jiffies;
+
+       /** Cache domains that were flushed at the start of the request. */
+       uint32_t flush_domains;
+
+       struct list_head list;
+};
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 
@@ -294,8 +448,11 @@ extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *dev);
 extern void i915_driver_lastclose(struct drm_device * dev);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
 extern void i915_driver_preclose(struct drm_device *dev,
                                 struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+                                 struct drm_file *file_priv);
 extern int i915_driver_device_is_agp(struct drm_device * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
@@ -309,6 +466,10 @@ extern int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch);
 extern int i915_quiescent(struct drm_device *dev);
 
+int i915_emit_box(struct drm_device * dev,
+                 struct drm_clip_rect __user * boxes,
+                 int i, int DR1, int DR4);
+
 /* i915_irq.c */
 extern int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
@@ -325,6 +486,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 extern int i915_emit_irq(struct drm_device * dev);
 extern void i915_enable_interrupt (struct drm_device *dev);
+extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
 extern int i915_enable_vblank(struct drm_device *dev, int crtc);
 extern void i915_disable_vblank(struct drm_device *dev, int crtc);
 extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
@@ -353,7 +515,7 @@ extern void i915_invalidate_reported_sequence(struct drm_device *dev);
 
 #endif
 
-#ifdef I915_HAVE_BUFFER
+#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
 /* i915_buffer.c */
 extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
 extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass,
@@ -365,10 +527,54 @@ extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
 extern int i915_move(struct drm_buffer_object *bo, int evict,
                int no_wait, struct drm_bo_mem_reg *new_mem);
 void i915_flush_ttm(struct drm_ttm *ttm);
+#endif /* ttm */
+#ifdef I915_HAVE_BUFFER
 /* i915_execbuf.c */
 int i915_execbuffer(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
-
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+int i915_gem_proc_init(struct drm_minor *minor);
+void i915_gem_proc_cleanup(struct drm_minor *minor);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+void i915_gem_object_unpin(struct drm_gem_object *obj);
+void i915_gem_lastclose(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
+int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+                    unsigned long end);
+void i915_gem_retire_work_handler(struct work_struct *work);
 #endif
 
 extern unsigned int i915_fbpercrtc;
@@ -392,16 +598,25 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 #define I915_WRITE16(reg,val)  DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
 
 #define I915_VERBOSE 0
+#define I915_RING_VALIDATE 0
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
 #define RING_LOCALS    unsigned int outring, ringmask, outcount; \
                        volatile char *virt;
 
+#if I915_RING_VALIDATE
+void i915_ring_validate(struct drm_device *dev, const char *func, int line);
+#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
+#else
+#define I915_RING_DO_VALIDATE(dev)
+#endif
+
 #define BEGIN_LP_RING(n) do {                          \
        if (I915_VERBOSE)                               \
                DRM_DEBUG("BEGIN_LP_RING(%d)\n",        \
                                 (n));                  \
+       I915_RING_DO_VALIDATE(dev);                     \
        if (dev_priv->ring.space < (n)*4)                      \
                i915_wait_ring(dev, (n)*4, __FUNCTION__);      \
        outcount = 0;                                   \
@@ -420,17 +635,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 
 #define ADVANCE_LP_RING() do {                                         \
        if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);   \
+       I915_RING_DO_VALIDATE(dev);                                     \
        dev_priv->ring.tail = outring;                                  \
        dev_priv->ring.space -= outcount * 4;                           \
        I915_WRITE(PRB0_TAIL, outring);                 \
 } while(0)
 
-#define BREADCRUMB_BITS 31
-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
-
-#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
-
 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 /*
@@ -532,17 +742,40 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
 #define MI_STORE_DWORD_IMM     MI_INSTR(0x20, 1) /* used to have 1<<22? */
 #define MI_STORE_DWORD_INDEX   MI_INSTR(0x21, 1)
+#define   MI_STORE_DWORD_INDEX_SHIFT 2
 #define MI_LOAD_REGISTER_IMM   MI_INSTR(0x22, 1)
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE  (1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
 #define MI_BATCH_BUFFER_START  MI_INSTR(0x31, 0)
 
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hws_vaddr))[5])
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DWORD_INDEX, or
+ * MI_STORE_DWORD_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 4: ring 0 head pointer
+ * 5: ring 1 head pointer (915-class)
+ * 6: ring 2 head pointer (915-class)
+ *
+ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hws_vaddr))[reg])
+#define I915_GEM_HWS_INDEX             0x10
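+
+/*
+ * Editorial sketch, not part of this patch: the seqno protocol implied by
+ * the macros above.  A seqno is stored into status-page dword
+ * I915_GEM_HWS_INDEX from the ring and read back with READ_HWSP();
+ * OUT_RING() is assumed from the surrounding driver:
+ *
+ *	BEGIN_LP_RING(4);
+ *	OUT_RING(MI_STORE_DWORD_INDEX);
+ *	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ *	OUT_RING(seqno);
+ *	OUT_RING(GFX_OP_USER_INTERRUPT);
+ *	ADVANCE_LP_RING();
+ *
+ * Completion is then a matter of comparing
+ * READ_HWSP(dev_priv, I915_GEM_HWS_INDEX) against the emitted seqno
+ * (with care for wrap).
+ */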
+
 /*
  * 3D instructions used by the kernel
  */
 #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
 
+#define GFX_OP_USER_INTERRUPT  ((0<<29)|(2<<23))
 #define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
 #define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
 #define   SC_UPDATE_SCISSOR       (0x1<<1)
@@ -603,6 +836,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define PRB1_HEAD      0x02044 /* 915+ only */
 #define PRB1_START     0x02048 /* 915+ only */
 #define PRB1_CTL       0x0204c /* 915+ only */
+#define I965REG_ACTHD  0x02074
 #define HWS_PGA                0x02080
 #define IPEIR          0x02088
 #define NOPID          0x02094
@@ -632,6 +866,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define EMR            0x020b4
 #define ESR            0x020b8
 #define INSTPM         0x020c0
+#define I915REG_ACTHD  0x020C8
 #define FW_BLC         0x020d8
 #define FW_BLC_SELF    0x020e0 /* 915+ only */
 #define MI_ARB_STATE   0x020e4 /* 915+ only */
@@ -790,12 +1025,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define ADPA_DPMS_STANDBY      (2<<10)
 #define ADPA_DPMS_OFF          (3<<10)
 
-#define LP_RING                        0x2030
-#define HP_RING                        0x2040
-/* The binner has its own ring buffer:
- */
-#define HWB_RING               0x2400
-
 #define RING_TAIL              0x00
 #define TAIL_ADDR              0x001FFFF8
 #define RING_HEAD              0x04
index f2c07fc..3a652e8 100644 (file)
@@ -100,62 +100,11 @@ int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
        return 0;
 }
 
-int i915_load_modeset_init(struct drm_device *dev)
+static int i915_init_hwstatus(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long agp_size, prealloc_size;
-       int size, ret = 0;
-
-       i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
-       printk("setting up %ld bytes of VRAM space\n", prealloc_size);
-       printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
-
-       drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
-       drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT,
-                      (agp_size - prealloc_size) >> PAGE_SHIFT, 1);
-       I915_WRITE(PRB0_CTL, 0);
-       I915_WRITE(PRB0_HEAD, 0);
-       I915_WRITE(PRB0_TAIL, 0);
-
-       size = PRIMARY_RINGBUFFER_SIZE;
-       ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
-                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
-                       DRM_BO_FLAG_MEM_VRAM |
-                       DRM_BO_FLAG_NO_EVICT,
-                       DRM_BO_HINT_DONT_FENCE, 0x1, 0,
-                       &dev_priv->ring_buffer);
-       if (ret < 0) {
-               DRM_ERROR("Unable to allocate or pin ring buffer\n");
-               goto clean_mm;
-       }
-
-       /* remap the buffer object properly */
-       dev_priv->ring.Start = dev_priv->ring_buffer->offset;
-       dev_priv->ring.End = dev_priv->ring.Start + size;
-       dev_priv->ring.Size = size;
-       dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
-       /* FIXME: need wrapper with PCI mem checks */
-       ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
-                                 (void **) &dev_priv->ring.virtual_start);
-       if (ret) {
-               DRM_ERROR("error mapping ring buffer: %d\n", ret);
-               goto destroy_ringbuffer;
-       }
-
-       DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
-                       dev_priv->ring.virtual_start, dev_priv->ring.Size);
-
-       memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
-       I915_WRITE(PRB0_START, dev_priv->ring.Start);
-       I915_WRITE(PRB0_CTL, ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
-                  (RING_NO_REPORT | RING_VALID));
-
-       /* Allow hardware batchbuffers unless told otherwise.
-        */
-       dev_priv->allow_batchbuffer = 1;
-       dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
-       mutex_init(&dev_priv->cmdbuf_mutex);
+       struct drm_memrange_node *free_space;
+       int ret = 0;
 
        /* Program Hardware Status Page */
        if (!IS_G33(dev)) {
@@ -165,52 +114,105 @@ int i915_load_modeset_init(struct drm_device *dev)
                if (!dev_priv->status_page_dmah) {
                        DRM_ERROR("Can not allocate hardware status page\n");
                        ret = -ENOMEM;
-                       goto destroy_ringbuffer;
+                       goto out;
                }
-               dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+               dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
                dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-               memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        } else {
-               size = 4 * 1024;
-               ret = drm_buffer_object_create(dev, size,
-                               drm_bo_type_kernel,
-                               DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
-                               DRM_BO_FLAG_MEM_VRAM |
-                               DRM_BO_FLAG_NO_EVICT,
-                               DRM_BO_HINT_DONT_FENCE, 0x1, 0,
-                               &dev_priv->hws_bo);
-               if (ret < 0) {
+               free_space = drm_memrange_search_free(&dev_priv->vram,
+                                                     PAGE_SIZE,
+                                                     PAGE_SIZE, 0);
+               if (!free_space) {
+                       DRM_ERROR("No free vram available, aborting\n");
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               dev_priv->hws = drm_memrange_get_block(free_space, PAGE_SIZE,
+                                                      PAGE_SIZE);
+               if (!dev_priv->hws) {
                        DRM_ERROR("Unable to allocate or pin hw status page\n");
                        ret = -EINVAL;
-                       goto destroy_ringbuffer;
+                       goto out;
                }
 
-               dev_priv->status_gfx_addr =
-                       dev_priv->hws_bo->offset & (0x1ffff << 12);
+               dev_priv->hws_agpoffset = dev_priv->hws->start;
                dev_priv->hws_map.offset = dev->agp->base +
-                       dev_priv->hws_bo->offset;
-               dev_priv->hws_map.size = size;
+                       dev_priv->hws->start;
+               dev_priv->hws_map.size = PAGE_SIZE;
                dev_priv->hws_map.type= 0;
                dev_priv->hws_map.flags= 0;
                dev_priv->hws_map.mtrr = 0;
 
                drm_core_ioremap(&dev_priv->hws_map, dev);
                if (dev_priv->hws_map.handle == NULL) {
-                       dev_priv->status_gfx_addr = 0;
+                       dev_priv->hws_agpoffset = 0;
                        DRM_ERROR("can not ioremap virtual addr for"
                                        "G33 hw status page\n");
                        ret = -ENOMEM;
-                       goto destroy_hws;
+                       goto out_free;
                }
-               dev_priv->hw_status_page = dev_priv->hws_map.handle;
-               memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+               dev_priv->hws_vaddr = dev_priv->hws_map.handle;
+               I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
        }
+
+       memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
+
        DRM_DEBUG("Enabled hardware status page\n");
 
+       return 0;
+
+out_free:
+       drm_memrange_put_block(dev_priv->hws);
+       dev_priv->hws = NULL;
+out:
+       return ret;
+}
+
+static void i915_cleanup_hwstatus(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!IS_G33(dev)) {
+               if (dev_priv->status_page_dmah)
+                       drm_pci_free(dev, dev_priv->status_page_dmah);
+       } else {
+               if (dev_priv->hws_map.handle)
+                       drm_core_ioremapfree(&dev_priv->hws_map, dev);
+               if (dev_priv->hws)
+                       drm_memrange_put_block(dev_priv->hws);
+       }
+       I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long agp_size, prealloc_size;
+       int ret = 0;
+
+       i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
+
+       /* Basic memrange allocator for stolen space (aka vram) */
+       drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
+       /* Let GEM manage the space from the end of prealloc to the end of
+        * the aperture */
+       i915_gem_do_init(dev, prealloc_size, agp_size);
+
+       ret = i915_gem_init_ringbuffer(dev);
+       if (ret)
+               goto out;
+
+       ret = i915_init_hwstatus(dev);
+       if (ret)
+               goto destroy_ringbuffer;
+
+       /* Allow hardware batchbuffers unless told otherwise.
+        */
+       dev_priv->allow_batchbuffer = 1;
+       dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
+       mutex_init(&dev_priv->cmdbuf_mutex);
+
        dev_priv->wq = create_singlethread_workqueue("i915");
        if (dev_priv->wq == 0) {
                DRM_DEBUG("Error\n");
@@ -228,9 +230,6 @@ int i915_load_modeset_init(struct drm_device *dev)
        intel_modeset_init(dev);
        drm_helper_initial_config(dev, false);
 
-       drm_mm_print(&dev->bm.man[DRM_BO_MEM_VRAM].manager, "VRAM");
-       drm_mm_print(&dev->bm.man[DRM_BO_MEM_TT].manager, "TT");
-
        dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
        if (!dev->devname) {
                ret = -ENOMEM;
@@ -249,25 +248,10 @@ modeset_cleanup:
 destroy_wq:
        destroy_workqueue(dev_priv->wq);
 destroy_hws:
-       if (!IS_G33(dev)) {
-               if (dev_priv->status_page_dmah)
-                       drm_pci_free(dev, dev_priv->status_page_dmah);
-       } else {
-               if (dev_priv->hws_map.handle)
-                       drm_core_ioremapfree(&dev_priv->hws_map, dev);
-               if (dev_priv->hws_bo)
-                       drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
-       }
-       I915_WRITE(HWS_PGA, 0x1ffff000);
+       i915_cleanup_hwstatus(dev);
 destroy_ringbuffer:
-       if (dev_priv->ring.virtual_start)
-               drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
-                                   dev_priv->ring.virtual_start);
-       if (dev_priv->ring_buffer)
-               drm_bo_usage_deref_unlocked(&dev_priv->ring_buffer);
-clean_mm:
-       drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1);
-       drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1);
+       i915_gem_cleanup_ringbuffer(dev);
+out:
        return ret;
 }
 
@@ -293,7 +277,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        memset(dev_priv, 0, sizeof(struct drm_i915_private));
        dev->dev_private = (void *)dev_priv;
-//     dev_priv->flags = flags;
+       dev_priv->dev = dev;
 
        /* i915 has 4 more counters */
        dev->counters += 4;
@@ -310,10 +294,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (IS_I965G(dev) || IS_G33(dev))
                dev_priv->cursor_needs_physical = false;
 
-       if (IS_I9XX(dev)) {
+       if (IS_I9XX(dev))
                pci_read_config_dword(dev->pdev, 0x5C, &dev_priv->stolen_base);
-               DRM_DEBUG("stolen base %p\n", (void*)dev_priv->stolen_base);
-       }
 
        if (IS_I9XX(dev)) {
                dev_priv->mmiobase = drm_get_resource_start(dev, 0);
@@ -341,6 +323,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       INIT_LIST_HEAD(&dev_priv->mm.active_list);
+       INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+       INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->mm.request_list);
+       INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+                         i915_gem_retire_work_handler);
+       dev_priv->mm.next_gem_seqno = 1;
+
 #ifdef __linux__
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
         intel_init_chipset_flush_compat(dev);
@@ -348,26 +338,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 #endif
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               /*
-                * Initialize the memory manager for local and AGP space
-                */
-               ret = drm_bo_driver_init(dev);
-               if (ret) {
-                       DRM_ERROR("fail to init memory manager for "
-                                 "local & AGP space\n");
-                       goto out_rmmap;
-               }
-
                ret = i915_load_modeset_init(dev);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
-                       goto driver_fini;
+                       goto out_rmmap;
                }
        }
        return 0;
 
-driver_fini:
-       drm_bo_driver_finish(dev);
 out_rmmap:
        drm_rmmap(dev, dev_priv->mmio_map);
 free_priv:
@@ -392,6 +370,8 @@ int i915_driver_unload(struct drm_device *dev)
                drm_core_ioremapfree(&dev_priv->ring.map, dev);
        }
 #endif
+
+#ifdef DRI2
        if (dev_priv->sarea_kmap.virtual) {
                drm_bo_kunmap(&dev_priv->sarea_kmap);
                dev_priv->sarea_kmap.virtual = NULL;
@@ -404,44 +384,17 @@ int i915_driver_unload(struct drm_device *dev)
                mutex_unlock(&dev->struct_mutex);
                dev_priv->sarea_bo = NULL;
        }
-
-       if (dev_priv->status_page_dmah) {
-               drm_pci_free(dev, dev_priv->status_page_dmah);
-               dev_priv->status_page_dmah = NULL;
-               dev_priv->hw_status_page = NULL;
-               dev_priv->dma_status_page = 0;
-               /* Need to rewrite hardware status page */
-               I915_WRITE(HWS_PGA, 0x1ffff000);
-       }
-
-       if (dev_priv->status_gfx_addr) {
-               dev_priv->status_gfx_addr = 0;
-               drm_core_ioremapfree(&dev_priv->hws_map, dev);
-               drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
-               I915_WRITE(HWS_PGA, 0x1ffff000);
-       }
+#endif
+       i915_cleanup_hwstatus(dev);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
-                                   dev_priv->ring.virtual_start);
-
-               DRM_DEBUG("usage is %d\n", atomic_read(&dev_priv->ring_buffer->usage));
                mutex_lock(&dev->struct_mutex);
-               drm_bo_usage_deref_locked(&dev_priv->ring_buffer);
-
-               if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
-                       DRM_ERROR("Memory manager type 3 not clean. "
-                                 "Delaying takedown\n");
-               }
-               if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
-                       DRM_ERROR("Memory manager type 3 not clean. "
-                                 "Delaying takedown\n");
-               }
+               i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
+               drm_memrange_takedown(&dev_priv->vram);
+               i915_gem_lastclose(dev);
        }
 
-       drm_bo_driver_finish(dev);
-
 #ifdef __linux__
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
         intel_fini_chipset_flush_compat(dev);
@@ -497,10 +450,36 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
        master->driver_priv = NULL;
 }
 
+int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_i915_file_private *i915_file_priv;
+
+       DRM_DEBUG("\n");
+       i915_file_priv = (struct drm_i915_file_private *)
+           drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
+
+       if (!i915_file_priv)
+               return -ENOMEM;
+
+       file_priv->driver_priv = i915_file_priv;
+
+       i915_file_priv->mm.last_gem_seqno = 0;
+       i915_file_priv->mm.last_gem_throttle_seqno = 0;
+
+       return 0;
+}
+
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+       drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+}
+
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
 
@@ -511,8 +490,33 @@ void i915_driver_lastclose(struct drm_device * dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
 
+#ifdef I915_HAVE_BUFFER
+       if (dev_priv->val_bufs) {
+               vfree(dev_priv->val_bufs);
+               dev_priv->val_bufs = NULL;
+       }
+#endif
+
+       i915_gem_lastclose(dev);
+
        if (dev_priv->agp_heap)
                i915_mem_takedown(&(dev_priv->agp_heap));
+
+#if defined(DRI2)
+       if (dev_priv->sarea_kmap.virtual) {
+               drm_bo_kunmap(&dev_priv->sarea_kmap);
+               dev_priv->sarea_kmap.virtual = NULL;
+               dev->control->master->lock.hw_lock = NULL;
+               dev->sigdata.lock = NULL;
+       }
+
+       if (dev_priv->sarea_bo) {
+               mutex_lock(&dev->struct_mutex);
+               drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
+               mutex_unlock(&dev->struct_mutex);
+               dev_priv->sarea_bo = NULL;
+       }
+#endif
        
        i915_dma_cleanup(dev);
 }
@@ -521,7 +525,8 @@ int i915_driver_firstopen(struct drm_device *dev)
 {
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
-
+#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
        drm_bo_driver_init(dev);
+#endif
        return 0;
 }
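
The hardware status page and ring setup above lean on the drm_memrange
allocator that this series introduces in place of the old drm_bo VRAM
manager.  A condensed, hedged sketch of its lifecycle as used in this file
(error handling trimmed; an illustration, not a complete driver path):

	struct drm_memrange vram;
	struct drm_memrange_node *free_space, *block;

	/* manage the stolen range [0, prealloc_size) */
	drm_memrange_init(&vram, 0, prealloc_size);

	/* find a page-sized, page-aligned hole, then claim it */
	free_space = drm_memrange_search_free(&vram, PAGE_SIZE, PAGE_SIZE, 0);
	if (free_space)
		block = drm_memrange_get_block(free_space, PAGE_SIZE, PAGE_SIZE);

	/* block->start is an offset into the managed range */

	drm_memrange_put_block(block);		/* at cleanup */
	drm_memrange_takedown(&vram);		/* at unload */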
index 2d35568..bad5ae2 100644 (file)
 
 #define MAX_NOPID ((u32)~0)
 
+/*
+ * These are the interrupts used by the driver
+ */
+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
+                                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+                                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
+static inline void
+i915_enable_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       if ((dev_priv->irq_mask_reg & mask) != 0) {
+               dev_priv->irq_mask_reg &= ~mask;
+               I915_WRITE(IMR, dev_priv->irq_mask_reg);
+               (void) I915_READ(IMR);
+       }
+}
+
+static inline void
+i915_disable_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       if ((dev_priv->irq_mask_reg & mask) != mask) {
+               dev_priv->irq_mask_reg |= mask;
+               I915_WRITE(IMR, dev_priv->irq_mask_reg);
+               (void) I915_READ(IMR);
+       }
+}
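+
+/*
+ * Every caller of these two helpers in this file takes
+ * dev_priv->user_irq_lock first, which keeps the read-modify-write of
+ * irq_mask_reg atomic.
+ */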
+
 /**
  * i915_get_pipe - return the pipe associated with a given plane
  * @dev: DRM device
@@ -443,9 +470,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
 
 static struct drm_device *hotplug_dev;
 
-/*
- * This code is called in a more safe envirmoent to handle the hotplugs.
- * Add code here for hotplug love to userspace.
+/**
+ * Handler for user interrupts in process context (able to sleep, do VFS
+ * operations, etc.).
+ *
+ * If another IRQ comes in while we're in this handler, it will still get put
+ * on the queue again to be rerun when we finish.
  */
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
 static void i915_hotplug_work_func(void *work)
@@ -485,7 +515,6 @@ static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
        if (stat & SDVOC_HOTPLUG_INT_STATUS) {
                DRM_DEBUG("sDVOC event\n");
        }
-
        queue_work(dev_priv->wq, &hotplug);
 
        return 0;
@@ -497,30 +526,29 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
        struct drm_i915_master_private *master_priv;
        struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        u32 iir;
-       u32 pipea_stats = 0, pipeb_stats, tvdac;
+       u32 pipea_stats = 0, pipeb_stats = 0, tvdac;
        int hotplug = 0;
        int vblank = 0;
 
-       /* On i8xx/i915 hw the IIR and IER are 16bit on i9xx its 32bit */
-       if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-               iir = I915_READ(IIR);
-       else
-               iir = I915_READ16(IIR);
-
-       iir &= (dev_priv->irq_enable_reg | I915_USER_INTERRUPT);
-
+       if (dev->pdev->msi_enabled)
+               I915_WRITE(IMR, ~0);
+       iir = I915_READ(IIR);
 #if 0
        DRM_DEBUG("flag=%08x\n", iir);
 #endif
+       atomic_inc(&dev_priv->irq_received);
        if (iir == 0) {
-#if 0
                DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
                           iir,
                           I915_READ(IMR),
                           I915_READ(IER),
                           I915_READ(PIPEASTAT),
                           I915_READ(PIPEBSTAT));
-#endif
+               if (dev->pdev->msi_enabled) {
+                       I915_WRITE(IMR,
+                                  dev_priv->irq_mask_reg);
+                       (void) I915_READ(IMR);
+               }
                return IRQ_NONE;
        }
 
@@ -530,46 +558,29 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
         */
        if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
                pipea_stats = I915_READ(PIPEASTAT);
-               if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
-                                  PIPE_VBLANK_INTERRUPT_STATUS))
-               {
-                       vblank++;
-                       drm_handle_vblank(dev, i915_get_plane(dev, 0));
-               }
-
-               /* This is a global event, and not a pipe A event */
-               if (pipea_stats & PIPE_HOTPLUG_INTERRUPT_STATUS)
-                       hotplug = 1;
-
-               if (pipea_stats & PIPE_HOTPLUG_TV_INTERRUPT_STATUS) {
-                       hotplug = 1;
-                       /* Toggle hotplug detection to clear hotplug status */
-                       tvdac = I915_READ(TV_DAC);
-                       I915_WRITE(TV_DAC, tvdac & ~TVDAC_STATE_CHG_EN);
-                       I915_WRITE(TV_DAC, tvdac | TVDAC_STATE_CHG_EN);
-               }
-
                I915_WRITE(PIPEASTAT, pipea_stats);
        }
 
        if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
                pipeb_stats = I915_READ(PIPEBSTAT);
-               if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
-                                  PIPE_VBLANK_INTERRUPT_STATUS))
-               {
-                       vblank++;
-                       drm_handle_vblank(dev, i915_get_plane(dev, 1));
-               }
                I915_WRITE(PIPEBSTAT, pipeb_stats);
        }
 
-       /* Clear the generated interrupt */
-       if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
-               I915_WRITE(IIR, iir);
-               (void) I915_READ(IIR);
-       } else {
-               I915_WRITE16(IIR, iir);
-               (void) I915_READ16(IIR);
+       I915_WRITE(IIR, iir);
+       if (dev->pdev->msi_enabled)
+               I915_WRITE(IMR, dev_priv->irq_mask_reg);
+       (void) I915_READ(IIR); /* Flush posted writes */
+
+       /* This is a global event, and not a pipe A event */
+       if (pipea_stats & PIPE_HOTPLUG_INTERRUPT_STATUS)
+               hotplug = 1;
+
+       if (pipea_stats & PIPE_HOTPLUG_TV_INTERRUPT_STATUS) {
+               hotplug = 1;
+               /* Toggle hotplug detection to clear hotplug status */
+               tvdac = I915_READ(TV_DAC);
+               I915_WRITE(TV_DAC, tvdac & ~TVDAC_STATE_CHG_EN);
+               I915_WRITE(TV_DAC, tvdac | TVDAC_STATE_CHG_EN);
        }
 
        if (dev->primary->master) {
@@ -578,12 +589,25 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
        }
 
        if (iir & I915_USER_INTERRUPT) {
+               dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
                DRM_WAKEUP(&dev_priv->irq_queue);
 #ifdef I915_HAVE_FENCE
                i915_fence_handler(dev);
 #endif
        }
 
+       if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+                          PIPE_VBLANK_INTERRUPT_STATUS)) {
+               vblank++;
+               drm_handle_vblank(dev, i915_get_plane(dev, 0));
+       }
+
+       if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+                          PIPE_VBLANK_INTERRUPT_STATUS)) {
+               vblank++;
+               drm_handle_vblank(dev, i915_get_plane(dev, 1));
+       }
+
        if (vblank) {
                if (dev_priv->swaps_pending > 0)
                        drm_locked_tasklet(dev, i915_vblank_tasklet);
@@ -635,15 +659,9 @@ void i915_user_irq_on(struct drm_device *dev)
        struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 
        DRM_SPINLOCK(&dev_priv->user_irq_lock);
-       if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
-               dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
-               if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-                       I915_WRITE(IER, dev_priv->irq_enable_reg);
-               else
-                       I915_WRITE16(IER, dev_priv->irq_enable_reg);
-       }
+       if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1))
+               i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
-
 }
                
 void i915_user_irq_off(struct drm_device *dev)
@@ -651,28 +669,38 @@ void i915_user_irq_off(struct drm_device *dev)
        struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 
        DRM_SPINLOCK(&dev_priv->user_irq_lock);
-       if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-               //              dev_priv->irq_enable_reg &= ~I915_USER_INTERRUPT;
-               //              if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-               //                      I915_WRITE(IER, dev_priv->irq_enable_reg);
-               //              else
-               //                      I915_WRITE16(IER, dev_priv->irq_enable_reg);
-       }
+       BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
+       if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
+               i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
 }
 
 
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
        struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        struct drm_i915_master_private *master_priv;
        int ret = 0;
 
+       if (!dev_priv) {
+               DRM_ERROR("called with no initialization\n");
+               return -EINVAL;
+       }
+
        DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));
 
-       if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+       master_priv = dev->primary->master->driver_priv;
+
+       if (!master_priv) {
+               DRM_ERROR("no master priv?\n");
+               return -EINVAL;
+       }
+
+       if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+               master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
+       }
 
        i915_user_irq_on(dev);
        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
@@ -684,10 +712,8 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }
        
-       if (dev->primary->master) {
-               master_priv = dev->primary->master->driver_priv;
+       if (READ_BREADCRUMB(dev_priv) >= irq_nr)
                master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-       }
 
        return ret;
 }
@@ -739,16 +765,17 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
        struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        int pipe = i915_get_pipe(dev, plane);
        u32     pipestat_reg = 0;
+       u32     mask_reg = 0;
        u32     pipestat;
 
        switch (pipe) {
        case 0:
                pipestat_reg = PIPEASTAT;
-               dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+               mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                break;
        case 1:
                pipestat_reg = PIPEBSTAT;
-               dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+               mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                break;
        default:
                DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
@@ -775,11 +802,9 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
                I915_WRITE(pipestat_reg, pipestat);
        }
 
-       if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-               I915_WRITE(IER, dev_priv->irq_enable_reg);
-       else
-               I915_WRITE16(IER, dev_priv->irq_enable_reg);
-
+       DRM_SPINLOCK(&dev_priv->user_irq_lock);
+       i915_enable_irq(dev_priv, mask_reg);
+       DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
 
        return 0;
 }
@@ -789,16 +814,17 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
        struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        int pipe = i915_get_pipe(dev, plane);
        u32     pipestat_reg = 0;
+       u32     mask_reg = 0;
        u32     pipestat;
 
        switch (pipe) {
        case 0:
                pipestat_reg = PIPEASTAT;
-               dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+               mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                break;
        case 1:
                pipestat_reg = PIPEBSTAT;
-               dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+               mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                break;
        default:
                DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
@@ -806,13 +832,11 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
                break;
        }
 
-       if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-               I915_WRITE(IER, dev_priv->irq_enable_reg);
-       else
-               I915_WRITE16(IER, dev_priv->irq_enable_reg);
+       DRM_SPINLOCK(&dev_priv->user_irq_lock);
+       i915_disable_irq(dev_priv, mask_reg);
+       DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
 
-       if (pipestat_reg)
-       {
+       if (pipestat_reg) {
                pipestat = I915_READ (pipestat_reg);
                pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
                              PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -822,6 +846,7 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
                pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
                             PIPE_VBLANK_INTERRUPT_STATUS);
                I915_WRITE(pipestat_reg, pipestat);
+               (void) I915_READ(pipestat_reg);
        }
 }
 
@@ -829,15 +854,15 @@ void i915_enable_interrupt (struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        struct drm_connector *o;
-
-       dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
+
+       dev_priv->irq_mask_reg &= ~0;
 
        if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
                if (dev->mode_config.num_connector)
-                       dev_priv->irq_enable_reg |= I915_DISPLAY_PORT_INTERRUPT;
+                       dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
        } else {
                if (dev->mode_config.num_connector)
-                       dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+                       dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
 
                /* Enable global interrupts for hotplug - not a pipeA event */
                I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) |
@@ -847,7 +872,8 @@ void i915_enable_interrupt (struct drm_device *dev)
                           PIPE_HOTPLUG_INTERRUPT_STATUS);
        }
 
-       if (dev_priv->irq_enable_reg & (I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
+       if (!(dev_priv->irq_mask_reg & I915_DISPLAY_PORT_INTERRUPT) ||
+           !(dev_priv->irq_mask_reg & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
                u32 temp = 0;
 
                if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
@@ -891,11 +917,9 @@ void i915_enable_interrupt (struct drm_device *dev)
                }
        }
 
-       if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-               I915_WRITE(IER, dev_priv->irq_enable_reg);
-       else
-               I915_WRITE16(IER, dev_priv->irq_enable_reg);
-
+       I915_WRITE(IMR, dev_priv->irq_mask_reg);
+       I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+       (void) I915_READ (IER);
        dev_priv->irq_enabled = 1;
 }
 
@@ -927,17 +951,15 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_vblank_pipe *pipe = data;
-       u16 flag;
+       u32 flag = 0;
 
        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
 
-       if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-               flag = I915_READ(IER);
-       else
-               flag = I915_READ16(IER);
+       if (dev_priv->irq_enabled)
+           flag = ~dev_priv->irq_mask_reg;
 
        pipe->pipe = 0;
        if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
@@ -1121,20 +1143,12 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
        tmp = I915_READ(PIPEBSTAT);
        I915_WRITE(PIPEBSTAT, tmp);
 
-
-       I915_WRITE16(HWSTAM, 0xeffe);
-       if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
-               I915_WRITE(IMR, 0x0);
-               I915_WRITE(IER, 0x0);
-               tmp = I915_READ(IIR);
-               I915_WRITE(IIR, tmp);
-       } else {
-               I915_WRITE16(IMR, 0x0);
-               I915_WRITE16(IER, 0x0);
-               tmp = I915_READ16(IIR);
-               I915_WRITE16(IIR, tmp);
-       }
-
+       atomic_set(&dev_priv->irq_received, 0);
+       I915_WRITE(HWSTAM, 0xffff);
+       I915_WRITE(IER, 0x0);
+       I915_WRITE(IMR, 0xffffffff);
+       I915_WRITE(IIR, 0xffffffff);
+       (void) I915_READ(IIR);
 }
 
 int i915_driver_irq_postinstall(struct drm_device * dev)
@@ -1148,7 +1162,7 @@ int i915_driver_irq_postinstall(struct drm_device * dev)
 
        DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
        dev_priv->user_irq_refcount = 0;
-       dev_priv->irq_enable_reg = 0;
+       dev_priv->irq_mask_reg = ~0;
 
        ret = drm_vblank_init(dev, num_pipes);
        if (ret)
@@ -1179,7 +1193,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
        if (!dev_priv)
                return;
 
        dev_priv->irq_enabled = 0;
 
        temp = I915_READ(PIPEASTAT);
        I915_WRITE(PIPEASTAT, temp);
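
With the IMR-based helpers above, the user interrupt is only unmasked while
someone is actually waiting.  A hedged sketch of how a GEM-side wait can be
assembled from the pieces in this file (READ_HWSP and I915_GEM_HWS_INDEX
come from i915_drv.h; the >= comparison glosses over seqno wrap):

	static int i915_wait_gem_seqno_sketch(struct drm_device *dev,
					      uint32_t seqno)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		int ret = 0;

		if (READ_HWSP(dev_priv, I915_GEM_HWS_INDEX) >= seqno)
			return 0;

		i915_user_irq_on(dev);
		DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
			    READ_HWSP(dev_priv, I915_GEM_HWS_INDEX) >= seqno);
		i915_user_irq_off(dev);

		return ret;
	}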
index 2e68030..819a61a 100644 (file)
@@ -112,6 +112,27 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
                RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
 }
 
+static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
+{
+       u32 agp_base_hi = upper_32_bits(agp_base);
+       u32 agp_base_lo = agp_base & 0xffffffff;
+
+       if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
+               R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
+               R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
+       } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
+               RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
+               RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
+       } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
+               R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
+               R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
+       } else {
+               RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+               if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
+                       RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
+       }
+}
+
 static int RADEON_READ_PLL(struct drm_device * dev, int addr)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -542,9 +563,8 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
 
 #if __OS_HAS_AGP
        if (dev_priv->flags & RADEON_IS_AGP) {
-               RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
-               if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
-                       RADEON_WRITE(RADEON_AGP_BASE_2, 0);
+               radeon_write_agp_base(dev_priv, dev->agp->base);
+
                radeon_write_agp_location(dev_priv,
                             (((dev_priv->gart_vm_start - 1 +
                                dev_priv->gart_size) & 0xffff0000) |
index 1b32b2f..e263c61 100644 (file)
@@ -524,9 +524,13 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
 
 #define RV515_MC_FB_LOCATION 0x01
 #define RV515_MC_AGP_LOCATION 0x02
+#define RV515_MC_AGP_BASE     0x03
+#define RV515_MC_AGP_BASE_2   0x04
 
 #define R520_MC_FB_LOCATION 0x04
 #define R520_MC_AGP_LOCATION 0x05
+#define R520_MC_AGP_BASE     0x06
+#define R520_MC_AGP_BASE_2   0x07
 
 #define RADEON_MPP_TB_CONFIG           0x01c0
 #define RADEON_MEM_CNTL                        0x0140
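
As a concrete illustration of the split done by radeon_write_agp_base(), an
AGP base of 0x240000000 on an RV515 would be programmed through the indirect
indices above as:

	R500_WRITE_MCIND(RV515_MC_AGP_BASE,   0x40000000);	/* low 32 bits  */
	R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, 0x2);		/* high 32 bits */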
index dce1754..718cc43 100644 (file)
@@ -22,7 +22,10 @@ TESTS = auth \
        getstats \
        lock \
        setversion \
-       updatedraw
+       updatedraw \
+       gem_basic \
+       gem_readwrite \
+       gem_mmap
 
 EXTRA_PROGRAMS = $(TESTS)
 CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES)
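
Since the new programs are added to the automake TESTS variable, a plain
"make check" in the build tree builds and runs the three GEM tests alongside
the existing ones.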
index cae99a0..5453b10 100644 (file)
@@ -26,6 +26,7 @@
  */
 
 #include <fcntl.h>
+#include <sys/stat.h>
 #include "drmtest.h"
 
 /** Open the first DRM device we can find, searching up to 16 device nodes */
@@ -80,4 +81,3 @@ int drm_open_any_master(void)
        fprintf(stderr, "Couldn't find an un-controlled DRM device\n");
        abort();
 }
-
diff --git a/tests/gem_basic.c b/tests/gem_basic.c
new file mode 100644 (file)
index 0000000..b2176fb
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include "drm.h"
+#include "i915_drm.h"
+
+static void
+test_bad_close(int fd)
+{
+       struct drm_gem_close close;
+       int ret;
+
+       printf("Testing error return on bad close ioctl.\n");
+
+       close.handle = 0x10101010;
+       ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
+
+       assert(ret == -1 && errno == EINVAL);
+}
+
+static void
+test_create_close(int fd)
+{
+       struct drm_i915_gem_create create;
+       struct drm_gem_close close;
+       int ret;
+
+       printf("Testing creating and closing an object.\n");
+
+       memset(&create, 0, sizeof(create));
+       create.size = 16 * 1024;
+       ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+       assert(ret == 0);
+
+       close.handle = create.handle;
+       ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
+       assert(ret == 0);
+}
+
+static void
+test_create_fd_close(int fd)
+{
+       struct drm_i915_gem_create create;
+       int ret;
+
+       printf("Testing closing with an object allocated.\n");
+
+       memset(&create, 0, sizeof(create));
+       create.size = 16 * 1024;
+       ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+       assert(ret == 0);
+
+       close(fd);
+}
+
+int main(int argc, char **argv)
+{
+       int fd;
+
+       fd = drm_open_any();
+
+       test_bad_close(fd);
+       test_create_close(fd);
+       test_create_fd_close(fd);
+
+       return 0;
+}
diff --git a/tests/gem_mmap.c b/tests/gem_mmap.c
new file mode 100644 (file)
index 0000000..c3a5188
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include "drm.h"
+#include "i915_drm.h"
+
+#define OBJECT_SIZE 16384
+
+int do_read(int fd, int handle, void *buf, int offset, int size)
+{
+       struct drm_i915_gem_pread read;
+
+       /* Ensure that we don't have any convenient data in buf in case
+        * we fail.
+        */
+       memset(buf, 0xd0, size);
+
+       memset(&read, 0, sizeof(read));
+       read.handle = handle;
+       read.data_ptr = (uintptr_t)buf;
+       read.size = size;
+       read.offset = offset;
+
+       return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
+}
+
+int do_write(int fd, int handle, void *buf, int offset, int size)
+{
+       struct drm_i915_gem_pwrite write;
+
+       memset(&write, 0, sizeof(write));
+       write.handle = handle;
+       write.data_ptr = (uintptr_t)buf;
+       write.size = size;
+       write.offset = offset;
+
+       return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
+}
+
+int main(int argc, char **argv)
+{
+       int fd;
+       struct drm_i915_gem_create create;
+       struct drm_i915_gem_mmap mmap;
+       struct drm_gem_close unref;
+       uint8_t expected[OBJECT_SIZE];
+       uint8_t buf[OBJECT_SIZE];
+       uint8_t *addr;
+       int ret;
+       int handle;
+
+       fd = drm_open_any();
+
+       memset(&mmap, 0, sizeof(mmap));
+       mmap.handle = 0x10101010;
+       mmap.offset = 0;
+       mmap.size = 4096;
+       printf("Testing mmaping of bad object.\n");
+       ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
+       assert(ret == -1 && errno == EINVAL);
+
+       memset(&create, 0, sizeof(create));
+       create.size = OBJECT_SIZE;
+       ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+       assert(ret == 0);
+       handle = create.handle;
+
+       printf("Testing mmaping of newly created object.\n");
+       mmap.handle = handle;
+       mmap.offset = 0;
+       mmap.size = OBJECT_SIZE;
+       ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
+       assert(ret == 0);
+       addr = (uint8_t *)(uintptr_t)mmap.addr_ptr;
+
+       printf("Testing contents of newly created object.\n");
+       memset(expected, 0, sizeof(expected));
+       assert(memcmp(addr, expected, sizeof(expected)) == 0);
+
+       printf("Testing coherency of writes and mmap reads.\n");
+       memset(buf, 0, sizeof(buf));
+       memset(buf + 1024, 0x01, 1024);
+       memset(expected + 1024, 0x01, 1024);
+       ret = do_write(fd, handle, buf, 0, OBJECT_SIZE);
+       assert(ret == 0);
+       assert(memcmp(buf, addr, sizeof(buf)) == 0);
+
+       printf("Testing that mapping stays after close\n");
+       unref.handle = handle;
+       ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &unref);
+       assert(ret == 0);
+       assert(memcmp(buf, addr, sizeof(buf)) == 0);
+
+       printf("Testing unmapping\n");
+       munmap(addr, OBJECT_SIZE);
+
+       close(fd);
+
+       return 0;
+}
diff --git a/tests/gem_readwrite.c b/tests/gem_readwrite.c
new file mode 100644 (file)
index 0000000..54b25ea
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include "drm.h"
+#include "i915_drm.h"
+
+#define OBJECT_SIZE 16384
+
+int do_read(int fd, int handle, void *buf, int offset, int size)
+{
+       struct drm_i915_gem_pread read;
+
+       /* Ensure that we don't have any convenient data in buf in case
+        * we fail.
+        */
+       memset(buf, 0xd0, size);
+
+       memset(&read, 0, sizeof(read));
+       read.handle = handle;
+       read.data_ptr = (uintptr_t)buf;
+       read.size = size;
+       read.offset = offset;
+
+       return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
+}
+
+int do_write(int fd, int handle, void *buf, int offset, int size)
+{
+       struct drm_i915_gem_pwrite write;
+
+       memset(&write, 0, sizeof(write));
+       write.handle = handle;
+       write.data_ptr = (uintptr_t)buf;
+       write.size = size;
+       write.offset = offset;
+
+       return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
+}
+
+int main(int argc, char **argv)
+{
+       int fd;
+       struct drm_i915_gem_create create;
+       uint8_t expected[OBJECT_SIZE];
+       uint8_t buf[OBJECT_SIZE];
+       int ret;
+       int handle;
+
+       fd = drm_open_any();
+
+       memset(&create, 0, sizeof(create));
+       create.size = OBJECT_SIZE;
+       ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+       assert(ret == 0);
+       handle = create.handle;
+
+       printf("Testing contents of newly created object.\n");
+       ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
+       assert(ret == 0);
+       memset(&expected, 0, sizeof(expected));
+       assert(memcmp(expected, buf, sizeof(expected)) == 0);
+
+       printf("Testing read beyond end of buffer.\n");
+       ret = do_read(fd, handle, buf, OBJECT_SIZE / 2, OBJECT_SIZE);
+       assert(ret == -1 && errno == EINVAL);
+
+       printf("Testing full write of buffer\n");
+       memset(buf, 0, sizeof(buf));
+       memset(buf + 1024, 0x01, 1024);
+       memset(expected + 1024, 0x01, 1024);
+       ret = do_write(fd, handle, buf, 0, OBJECT_SIZE);
+       assert(ret == 0);
+       ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
+       assert(ret == 0);
+       assert(memcmp(buf, expected, sizeof(buf)) == 0);
+
+       printf("Testing partial write of buffer\n");
+       memset(buf + 4096, 0x02, 1024);
+       memset(expected + 4096, 0x02, 1024);
+       ret = do_write(fd, handle, buf + 4096, 4096, 1024);
+       assert(ret == 0);
+       ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
+       assert(ret == 0);
+       assert(memcmp(buf, expected, sizeof(buf)) == 0);
+
+       printf("Testing partial read of buffer\n");
+       ret = do_read(fd, handle, buf, 512, 1024);
+       assert(ret == 0);
+       assert(memcmp(buf, expected + 512, 1024) == 0);
+
+       close(fd);
+
+       return 0;
+}