Merge commit 'origin/master' into drm-gem
author Eric Anholt <eric@anholt.net>
Mon, 28 Jul 2008 22:17:21 +0000 (15:17 -0700)
committer Eric Anholt <eric@anholt.net>
Tue, 29 Jul 2008 06:12:26 +0000 (23:12 -0700)
Conflicts:

linux-core/Makefile.kernel
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/i915_irq.c

16 files changed:
linux-core/Makefile.kernel
linux-core/drmP.h
linux-core/drm_agpsupport.c
linux-core/drm_compat.h
linux-core/drm_drv.c
linux-core/drm_irq.c
linux-core/drm_lock.c
linux-core/drm_memory.c
linux-core/i915_drv.c
linux-core/i915_gem.c
linux-core/i915_gem_proc.c
linux-core/i915_opregion.c
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/i915_irq.c

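The new GEM object-creation path added in linux-core/i915_gem.c below is reached from user space through an ioctl on the DRM device. A minimal user-space sketch of that call, assuming the DRM_IOCTL_I915_GEM_CREATE request number and the struct drm_i915_gem_create layout provided by i915_drm.h in this tree (neither is shown in this excerpt):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"   /* struct drm_i915_gem_create, DRM_IOCTL_I915_GEM_CREATE (assumed) */

/* Ask the kernel for a GEM object of at least 'size' bytes; the kernel
 * rounds the size up to a whole page, as i915_gem_create_ioctl() below does.
 */
static int gem_create(int drm_fd, uint64_t size, uint32_t *handle)
{
        struct drm_i915_gem_create create;

        memset(&create, 0, sizeof(create));
        create.size = size;
        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
                return -1;

        *handle = create.handle;        /* per-file handle naming the new object */
        return 0;
}
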
@@@ -20,8 -20,8 +20,9 @@@ r128-objs   := r128_drv.o r128_cce.o r1
  mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
  i810-objs   := i810_drv.o i810_dma.o
  i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
-               i915_buffer.o i915_compat.o i915_execbuf.o \
+               i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
 -              i915_opregion.o
++              i915_opregion.o \
 +              i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
  nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
                nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
                nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
@@@ -1355,71 -1260,8 +1361,72 @@@ static inline struct drm_memrange *drm_
        return block->mm;
  }
  
 +/* Graphics Execution Manager library functions (drm_gem.c) */
 +int
 +drm_gem_init (struct drm_device *dev);
 +
 +void
 +drm_gem_object_free (struct kref *kref);
 +
 +struct drm_gem_object *
 +drm_gem_object_alloc(struct drm_device *dev, size_t size);
 +
 +void
 +drm_gem_object_handle_free (struct kref *kref);
 +    
 +static inline void drm_gem_object_reference(struct drm_gem_object *obj)
 +{
 +      kref_get(&obj->refcount);
 +}
 +
 +static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
 +{
 +      if (obj == NULL)
 +              return;
 +
 +      kref_put (&obj->refcount, drm_gem_object_free);
 +}
 +
 +int
 +drm_gem_handle_create(struct drm_file *file_priv,
 +                    struct drm_gem_object *obj,
 +                    int *handlep);
 +
 +static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
 +{
 +      drm_gem_object_reference (obj);
 +      kref_get(&obj->handlecount);
 +}
 +
 +static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj)
 +{
 +      if (obj == NULL)
 +              return;
 +      
 +      /*
 +       * Must bump handle count first as this may be the last
 +       * ref, in which case the object would disappear before we
 +       * checked for a name
 +       */
 +      kref_put (&obj->handlecount, drm_gem_object_handle_free);
 +      drm_gem_object_unreference (obj);
 +}
 +
 +struct drm_gem_object *
 +drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
 +                    int handle);
 +int drm_gem_close_ioctl(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv);
 +int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv);
 +int drm_gem_open_ioctl(struct drm_device *dev, void *data,
 +                     struct drm_file *file_priv);
 +
 +void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
 +void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
 +
  extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
+ extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
  extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
  
  static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
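
The declarations above carry the GEM lifetime rules: every object has both a kref and a handle count, and drm_gem_object_handle_unreference() drops the handle count before the object reference (see the comment in it above). A minimal sketch of a driver ioctl using these helpers, mirroring the pattern of i915_gem_create_ioctl() later in this diff; the example_gem_create argument block is hypothetical:

#include "drmP.h"       /* drm_gem_object_alloc(), drm_gem_handle_create(), ... */

/* Hypothetical ioctl argument block, for illustration only. */
struct example_gem_create {
        size_t size;
        int handle;
};

static int example_gem_create_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct example_gem_create *args = data;
        struct drm_gem_object *obj;
        int handle, ret;

        /* Allocate the backing object; GEM backs it with a shmem file. */
        obj = drm_gem_object_alloc(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        /* Publish a per-file handle for it. */
        ret = drm_gem_handle_create(file_priv, obj, &handle);

        /* Drop the allocation-time references under struct_mutex, as
         * i915_gem_create_ioctl() does; the handle keeps the object alive.
         */
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}
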
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 593302e,0000000..4c167d2
mode 100644,000000..100644
--- /dev/null
@@@ -1,2501 -1,0 +1,2501 @@@
-       OUT_RING(CMD_STORE_DWORD_IDX);
-       OUT_RING(I915_GEM_HWS_INDEX << STORE_DWORD_INDEX_SHIFT);
 +/*
 + * Copyright © 2008 Intel Corporation
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the next
 + * paragraph) shall be included in all copies or substantial portions of the
 + * Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 + * IN THE SOFTWARE.
 + *
 + * Authors:
 + *    Eric Anholt <eric@anholt.net>
 + *
 + */
 +
 +#include "drmP.h"
 +#include "drm.h"
 +#include "drm_compat.h"
 +#include "i915_drm.h"
 +#include "i915_drv.h"
 +#include <linux/swap.h>
 +
 +static int
 +i915_gem_object_set_domain(struct drm_gem_object *obj,
 +                          uint32_t read_domains,
 +                          uint32_t write_domain);
 +static int
 +i915_gem_object_set_domain_range(struct drm_gem_object *obj,
 +                               uint64_t offset,
 +                               uint64_t size,
 +                               uint32_t read_domains,
 +                               uint32_t write_domain);
 +int
 +i915_gem_set_domain(struct drm_gem_object *obj,
 +                  struct drm_file *file_priv,
 +                  uint32_t read_domains,
 +                  uint32_t write_domain);
 +static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 +static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 +static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 +
 +int
 +i915_gem_init_ioctl(struct drm_device *dev, void *data,
 +                  struct drm_file *file_priv)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_init *args = data;
 +
 +      mutex_lock(&dev->struct_mutex);
 +
 +      if (args->gtt_start >= args->gtt_end ||
 +          (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
 +          (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
 +              mutex_unlock(&dev->struct_mutex);
 +              return -EINVAL;
 +      }
 +
 +      drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
 +          args->gtt_end - args->gtt_start);
 +
 +      dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
 +
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return 0;
 +}
 +
 +
 +/**
 + * Creates a new mm object and returns a handle to it.
 + */
 +int
 +i915_gem_create_ioctl(struct drm_device *dev, void *data,
 +                    struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_create *args = data;
 +      struct drm_gem_object *obj;
 +      int handle, ret;
 +
 +      args->size = roundup(args->size, PAGE_SIZE);
 +
 +      /* Allocate the new object */
 +      obj = drm_gem_object_alloc(dev, args->size);
 +      if (obj == NULL)
 +              return -ENOMEM;
 +
 +      ret = drm_gem_handle_create(file_priv, obj, &handle);
 +      mutex_lock(&dev->struct_mutex);
 +      drm_gem_object_handle_unreference(obj);
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      if (ret)
 +              return ret;
 +
 +      args->handle = handle;
 +
 +      return 0;
 +}
 +
 +/**
 + * Reads data from the object referenced by handle.
 + *
 + * On error, the contents of *data are undefined.
 + */
 +int
 +i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 +                   struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_pread *args = data;
 +      struct drm_gem_object *obj;
 +      struct drm_i915_gem_object *obj_priv;
 +      ssize_t read;
 +      loff_t offset;
 +      int ret;
 +
 +      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 +      if (obj == NULL)
 +              return -EBADF;
 +      obj_priv = obj->driver_private;
 +
 +      /* Bounds check source.
 +       *
 +       * XXX: This could use review for overflow issues...
 +       */
 +      if (args->offset > obj->size || args->size > obj->size ||
 +          args->offset + args->size > obj->size) {
 +              drm_gem_object_unreference(obj);
 +              return -EINVAL;
 +      }
 +
 +      mutex_lock(&dev->struct_mutex);
 +
 +      ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
 +                                             I915_GEM_DOMAIN_CPU, 0);
 +      if (ret != 0) {
 +              drm_gem_object_unreference(obj);
 +              mutex_unlock(&dev->struct_mutex);
 +              return ret;
 +      }
 +
 +      offset = args->offset;
 +
 +      read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
 +                      args->size, &offset);
 +      if (read != args->size) {
 +              drm_gem_object_unreference(obj);
 +              mutex_unlock(&dev->struct_mutex);
 +              if (read < 0)
 +                      return read;
 +              else
 +                      return -EINVAL;
 +      }
 +
 +      drm_gem_object_unreference(obj);
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return 0;
 +}
 +
 +#include "drm_compat.h"
 +
 +static int
 +i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 +                  struct drm_i915_gem_pwrite *args,
 +                  struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +      ssize_t remain;
 +      loff_t offset;
 +      char __user *user_data;
 +      char *vaddr;
 +      int i, o, l;
 +      int ret = 0;
 +      unsigned long pfn;
 +      unsigned long unwritten;
 +
 +      user_data = (char __user *) (uintptr_t) args->data_ptr;
 +      remain = args->size;
 +      if (!access_ok(VERIFY_READ, user_data, remain))
 +              return -EFAULT;
 +
 +
 +      mutex_lock(&dev->struct_mutex);
 +      ret = i915_gem_object_pin(obj, 0);
 +      if (ret) {
 +              mutex_unlock(&dev->struct_mutex);
 +              return ret;
 +      }
 +      ret = i915_gem_set_domain(obj, file_priv,
 +                                I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
 +      if (ret)
 +              goto fail;
 +
 +      obj_priv = obj->driver_private;
 +      offset = obj_priv->gtt_offset + args->offset;
 +      obj_priv->dirty = 1;
 +
 +      while (remain > 0) {
 +              /* Operation in this page
 +               *
 +               * i = page number
 +               * o = offset within page
 +               * l = bytes to copy
 +               */
 +              i = offset >> PAGE_SHIFT;
 +              o = offset & (PAGE_SIZE-1);
 +              l = remain;
 +              if ((o + l) > PAGE_SIZE)
 +                      l = PAGE_SIZE - o;
 +
 +              pfn = (dev->agp->base >> PAGE_SHIFT) + i;
 +
 +#ifdef DRM_KMAP_ATOMIC_PROT_PFN
 +              /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
 +               */
 +              vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
 +                                           __pgprot(__PAGE_KERNEL));
 +#if WATCH_PWRITE
 +              DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
 +                       i, o, l, pfn, vaddr);
 +#endif
 +              unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
 +                                                            user_data, l);
 +              kunmap_atomic(vaddr, KM_USER0);
 +
 +              if (unwritten)
 +#endif
 +              {
 +                      vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
 +#if WATCH_PWRITE
 +                      DRM_INFO("pwrite slow i %d o %d l %d "
 +                               "pfn %ld vaddr %p\n",
 +                               i, o, l, pfn, vaddr);
 +#endif
 +                      if (vaddr == NULL) {
 +                              ret = -EFAULT;
 +                              goto fail;
 +                      }
 +                      unwritten = __copy_from_user(vaddr + o, user_data, l);
 +#if WATCH_PWRITE
 +                      DRM_INFO("unwritten %ld\n", unwritten);
 +#endif
 +                      iounmap(vaddr);
 +                      if (unwritten) {
 +                              ret = -EFAULT;
 +                              goto fail;
 +                      }
 +              }
 +
 +              remain -= l;
 +              user_data += l;
 +              offset += l;
 +      }
 +#if WATCH_PWRITE && 1
 +      i915_gem_clflush_object(obj);
 +      i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
 +      i915_gem_clflush_object(obj);
 +#endif
 +
 +fail:
 +      i915_gem_object_unpin(obj);
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return ret;
 +}
 +
 +int
 +i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 +                    struct drm_i915_gem_pwrite *args,
 +                    struct drm_file *file_priv)
 +{
 +      int ret;
 +      loff_t offset;
 +      ssize_t written;
 +
 +      mutex_lock(&dev->struct_mutex);
 +
 +      ret = i915_gem_set_domain(obj, file_priv,
 +                                I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
 +      if (ret) {
 +              mutex_unlock(&dev->struct_mutex);
 +              return ret;
 +      }
 +
 +      offset = args->offset;
 +
 +      written = vfs_write(obj->filp,
 +                          (char __user *)(uintptr_t) args->data_ptr,
 +                          args->size, &offset);
 +      if (written != args->size) {
 +              mutex_unlock(&dev->struct_mutex);
 +              if (written < 0)
 +                      return written;
 +              else
 +                      return -EINVAL;
 +      }
 +
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return 0;
 +}
 +
 +/**
 + * Writes data to the object referenced by handle.
 + *
 + * On error, the contents of the buffer that were to be modified are undefined.
 + */
 +int
 +i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 +                    struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_pwrite *args = data;
 +      struct drm_gem_object *obj;
 +      struct drm_i915_gem_object *obj_priv;
 +      int ret = 0;
 +
 +      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 +      if (obj == NULL)
 +              return -EBADF;
 +      obj_priv = obj->driver_private;
 +
 +      /* Bounds check destination.
 +       *
 +       * XXX: This could use review for overflow issues...
 +       */
 +      if (args->offset > obj->size || args->size > obj->size ||
 +          args->offset + args->size > obj->size) {
 +              drm_gem_object_unreference(obj);
 +              return -EINVAL;
 +      }
 +
 +      /* We can only do the GTT pwrite on untiled buffers, as otherwise
 +       * it would end up going through the fenced access, and we'll get
 +       * different detiling behavior between reading and writing.
 +       * pread/pwrite currently are reading and writing from the CPU
 +       * perspective, requiring manual detiling by the client.
 +       */
 +      if (obj_priv->tiling_mode == I915_TILING_NONE &&
 +          dev->gtt_total != 0)
 +              ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
 +      else
 +              ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
 +
 +#if WATCH_PWRITE
 +      if (ret)
 +              DRM_INFO("pwrite failed %d\n", ret);
 +#endif
 +
 +      drm_gem_object_unreference(obj);
 +
 +      return ret;
 +}
 +
 +/**
 + * Called when user space prepares to use an object
 + */
 +int
 +i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 +                        struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_set_domain *args = data;
 +      struct drm_gem_object *obj;
 +      int ret;
 +
 +      if (!(dev->driver->driver_features & DRIVER_GEM))
 +              return -ENODEV;
 +
 +      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 +      if (obj == NULL)
 +              return -EBADF;
 +
 +      mutex_lock(&dev->struct_mutex);
 +      ret = i915_gem_set_domain(obj, file_priv,
 +                                args->read_domains, args->write_domain);
 +      drm_gem_object_unreference(obj);
 +      mutex_unlock(&dev->struct_mutex);
 +      return ret;
 +}
 +
 +/**
 + * Called when user space has done writes to this buffer
 + */
 +int
 +i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 +                    struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_sw_finish *args = data;
 +      struct drm_gem_object *obj;
 +      struct drm_i915_gem_object *obj_priv;
 +      int ret = 0;
 +
 +      if (!(dev->driver->driver_features & DRIVER_GEM))
 +              return -ENODEV;
 +
 +      mutex_lock(&dev->struct_mutex);
 +      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 +      if (obj == NULL) {
 +              mutex_unlock(&dev->struct_mutex);
 +              return -EBADF;
 +      }
 +
 +#if WATCH_BUF
 +      DRM_INFO("%s: sw_finish %d (%p)\n",
 +               __func__, args->handle, obj);
 +#endif
 +      obj_priv = obj->driver_private;
 +
 +      /* Pinned buffers may be scanout, so flush the cache */
 +      if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
 +              i915_gem_clflush_object(obj);
 +              drm_agp_chipset_flush(dev);
 +      }
 +      drm_gem_object_unreference(obj);
 +      mutex_unlock(&dev->struct_mutex);
 +      return ret;
 +}
 +
 +/**
 + * Maps the contents of an object, returning the address it is mapped
 + * into.
 + *
 + * While the mapping holds a reference on the contents of the object, it doesn't
 + * imply a ref on the object itself.
 + */
 +int
 +i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 +                 struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_mmap *args = data;
 +      struct drm_gem_object *obj;
 +      loff_t offset;
 +      unsigned long addr;
 +
 +      if (!(dev->driver->driver_features & DRIVER_GEM))
 +              return -ENODEV;
 +
 +      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 +      if (obj == NULL)
 +              return -EBADF;
 +
 +      offset = args->offset;
 +
 +      down_write(&current->mm->mmap_sem);
 +      addr = do_mmap(obj->filp, 0, args->size,
 +                     PROT_READ | PROT_WRITE, MAP_SHARED,
 +                     args->offset);
 +      up_write(&current->mm->mmap_sem);
 +      mutex_lock(&dev->struct_mutex);
 +      drm_gem_object_unreference(obj);
 +      mutex_unlock(&dev->struct_mutex);
 +      if (IS_ERR((void *)addr))
 +              return addr;
 +
 +      args->addr_ptr = (uint64_t) addr;
 +
 +      return 0;
 +}
 +
 +static void
 +i915_gem_object_free_page_list(struct drm_gem_object *obj)
 +{
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +      int page_count = obj->size / PAGE_SIZE;
 +      int i;
 +
 +      if (obj_priv->page_list == NULL)
 +              return;
 +
 +
 +      for (i = 0; i < page_count; i++)
 +              if (obj_priv->page_list[i] != NULL) {
 +                      if (obj_priv->dirty)
 +                              set_page_dirty(obj_priv->page_list[i]);
 +                      mark_page_accessed(obj_priv->page_list[i]);
 +                      page_cache_release(obj_priv->page_list[i]);
 +              }
 +      obj_priv->dirty = 0;
 +
 +      drm_free(obj_priv->page_list,
 +               page_count * sizeof(struct page *),
 +               DRM_MEM_DRIVER);
 +      obj_priv->page_list = NULL;
 +}
 +
 +static void
 +i915_gem_object_move_to_active(struct drm_gem_object *obj)
 +{
 +      struct drm_device *dev = obj->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +
 +      /* Add a reference if we're newly entering the active list. */
 +      if (!obj_priv->active) {
 +              drm_gem_object_reference(obj);
 +              obj_priv->active = 1;
 +      }
 +      /* Move from whatever list we were on to the tail of execution. */
 +      list_move_tail(&obj_priv->list,
 +                     &dev_priv->mm.active_list);
 +}
 +
 +
 +static void
 +i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 +{
 +      struct drm_device *dev = obj->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +      if (obj_priv->pin_count != 0)
 +              list_del_init(&obj_priv->list);
 +      else
 +              list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 +
 +      if (obj_priv->active) {
 +              obj_priv->active = 0;
 +              drm_gem_object_unreference(obj);
 +      }
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +}
 +
 +/**
 + * Creates a new sequence number, emitting a write of it to the status page
 + * plus an interrupt, which will trigger i915_user_interrupt_handler.
 + *
 + * Must be called with struct_lock held.
 + *
 + * Returned sequence numbers are nonzero on success.
 + */
 +static uint32_t
 +i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_request *request;
 +      uint32_t seqno;
 +      int was_empty;
 +      RING_LOCALS;
 +
 +      request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
 +      if (request == NULL)
 +              return 0;
 +
 +      /* Grab the seqno we're going to make this request be, and bump the
 +       * next (skipping 0 so it can be the reserved no-seqno value).
 +       */
 +      seqno = dev_priv->mm.next_gem_seqno;
 +      dev_priv->mm.next_gem_seqno++;
 +      if (dev_priv->mm.next_gem_seqno == 0)
 +              dev_priv->mm.next_gem_seqno++;
 +
 +      BEGIN_LP_RING(4);
-       OUT_RING(GFX_OP_USER_INTERRUPT);
++      OUT_RING(MI_STORE_DWORD_INDEX);
++      OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 +      OUT_RING(seqno);
 +
-       uint32_t cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
++      OUT_RING(MI_USER_INTERRUPT);
 +      ADVANCE_LP_RING();
 +
 +      DRM_DEBUG("%d\n", seqno);
 +
 +      request->seqno = seqno;
 +      request->emitted_jiffies = jiffies;
 +      request->flush_domains = flush_domains;
 +      was_empty = list_empty(&dev_priv->mm.request_list);
 +      list_add_tail(&request->list, &dev_priv->mm.request_list);
 +
 +      if (was_empty)
 +              schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
 +      return seqno;
 +}
 +
 +/**
 + * Command execution barrier
 + *
 + * Ensures that all commands in the ring are finished
 + * before signalling the CPU
 + */
 +uint32_t
 +i915_retire_commands(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
-               cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
++      uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
 +      uint32_t flush_domains = 0;
 +      RING_LOCALS;
 +
 +      /* The sampler always gets flushed on i965 (sigh) */
 +      if (IS_I965G(dev))
 +              flush_domains |= I915_GEM_DOMAIN_SAMPLER;
 +      BEGIN_LP_RING(2);
 +      OUT_RING(cmd);
 +      OUT_RING(0); /* noop */
 +      ADVANCE_LP_RING();
 +      return flush_domains;
 +}
 +
 +/**
 + * Moves buffers associated only with the given active seqno from the active
 + * to inactive list, potentially freeing them.
 + */
 +static void
 +i915_gem_retire_request(struct drm_device *dev,
 +                      struct drm_i915_gem_request *request)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +
 +      if (request->flush_domains != 0) {
 +              struct drm_i915_gem_object *obj_priv, *next;
 +
 +              /* First clear any buffers that were only waiting for a flush
 +               * matching the one just retired.
 +               */
 +
 +              list_for_each_entry_safe(obj_priv, next,
 +                                       &dev_priv->mm.flushing_list, list) {
 +                      struct drm_gem_object *obj = obj_priv->obj;
 +
 +                      if (obj->write_domain & request->flush_domains) {
 +                              obj->write_domain = 0;
 +                              i915_gem_object_move_to_inactive(obj);
 +                      }
 +              }
 +
 +      }
 +
 +      /* Move any buffers on the active list that are no longer referenced
 +       * by the ringbuffer to the flushing/inactive lists as appropriate.
 +       */
 +      while (!list_empty(&dev_priv->mm.active_list)) {
 +              struct drm_gem_object *obj;
 +              struct drm_i915_gem_object *obj_priv;
 +
 +              obj_priv = list_first_entry(&dev_priv->mm.active_list,
 +                                          struct drm_i915_gem_object,
 +                                          list);
 +              obj = obj_priv->obj;
 +
 +              /* If the seqno being retired doesn't match the oldest in the
 +               * list, then the oldest in the list must still be newer than
 +               * this seqno.
 +               */
 +              if (obj_priv->last_rendering_seqno != request->seqno)
 +                      return;
 +#if WATCH_LRU
 +              DRM_INFO("%s: retire %d moves to inactive list %p\n",
 +                       __func__, request->seqno, obj);
 +#endif
 +
 +              if (obj->write_domain != 0) {
 +                      list_move_tail(&obj_priv->list,
 +                                     &dev_priv->mm.flushing_list);
 +              } else {
 +                      i915_gem_object_move_to_inactive(obj);
 +              }
 +      }
 +}
 +
 +/**
 + * Returns true if seq1 is later than seq2.
 + */
 +static int
 +i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 +{
 +      return (int32_t)(seq1 - seq2) >= 0;
 +}
 +
 +uint32_t
 +i915_get_gem_seqno(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +
 +      return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
 +}
 +
 +/**
 + * This function clears the request list as sequence numbers are passed.
 + */
 +void
 +i915_gem_retire_requests(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      uint32_t seqno;
 +
 +      seqno = i915_get_gem_seqno(dev);
 +
 +      while (!list_empty(&dev_priv->mm.request_list)) {
 +              struct drm_i915_gem_request *request;
 +              uint32_t retiring_seqno;
 +
 +              request = list_first_entry(&dev_priv->mm.request_list,
 +                                         struct drm_i915_gem_request,
 +                                         list);
 +              retiring_seqno = request->seqno;
 +
 +              if (i915_seqno_passed(seqno, retiring_seqno) ||
 +                  dev_priv->mm.wedged) {
 +                      i915_gem_retire_request(dev, request);
 +
 +                      list_del(&request->list);
 +                      drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
 +              } else
 +                      break;
 +      }
 +}
 +
 +void
 +i915_gem_retire_work_handler(struct work_struct *work)
 +{
 +      drm_i915_private_t *dev_priv;
 +      struct drm_device *dev;
 +
 +      dev_priv = container_of(work, drm_i915_private_t,
 +                              mm.retire_work.work);
 +      dev = dev_priv->dev;
 +
 +      mutex_lock(&dev->struct_mutex);
 +      i915_gem_retire_requests(dev);
 +      if (!list_empty(&dev_priv->mm.request_list))
 +              schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
 +      mutex_unlock(&dev->struct_mutex);
 +}
 +
 +/**
 + * Waits for a sequence number to be signaled, and cleans up the
 + * request and object lists appropriately for that event.
 + */
 +int
 +i915_wait_request(struct drm_device *dev, uint32_t seqno)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      int ret = 0;
 +
 +      BUG_ON(seqno == 0);
 +
 +      if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
 +              dev_priv->mm.waiting_gem_seqno = seqno;
 +              i915_user_irq_on(dev_priv);
 +              ret = wait_event_interruptible(dev_priv->irq_queue,
 +                                             i915_seqno_passed(i915_get_gem_seqno(dev),
 +                                                               seqno) ||
 +                                             dev_priv->mm.wedged);
 +              i915_user_irq_off(dev_priv);
 +              dev_priv->mm.waiting_gem_seqno = 0;
 +      }
 +      if (dev_priv->mm.wedged)
 +              ret = -EIO;
 +
 +      if (ret)
 +              DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
 +                        __func__, ret, seqno, i915_get_gem_seqno(dev));
 +
 +      /* Directly dispatch request retiring.  While we have the work queue
 +       * to handle this, the waiter on a request often wants an associated
 +       * buffer to have made it to the inactive list, and we would need
 +       * a separate wait queue to handle that.
 +       */
 +      if (ret == 0)
 +              i915_gem_retire_requests(dev);
 +
 +      return ret;
 +}
 +
 +static void
 +i915_gem_flush(struct drm_device *dev,
 +             uint32_t invalidate_domains,
 +             uint32_t flush_domains)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      uint32_t cmd;
 +      RING_LOCALS;
 +
 +#if WATCH_EXEC
 +      DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
 +                invalidate_domains, flush_domains);
 +#endif
 +
 +      if (flush_domains & I915_GEM_DOMAIN_CPU)
 +              drm_agp_chipset_flush(dev);
 +
 +      if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
 +                                                   I915_GEM_DOMAIN_GTT)) {
 +              /*
 +               * read/write caches:
 +               *
 +               * I915_GEM_DOMAIN_RENDER is always invalidated, but is
 +               * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
 +               * also flushed at 2d versus 3d pipeline switches.
 +               *
 +               * read-only caches:
 +               *
 +               * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
 +               * MI_READ_FLUSH is set, and is always flushed on 965.
 +               *
 +               * I915_GEM_DOMAIN_COMMAND may not exist?
 +               *
 +               * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
 +               * invalidated when MI_EXE_FLUSH is set.
 +               *
 +               * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
 +               * invalidated with every MI_FLUSH.
 +               *
 +               * TLBs:
 +               *
 +               * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 +               * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
 +               * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
 +               * are flushed at any MI_FLUSH.
 +               */
 +
-       I915_WRITE(LP_RING + RING_LEN, 0);
-       I915_WRITE(LP_RING + RING_HEAD, 0);
-       I915_WRITE(LP_RING + RING_TAIL, 0);
-       I915_WRITE(LP_RING + RING_START, 0);
++              cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
 +              if ((invalidate_domains|flush_domains) &
 +                  I915_GEM_DOMAIN_RENDER)
 +                      cmd &= ~MI_NO_WRITE_FLUSH;
 +              if (!IS_I965G(dev)) {
 +                      /*
 +                       * On the 965, the sampler cache always gets flushed
 +                       * and this bit is reserved.
 +                       */
 +                      if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
 +                              cmd |= MI_READ_FLUSH;
 +              }
 +              if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
 +                      cmd |= MI_EXE_FLUSH;
 +
 +#if WATCH_EXEC
 +              DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 +#endif
 +              BEGIN_LP_RING(2);
 +              OUT_RING(cmd);
 +              OUT_RING(0); /* noop */
 +              ADVANCE_LP_RING();
 +      }
 +}
 +
 +/**
 + * Ensures that all rendering to the object has completed and the object is
 + * safe to unbind from the GTT or access from the CPU.
 + */
 +static int
 +i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 +{
 +      struct drm_device *dev = obj->dev;
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +      int ret;
 +
 +      /* If there are writes queued to the buffer, flush and
 +       * create a new seqno to wait for.
 +       */
 +      if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
 +              uint32_t write_domain = obj->write_domain;
 +#if WATCH_BUF
 +              DRM_INFO("%s: flushing object %p from write domain %08x\n",
 +                        __func__, obj, write_domain);
 +#endif
 +              i915_gem_flush(dev, 0, write_domain);
 +              obj->write_domain = 0;
 +
 +              i915_gem_object_move_to_active(obj);
 +              obj_priv->last_rendering_seqno = i915_add_request(dev,
 +                                                                write_domain);
 +              BUG_ON(obj_priv->last_rendering_seqno == 0);
 +#if WATCH_LRU
 +              DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
 +#endif
 +      }
 +      /* If there is rendering queued on the buffer being evicted, wait for
 +       * it.
 +       */
 +      if (obj_priv->active) {
 +#if WATCH_BUF
 +              DRM_INFO("%s: object %p wait for seqno %08x\n",
 +                        __func__, obj, obj_priv->last_rendering_seqno);
 +#endif
 +              ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
 +              if (ret != 0)
 +                      return ret;
 +      }
 +
 +      return 0;
 +}
 +
 +/**
 + * Unbinds an object from the GTT aperture.
 + */
 +static int
 +i915_gem_object_unbind(struct drm_gem_object *obj)
 +{
 +      struct drm_device *dev = obj->dev;
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +      int ret = 0;
 +
 +#if WATCH_BUF
 +      DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
 +      DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
 +#endif
 +      if (obj_priv->gtt_space == NULL)
 +              return 0;
 +
 +      if (obj_priv->pin_count != 0) {
 +              DRM_ERROR("Attempting to unbind pinned buffer\n");
 +              return -EINVAL;
 +      }
 +
 +      /* Wait for any rendering to complete
 +       */
 +      ret = i915_gem_object_wait_rendering(obj);
 +      if (ret) {
 +              DRM_ERROR("wait_rendering failed: %d\n", ret);
 +              return ret;
 +      }
 +
 +      /* Move the object to the CPU domain to ensure that
 +       * any possible CPU writes while it's not in the GTT
 +       * are flushed when we go to remap it. This will
 +       * also ensure that all pending GPU writes are finished
 +       * before we unbind.
 +       */
 +      ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
 +                                       I915_GEM_DOMAIN_CPU);
 +      if (ret) {
 +              DRM_ERROR("set_domain failed: %d\n", ret);
 +              return ret;
 +      }
 +
 +      if (obj_priv->agp_mem != NULL) {
 +              drm_unbind_agp(obj_priv->agp_mem);
 +              drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
 +              obj_priv->agp_mem = NULL;
 +      }
 +
 +      BUG_ON(obj_priv->active);
 +
 +      i915_gem_object_free_page_list(obj);
 +
 +      if (obj_priv->gtt_space) {
 +              atomic_dec(&dev->gtt_count);
 +              atomic_sub(obj->size, &dev->gtt_memory);
 +
 +              drm_memrange_put_block(obj_priv->gtt_space);
 +              obj_priv->gtt_space = NULL;
 +      }
 +
 +      /* Remove ourselves from the LRU list if present. */
 +      if (!list_empty(&obj_priv->list))
 +              list_del_init(&obj_priv->list);
 +
 +      return 0;
 +}
 +
 +static int
 +i915_gem_evict_something(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_gem_object *obj;
 +      struct drm_i915_gem_object *obj_priv;
 +      int ret = 0;
 +
 +      for (;;) {
 +              /* If there's an inactive buffer available now, grab it
 +               * and be done.
 +               */
 +              if (!list_empty(&dev_priv->mm.inactive_list)) {
 +                      obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
 +                                                  struct drm_i915_gem_object,
 +                                                  list);
 +                      obj = obj_priv->obj;
 +                      BUG_ON(obj_priv->pin_count != 0);
 +#if WATCH_LRU
 +                      DRM_INFO("%s: evicting %p\n", __func__, obj);
 +#endif
 +                      BUG_ON(obj_priv->active);
 +
 +                      /* Wait on the rendering and unbind the buffer. */
 +                      ret = i915_gem_object_unbind(obj);
 +                      break;
 +              }
 +
 +              /* If we didn't get anything, but the ring is still processing
 +               * things, wait for one of those things to finish and hopefully
 +               * leave us a buffer to evict.
 +               */
 +              if (!list_empty(&dev_priv->mm.request_list)) {
 +                      struct drm_i915_gem_request *request;
 +
 +                      request = list_first_entry(&dev_priv->mm.request_list,
 +                                                 struct drm_i915_gem_request,
 +                                                 list);
 +
 +                      ret = i915_wait_request(dev, request->seqno);
 +                      if (ret)
 +                              break;
 +
 +                      /* if waiting caused an object to become inactive,
 +                       * then loop around and wait for it. Otherwise, we
 +                       * assume that waiting freed and unbound something,
 +                       * so there should now be some space in the GTT
 +                       */
 +                      if (!list_empty(&dev_priv->mm.inactive_list))
 +                              continue;
 +                      break;
 +              }
 +
 +              /* If we didn't have anything on the request list but there
 +               * are buffers awaiting a flush, emit one and try again.
 +               * When we wait on it, those buffers waiting for that flush
 +               * will get moved to inactive.
 +               */
 +              if (!list_empty(&dev_priv->mm.flushing_list)) {
 +                      obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
 +                                                  struct drm_i915_gem_object,
 +                                                  list);
 +                      obj = obj_priv->obj;
 +
 +                      i915_gem_flush(dev,
 +                                     obj->write_domain,
 +                                     obj->write_domain);
 +                      i915_add_request(dev, obj->write_domain);
 +
 +                      obj = NULL;
 +                      continue;
 +              }
 +
 +              DRM_ERROR("inactive empty %d request empty %d "
 +                        "flushing empty %d\n",
 +                        list_empty(&dev_priv->mm.inactive_list),
 +                        list_empty(&dev_priv->mm.request_list),
 +                        list_empty(&dev_priv->mm.flushing_list));
 +              /* If we didn't do any of the above, there's nothing to be done
 +               * and we just can't fit it in.
 +               */
 +              return -ENOMEM;
 +      }
 +      return ret;
 +}
 +
 +static int
 +i915_gem_object_get_page_list(struct drm_gem_object *obj)
 +{
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +      int page_count, i;
 +      struct address_space *mapping;
 +      struct inode *inode;
 +      struct page *page;
 +      int ret;
 +
 +      if (obj_priv->page_list)
 +              return 0;
 +
 +      /* Get the list of pages out of our struct file.  They'll be pinned
 +       * at this point until we release them.
 +       */
 +      page_count = obj->size / PAGE_SIZE;
 +      BUG_ON(obj_priv->page_list != NULL);
 +      obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
 +                                       DRM_MEM_DRIVER);
 +      if (obj_priv->page_list == NULL) {
 +              DRM_ERROR("Failed to allocate page list\n");
 +              return -ENOMEM;
 +      }
 +
 +      inode = obj->filp->f_path.dentry->d_inode;
 +      mapping = inode->i_mapping;
 +      for (i = 0; i < page_count; i++) {
 +              page = find_get_page(mapping, i);
 +              if (page == NULL || !PageUptodate(page)) {
 +                      if (page) {
 +                              page_cache_release(page);
 +                              page = NULL;
 +                      }
 +                      ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL);
 +
 +                      if (ret) {
 +                              DRM_ERROR("shmem_getpage failed: %d\n", ret);
 +                              i915_gem_object_free_page_list(obj);
 +                              return ret;
 +                      }
 +                      unlock_page(page);
 +              }
 +              obj_priv->page_list[i] = page;
 +      }
 +      return 0;
 +}
 +
 +/**
 + * Finds free space in the GTT aperture and binds the object there.
 + */
 +static int
 +i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 +{
 +      struct drm_device *dev = obj->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +      struct drm_memrange_node *free_space;
 +      int page_count, ret;
 +
 +      if (alignment == 0)
 +              alignment = PAGE_SIZE;
 +      if (alignment & (PAGE_SIZE - 1)) {
 +              DRM_ERROR("Invalid object alignment requested %u\n", alignment);
 +              return -EINVAL;
 +      }
 +
 + search_free:
 +      free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
 +                                            obj->size,
 +                                            alignment, 0);
 +      if (free_space != NULL) {
 +              obj_priv->gtt_space =
 +                      drm_memrange_get_block(free_space, obj->size,
 +                                             alignment);
 +              if (obj_priv->gtt_space != NULL) {
 +                      obj_priv->gtt_space->private = obj;
 +                      obj_priv->gtt_offset = obj_priv->gtt_space->start;
 +              }
 +      }
 +      if (obj_priv->gtt_space == NULL) {
 +              /* If the gtt is empty and we're still having trouble
 +               * fitting our object in, we're out of memory.
 +               */
 +#if WATCH_LRU
 +              DRM_INFO("%s: GTT full, evicting something\n", __func__);
 +#endif
 +              if (list_empty(&dev_priv->mm.inactive_list) &&
 +                  list_empty(&dev_priv->mm.flushing_list) &&
 +                  list_empty(&dev_priv->mm.active_list)) {
 +                      DRM_ERROR("GTT full, but LRU list empty\n");
 +                      return -ENOMEM;
 +              }
 +
 +              ret = i915_gem_evict_something(dev);
 +              if (ret != 0) {
 +                      DRM_ERROR("Failed to evict a buffer %d\n", ret);
 +                      return ret;
 +              }
 +              goto search_free;
 +      }
 +
 +#if WATCH_BUF
 +      DRM_INFO("Binding object of size %d at 0x%08x\n",
 +               obj->size, obj_priv->gtt_offset);
 +#endif
 +      ret = i915_gem_object_get_page_list(obj);
 +      if (ret) {
 +              drm_memrange_put_block(obj_priv->gtt_space);
 +              obj_priv->gtt_space = NULL;
 +              return ret;
 +      }
 +
 +      page_count = obj->size / PAGE_SIZE;
 +      /* Create an AGP memory structure pointing at our pages, and bind it
 +       * into the GTT.
 +       */
 +      obj_priv->agp_mem = drm_agp_bind_pages(dev,
 +                                             obj_priv->page_list,
 +                                             page_count,
 +                                             obj_priv->gtt_offset);
 +      if (obj_priv->agp_mem == NULL) {
 +              i915_gem_object_free_page_list(obj);
 +              drm_memrange_put_block(obj_priv->gtt_space);
 +              obj_priv->gtt_space = NULL;
 +              return -ENOMEM;
 +      }
 +      atomic_inc(&dev->gtt_count);
 +      atomic_add(obj->size, &dev->gtt_memory);
 +
 +      /* Assert that the object is not currently in any GPU domain. As it
 +       * wasn't in the GTT, there shouldn't be any way it could have been in
 +       * a GPU cache
 +       */
 +      BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
 +      BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
 +
 +      return 0;
 +}
 +
 +void
 +i915_gem_clflush_object(struct drm_gem_object *obj)
 +{
 +      struct drm_i915_gem_object      *obj_priv = obj->driver_private;
 +
 +      /* If we don't have a page list set up, then we're not pinned
 +       * to GPU, and we can ignore the cache flush because it'll happen
 +       * again at bind time.
 +       */
 +      if (obj_priv->page_list == NULL)
 +              return;
 +
 +      drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
 +}
 +
 +/*
 + * Set the next domain for the specified object. This
 + * may not actually perform the necessary flushing/invalidating though,
 + * as that may want to be batched with other set_domain operations
 + *
 + * This is (we hope) the only really tricky part of gem. The goal
 + * is fairly simple -- track which caches hold bits of the object
 + * and make sure they remain coherent. A few concrete examples may
 + * help to explain how it works. For shorthand, we use the notation
 + * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 + * a pair of read and write domain masks.
 + *
 + * Case 1: the batch buffer
 + *
 + *    1. Allocated
 + *    2. Written by CPU
 + *    3. Mapped to GTT
 + *    4. Read by GPU
 + *    5. Unmapped from GTT
 + *    6. Freed
 + *
 + *    Let's take these a step at a time
 + *
 + *    1. Allocated
 + *            Pages allocated from the kernel may still have
 + *            cache contents, so we set them to (CPU, CPU) always.
 + *    2. Written by CPU (using pwrite)
 + *            The pwrite function calls set_domain (CPU, CPU) and
 + *            this function does nothing (as nothing changes)
 + *    3. Mapped by GTT
 + *            This function asserts that the object is not
 + *            currently in any GPU-based read or write domains
 + *    4. Read by GPU
 + *            i915_gem_execbuffer calls set_domain (COMMAND, 0).
 + *            As write_domain is zero, this function adds in the
 + *            current read domains (CPU+COMMAND, 0).
 + *            flush_domains is set to CPU.
 + *            invalidate_domains is set to COMMAND
 + *            clflush is run to get data out of the CPU caches
 + *            then i915_dev_set_domain calls i915_gem_flush to
 + *            emit an MI_FLUSH and drm_agp_chipset_flush
 + *    5. Unmapped from GTT
 + *            i915_gem_object_unbind calls set_domain (CPU, CPU)
 + *            flush_domains and invalidate_domains end up both zero
 + *            so no flushing/invalidating happens
 + *    6. Freed
 + *            yay, done
 + *
 + * Case 2: The shared render buffer
 + *
 + *    1. Allocated
 + *    2. Mapped to GTT
 + *    3. Read/written by GPU
 + *    4. set_domain to (CPU,CPU)
 + *    5. Read/written by CPU
 + *    6. Read/written by GPU
 + *
 + *    1. Allocated
 + *            Same as last example, (CPU, CPU)
 + *    2. Mapped to GTT
 + *            Nothing changes (assertions find that it is not in the GPU)
 + *    3. Read/written by GPU
 + *            execbuffer calls set_domain (RENDER, RENDER)
 + *            flush_domains gets CPU
 + *            invalidate_domains gets GPU
 + *            clflush (obj)
 + *            MI_FLUSH and drm_agp_chipset_flush
 + *    4. set_domain (CPU, CPU)
 + *            flush_domains gets GPU
 + *            invalidate_domains gets CPU
 + *            wait_rendering (obj) to make sure all drawing is complete.
 + *            This will include an MI_FLUSH to get the data from GPU
 + *            to memory
 + *            clflush (obj) to invalidate the CPU cache
 + *            Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 + *    5. Read/written by CPU
 + *            cache lines are loaded and dirtied
 + *    6. Read written by GPU
 + *            Same as last GPU access
 + *
 + * Case 3: The constant buffer
 + *
 + *    1. Allocated
 + *    2. Written by CPU
 + *    3. Read by GPU
 + *    4. Updated (written) by CPU again
 + *    5. Read by GPU
 + *
 + *    1. Allocated
 + *            (CPU, CPU)
 + *    2. Written by CPU
 + *            (CPU, CPU)
 + *    3. Read by GPU
 + *            (CPU+RENDER, 0)
 + *            flush_domains = CPU
 + *            invalidate_domains = RENDER
 + *            clflush (obj)
 + *            MI_FLUSH
 + *            drm_agp_chipset_flush
 + *    4. Updated (written) by CPU again
 + *            (CPU, CPU)
 + *            flush_domains = 0 (no previous write domain)
 + *            invalidate_domains = 0 (no new read domains)
 + *    5. Read by GPU
 + *            (CPU+RENDER, 0)
 + *            flush_domains = CPU
 + *            invalidate_domains = RENDER
 + *            clflush (obj)
 + *            MI_FLUSH
 + *            drm_agp_chipset_flush
 + */
 +static int
 +i915_gem_object_set_domain(struct drm_gem_object *obj,
 +                          uint32_t read_domains,
 +                          uint32_t write_domain)
 +{
 +      struct drm_device               *dev = obj->dev;
 +      struct drm_i915_gem_object      *obj_priv = obj->driver_private;
 +      uint32_t                        invalidate_domains = 0;
 +      uint32_t                        flush_domains = 0;
 +      int                             ret;
 +
 +#if WATCH_BUF
 +      DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
 +               __func__, obj,
 +               obj->read_domains, read_domains,
 +               obj->write_domain, write_domain);
 +#endif
 +      /*
 +       * If the object isn't moving to a new write domain,
 +       * let the object stay in multiple read domains
 +       */
 +      if (write_domain == 0)
 +              read_domains |= obj->read_domains;
 +      else
 +              obj_priv->dirty = 1;
 +
 +      /*
 +       * Flush the current write domain if
 +       * the new read domains don't match. Invalidate
 +       * any read domains which differ from the old
 +       * write domain
 +       */
 +      if (obj->write_domain && obj->write_domain != read_domains) {
 +              flush_domains |= obj->write_domain;
 +              invalidate_domains |= read_domains & ~obj->write_domain;
 +      }
 +      /*
 +       * Invalidate any read caches which may have
 +       * stale data. That is, any new read domains.
 +       */
 +      invalidate_domains |= read_domains & ~obj->read_domains;
 +      if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 +#if WATCH_BUF
 +              DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
 +                       __func__, flush_domains, invalidate_domains);
 +#endif
 +              /*
 +               * If we're invalidating the CPU cache and flushing a GPU cache,
 +               * then pause for rendering so that the GPU caches will be
 +               * flushed before the cpu cache is invalidated
 +               */
 +              if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
 +                  (flush_domains & ~(I915_GEM_DOMAIN_CPU |
 +                                     I915_GEM_DOMAIN_GTT))) {
 +                      ret = i915_gem_object_wait_rendering(obj);
 +                      if (ret)
 +                              return ret;
 +              }
 +              i915_gem_clflush_object(obj);
 +      }
 +
 +      if ((write_domain | flush_domains) != 0)
 +              obj->write_domain = write_domain;
 +
 +      /* If we're invalidating the CPU domain, clear the per-page CPU
 +       * domain list as well.
 +       */
 +      if (obj_priv->page_cpu_valid != NULL &&
 +          (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
 +          ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
 +              memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
 +      }
 +      obj->read_domains = read_domains;
 +
 +      dev->invalidate_domains |= invalidate_domains;
 +      dev->flush_domains |= flush_domains;
 +#if WATCH_BUF
 +      DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
 +               __func__,
 +               obj->read_domains, obj->write_domain,
 +               dev->invalidate_domains, dev->flush_domains);
 +#endif
 +      return 0;
 +}
 +
 +/**
 + * Set the read/write domain on a range of the object.
 + *
 + * Currently only implemented for CPU reads, otherwise drops to normal
 + * i915_gem_object_set_domain().
 + */
 +static int
 +i915_gem_object_set_domain_range(struct drm_gem_object *obj,
 +                               uint64_t offset,
 +                               uint64_t size,
 +                               uint32_t read_domains,
 +                               uint32_t write_domain)
 +{
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +      int ret, i;
 +
 +      if (obj->read_domains & I915_GEM_DOMAIN_CPU)
 +              return 0;
 +
 +      if (read_domains != I915_GEM_DOMAIN_CPU ||
 +          write_domain != 0)
 +              return i915_gem_object_set_domain(obj,
 +                                                read_domains, write_domain);
 +
 +      /* Wait on any GPU rendering to the object to be flushed. */
 +      if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
 +              ret = i915_gem_object_wait_rendering(obj);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      if (obj_priv->page_cpu_valid == NULL) {
 +              obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
 +                                                    DRM_MEM_DRIVER);
 +      }
 +
 +      /* Flush the cache on any pages that are still invalid from the CPU's
 +       * perspective.
 +       */
 +      for (i = offset / PAGE_SIZE; i < (offset + size - 1) / PAGE_SIZE; i++) {
 +              if (obj_priv->page_cpu_valid[i])
 +                      continue;
 +
 +              drm_ttm_cache_flush(obj_priv->page_list + i, 1);
 +
 +              obj_priv->page_cpu_valid[i] = 1;
 +      }
 +
 +      return 0;
 +}
 +
 +/**
 + * Once all of the objects have been set in the proper domain,
 + * perform the necessary flush and invalidate operations.
 + *
 + * Returns the write domains flushed, for use in flush tracking.
 + */
 +static uint32_t
 +i915_gem_dev_set_domain(struct drm_device *dev)
 +{
 +      uint32_t flush_domains = dev->flush_domains;
 +
 +      /*
 +       * Now that all the buffers are synced to the proper domains,
 +       * flush and invalidate the collected domains
 +       */
 +      if (dev->invalidate_domains | dev->flush_domains) {
 +#if WATCH_EXEC
 +              DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
 +                        __func__,
 +                       dev->invalidate_domains,
 +                       dev->flush_domains);
 +#endif
 +              i915_gem_flush(dev,
 +                             dev->invalidate_domains,
 +                             dev->flush_domains);
 +              dev->invalidate_domains = 0;
 +              dev->flush_domains = 0;
 +      }
 +
 +      return flush_domains;
 +}
 +
 +/**
 + * Pin an object to the GTT and evaluate the relocations landing in it.
 + */
 +static int
 +i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 +                               struct drm_file *file_priv,
 +                               struct drm_i915_gem_exec_object *entry)
 +{
 +      struct drm_device *dev = obj->dev;
 +      struct drm_i915_gem_relocation_entry reloc;
 +      struct drm_i915_gem_relocation_entry __user *relocs;
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +      int i, ret;
 +      uint32_t last_reloc_offset = -1;
 +      void *reloc_page = NULL;
 +
 +      /* Choose the GTT offset for our buffer and put it there. */
 +      ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
 +      if (ret)
 +              return ret;
 +
 +      entry->offset = obj_priv->gtt_offset;
 +
 +      relocs = (struct drm_i915_gem_relocation_entry __user *)
 +               (uintptr_t) entry->relocs_ptr;
 +      /* Apply the relocations, using the GTT aperture to avoid cache
 +       * flushing requirements.
 +       */
 +      for (i = 0; i < entry->relocation_count; i++) {
 +              struct drm_gem_object *target_obj;
 +              struct drm_i915_gem_object *target_obj_priv;
 +              uint32_t reloc_val, reloc_offset, *reloc_entry;
 +              int ret;
 +
 +              ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
 +              if (ret != 0) {
 +                      i915_gem_object_unpin(obj);
 +                      return ret;
 +              }
 +
 +              target_obj = drm_gem_object_lookup(obj->dev, file_priv,
 +                                                 reloc.target_handle);
 +              if (target_obj == NULL) {
 +                      i915_gem_object_unpin(obj);
 +                      return -EBADF;
 +              }
 +              target_obj_priv = target_obj->driver_private;
 +
 +              /* The target buffer should have appeared before us in the
 +               * exec_object list, so it should have a GTT space bound by now.
 +               */
 +              if (target_obj_priv->gtt_space == NULL) {
 +                      DRM_ERROR("No GTT space found for object %d\n",
 +                                reloc.target_handle);
 +                      drm_gem_object_unreference(target_obj);
 +                      i915_gem_object_unpin(obj);
 +                      return -EINVAL;
 +              }
 +
 +              if (reloc.offset > obj->size - 4) {
 +                      DRM_ERROR("Relocation beyond object bounds: "
 +                                "obj %p target %d offset %d size %d.\n",
 +                                obj, reloc.target_handle,
 +                                (int) reloc.offset, (int) obj->size);
 +                      drm_gem_object_unreference(target_obj);
 +                      i915_gem_object_unpin(obj);
 +                      return -EINVAL;
 +              }
 +              if (reloc.offset & 3) {
 +                      DRM_ERROR("Relocation not 4-byte aligned: "
 +                                "obj %p target %d offset %d.\n",
 +                                obj, reloc.target_handle,
 +                                (int) reloc.offset);
 +                      drm_gem_object_unreference(target_obj);
 +                      i915_gem_object_unpin(obj);
 +                      return -EINVAL;
 +              }
 +
 +              if (reloc.write_domain && target_obj->pending_write_domain &&
 +                  reloc.write_domain != target_obj->pending_write_domain) {
 +                      DRM_ERROR("Write domain conflict: "
 +                                "obj %p target %d offset %d "
 +                                "new %08x old %08x\n",
 +                                obj, reloc.target_handle,
 +                                (int) reloc.offset,
 +                                reloc.write_domain,
 +                                target_obj->pending_write_domain);
 +                      drm_gem_object_unreference(target_obj);
 +                      i915_gem_object_unpin(obj);
 +                      return -EINVAL;
 +              }
 +
 +#if WATCH_RELOC
 +              DRM_INFO("%s: obj %p offset %08x target %d "
 +                       "read %08x write %08x gtt %08x "
 +                       "presumed %08x delta %08x\n",
 +                       __func__,
 +                       obj,
 +                       (int) reloc.offset,
 +                       (int) reloc.target_handle,
 +                       (int) reloc.read_domains,
 +                       (int) reloc.write_domain,
 +                       (int) target_obj_priv->gtt_offset,
 +                       (int) reloc.presumed_offset,
 +                       reloc.delta);
 +#endif
 +
 +              target_obj->pending_read_domains |= reloc.read_domains;
 +              target_obj->pending_write_domain |= reloc.write_domain;
 +
 +              /* If the relocation already has the right value in it, no
 +               * more work needs to be done.
 +               */
 +              if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
 +                      drm_gem_object_unreference(target_obj);
 +                      continue;
 +              }
 +
 +              /* Now that we're going to actually write some data in,
 +               * make sure that any rendering using this buffer's contents
 +               * is completed.
 +               */
 +              i915_gem_object_wait_rendering(obj);
 +
 +              /* As we're writing through the gtt, flush
 +               * any CPU writes before we write the relocations
 +               */
 +              if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
 +                      i915_gem_clflush_object(obj);
 +                      drm_agp_chipset_flush(dev);
 +                      obj->write_domain = 0;
 +              }
 +
 +              /* Map the page containing the relocation we're going to
 +               * perform.
 +               */
 +              reloc_offset = obj_priv->gtt_offset + reloc.offset;
 +              if (reloc_page == NULL ||
 +                  (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
 +                  (reloc_offset & ~(PAGE_SIZE - 1))) {
 +                      if (reloc_page != NULL)
 +                              iounmap(reloc_page);
 +
 +                      reloc_page = ioremap(dev->agp->base +
 +                                           (reloc_offset & ~(PAGE_SIZE - 1)),
 +                                           PAGE_SIZE);
 +                      last_reloc_offset = reloc_offset;
 +                      if (reloc_page == NULL) {
 +                              drm_gem_object_unreference(target_obj);
 +                              i915_gem_object_unpin(obj);
 +                              return -ENOMEM;
 +                      }
 +              }
 +
 +              reloc_entry = (uint32_t *)((char *)reloc_page +
 +                                         (reloc_offset & (PAGE_SIZE - 1)));
 +              reloc_val = target_obj_priv->gtt_offset + reloc.delta;
 +
 +#if WATCH_BUF
 +              DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
 +                        obj, (unsigned int) reloc.offset,
 +                        readl(reloc_entry), reloc_val);
 +#endif
 +              writel(reloc_val, reloc_entry);
 +
 +              /* Write the updated presumed offset for this entry back out
 +               * to the user.
 +               */
 +              reloc.presumed_offset = target_obj_priv->gtt_offset;
 +              ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
 +              if (ret != 0) {
 +                      drm_gem_object_unreference(target_obj);
 +                      i915_gem_object_unpin(obj);
 +                      return ret;
 +              }
 +
 +              drm_gem_object_unreference(target_obj);
 +      }
 +
 +      if (reloc_page != NULL)
 +              iounmap(reloc_page);
 +
 +#if WATCH_BUF
 +      if (0)
 +              i915_gem_dump_object(obj, 128, __func__, ~0);
 +#endif
 +      return 0;
 +}
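For context, the relocation loop above patches GTT addresses into the batch through the aperture; userspace only supplies object handles and deltas. A minimal, hypothetical userspace sketch of filling one relocation entry follows. The field names match the structure this function reads via copy_from_user(); the I915_GEM_DOMAIN_RENDER choice and the "i915_drm.h" include path are assumptions.

#include <stdint.h>
#include <string.h>
#include "i915_drm.h"

/* Describe one pointer inside the batch that must be fixed up to the
 * target object's final GTT offset plus a delta. */
static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
		       uint32_t target_handle, uint64_t batch_offset,
		       uint32_t delta)
{
	memset(reloc, 0, sizeof(*reloc));
	reloc->target_handle = target_handle;	/* GEM handle of the target BO */
	reloc->offset = batch_offset;		/* byte offset in the batch to patch */
	reloc->delta = delta;			/* added to the target's GTT offset */
	reloc->presumed_offset = 0;		/* the kernel skips the write if this already matches */
	reloc->read_domains = I915_GEM_DOMAIN_RENDER;
	reloc->write_domain = 0;
}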
 +
 +/**
 + * Dispatch a batchbuffer to the ring.
 + */
 +static int
 +i915_dispatch_gem_execbuffer(struct drm_device *dev,
 +                            struct drm_i915_gem_execbuffer *exec,
 +                            uint64_t exec_offset)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
 +                                           (uintptr_t) exec->cliprects_ptr;
 +      int nbox = exec->num_cliprects;
 +      int i = 0, count;
 +      uint32_t        exec_start, exec_len;
 +      RING_LOCALS;
 +
 +      exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 +      exec_len = (uint32_t) exec->batch_len;
 +
 +      if ((exec_start | exec_len) & 0x7) {
 +              DRM_ERROR("alignment\n");
 +              return -EINVAL;
 +      }
 +
 +      if (!exec_start)
 +              return -EINVAL;
 +
 +      count = nbox ? nbox : 1;
 +
 +      for (i = 0; i < count; i++) {
 +              if (i < nbox) {
 +                      int ret = i915_emit_box(dev, boxes, i,
 +                                              exec->DR1, exec->DR4);
 +                      if (ret)
 +                              return ret;
 +              }
 +
 +              if (IS_I830(dev) || IS_845G(dev)) {
 +                      BEGIN_LP_RING(4);
 +                      OUT_RING(MI_BATCH_BUFFER);
 +                      OUT_RING(exec_start | MI_BATCH_NON_SECURE);
 +                      OUT_RING(exec_start + exec_len - 4);
 +                      OUT_RING(0);
 +                      ADVANCE_LP_RING();
 +              } else {
 +                      BEGIN_LP_RING(2);
 +                      if (IS_I965G(dev)) {
 +                              OUT_RING(MI_BATCH_BUFFER_START |
 +                                       (2 << 6) |
 +                                       MI_BATCH_NON_SECURE_I965);
 +                              OUT_RING(exec_start);
 +                      } else {
 +                              OUT_RING(MI_BATCH_BUFFER_START |
 +                                       (2 << 6));
 +                              OUT_RING(exec_start | MI_BATCH_NON_SECURE);
 +                      }
 +                      ADVANCE_LP_RING();
 +              }
 +      }
 +
 +      /* XXX breadcrumb */
 +      return 0;
 +}
 +
 +/* Throttle our rendering by waiting until the ring has completed our requests
 + * emitted over 20 msec ago.
 + *
 + * This should get us reasonable parallelism between CPU and GPU but also
 + * relatively low latency when blocking on a particular request to finish.
 + */
 +static int
 +i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 +{
 +      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
 +      int ret = 0;
 +      uint32_t seqno;
 +
 +      mutex_lock(&dev->struct_mutex);
 +      seqno = i915_file_priv->mm.last_gem_throttle_seqno;
 +      i915_file_priv->mm.last_gem_throttle_seqno =
 +              i915_file_priv->mm.last_gem_seqno;
 +      if (seqno)
 +              ret = i915_wait_request(dev, seqno);
 +      mutex_unlock(&dev->struct_mutex);
 +      return ret;
 +}
 +
 +int
 +i915_gem_execbuffer(struct drm_device *dev, void *data,
 +                  struct drm_file *file_priv)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
 +      struct drm_i915_gem_execbuffer *args = data;
 +      struct drm_i915_gem_exec_object *exec_list = NULL;
 +      struct drm_gem_object **object_list = NULL;
 +      struct drm_gem_object *batch_obj;
 +      int ret, i, pinned = 0;
 +      uint64_t exec_offset;
 +      uint32_t seqno, flush_domains;
 +
 +#if WATCH_EXEC
 +      DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
 +                (int) args->buffers_ptr, args->buffer_count, args->batch_len);
 +#endif
 +
 +      /* Copy in the exec list from userland */
 +      exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
 +                             DRM_MEM_DRIVER);
 +      object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
 +                               DRM_MEM_DRIVER);
 +      if (exec_list == NULL || object_list == NULL) {
 +              DRM_ERROR("Failed to allocate exec or object list "
 +                        "for %d buffers\n",
 +                        args->buffer_count);
 +              ret = -ENOMEM;
 +              goto pre_mutex_err;
 +      }
 +      ret = copy_from_user(exec_list,
 +                           (struct drm_i915_relocation_entry __user *)
 +                           (uintptr_t) args->buffers_ptr,
 +                           sizeof(*exec_list) * args->buffer_count);
 +      if (ret != 0) {
 +              DRM_ERROR("copy %d exec entries failed %d\n",
 +                        args->buffer_count, ret);
 +              goto pre_mutex_err;
 +      }
 +
 +      mutex_lock(&dev->struct_mutex);
 +
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +
 +      if (dev_priv->mm.wedged) {
 +              DRM_ERROR("Execbuf while wedged\n");
 +              mutex_unlock(&dev->struct_mutex);
 +              return -EIO;
 +      }
 +
 +      if (dev_priv->mm.suspended) {
 +              DRM_ERROR("Execbuf while VT-switched.\n");
 +              mutex_unlock(&dev->struct_mutex);
 +              return -EBUSY;
 +      }
 +
 +      /* Zero the global flush/invalidate flags. These
 +       * will be modified as each object is bound to the
 +       * GTT.
 +       */
 +      dev->invalidate_domains = 0;
 +      dev->flush_domains = 0;
 +
 +      /* Look up object handles and perform the relocations */
 +      for (i = 0; i < args->buffer_count; i++) {
 +              object_list[i] = drm_gem_object_lookup(dev, file_priv,
 +                                                     exec_list[i].handle);
 +              if (object_list[i] == NULL) {
 +                      DRM_ERROR("Invalid object handle %d at index %d\n",
 +                                 exec_list[i].handle, i);
 +                      ret = -EBADF;
 +                      goto err;
 +              }
 +
 +              object_list[i]->pending_read_domains = 0;
 +              object_list[i]->pending_write_domain = 0;
 +              ret = i915_gem_object_pin_and_relocate(object_list[i],
 +                                                     file_priv,
 +                                                     &exec_list[i]);
 +              if (ret) {
 +                      DRM_ERROR("object bind and relocate failed %d\n", ret);
 +                      goto err;
 +              }
 +              pinned = i + 1;
 +      }
 +
 +      /* Set the pending read domains for the batch buffer to COMMAND */
 +      batch_obj = object_list[args->buffer_count-1];
 +      batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
 +      batch_obj->pending_write_domain = 0;
 +
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +
 +      for (i = 0; i < args->buffer_count; i++) {
 +              struct drm_gem_object *obj = object_list[i];
 +              struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +
 +              if (obj_priv->gtt_space == NULL) {
 +                      /* We evicted the buffer while validating our set of
 +                       * buffers.  We could try to recover by kicking
 +                       * everything out and trying again from the start.
 +                       */
 +                      ret = -ENOMEM;
 +                      goto err;
 +              }
 +
 +              /* make sure all previous memory operations have passed */
 +              ret = i915_gem_object_set_domain(obj,
 +                                               obj->pending_read_domains,
 +                                               obj->pending_write_domain);
 +              if (ret)
 +                      goto err;
 +      }
 +
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +
 +      /* Flush/invalidate caches and chipset buffer */
 +      flush_domains = i915_gem_dev_set_domain(dev);
 +
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +
 +#if WATCH_COHERENCY
 +      for (i = 0; i < args->buffer_count; i++) {
 +              i915_gem_object_check_coherency(object_list[i],
 +                                              exec_list[i].handle);
 +      }
 +#endif
 +
 +      exec_offset = exec_list[args->buffer_count - 1].offset;
 +
 +#if WATCH_EXEC
 +      i915_gem_dump_object(object_list[args->buffer_count - 1],
 +                            args->batch_len,
 +                            __func__,
 +                            ~0);
 +#endif
 +
 +      /* Exec the batchbuffer */
 +      ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
 +      if (ret) {
 +              DRM_ERROR("dispatch failed %d\n", ret);
 +              goto err;
 +      }
 +
 +      /*
 +       * Ensure that the commands in the batch buffer are
 +       * finished before the interrupt fires
 +       */
 +      flush_domains |= i915_retire_commands(dev);
 +
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +
 +      /*
 +       * Get a seqno representing the execution of the current buffer,
 +       * which we can wait on.  We would like to mitigate these interrupts,
 +       * likely by only creating seqnos occasionally (so that we have
 +       * *some* interrupts representing completion of buffers that we can
 +       * wait on when trying to clear up gtt space).
 +       */
 +      seqno = i915_add_request(dev, flush_domains);
 +      BUG_ON(seqno == 0);
 +      i915_file_priv->mm.last_gem_seqno = seqno;
 +      for (i = 0; i < args->buffer_count; i++) {
 +              struct drm_gem_object *obj = object_list[i];
 +              struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +
 +              i915_gem_object_move_to_active(obj);
 +              obj_priv->last_rendering_seqno = seqno;
 +#if WATCH_LRU
 +              DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 +#endif
 +      }
 +#if WATCH_LRU
 +      i915_dump_lru(dev, __func__);
 +#endif
 +
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +
 +      /* Copy the new buffer offsets back to the user's exec list. */
 +      ret = copy_to_user((struct drm_i915_relocation_entry __user *)
 +                         (uintptr_t) args->buffers_ptr,
 +                         exec_list,
 +                         sizeof(*exec_list) * args->buffer_count);
 +      if (ret)
 +              DRM_ERROR("failed to copy %d exec entries "
 +                        "back to user (%d)\n",
 +                         args->buffer_count, ret);
 +err:
 +      if (object_list != NULL) {
 +              for (i = 0; i < pinned; i++)
 +                      i915_gem_object_unpin(object_list[i]);
 +
 +              for (i = 0; i < args->buffer_count; i++)
 +                      drm_gem_object_unreference(object_list[i]);
 +      }
 +      mutex_unlock(&dev->struct_mutex);
 +
 +pre_mutex_err:
 +      drm_free(object_list, sizeof(*object_list) * args->buffer_count,
 +               DRM_MEM_DRIVER);
 +      drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
 +               DRM_MEM_DRIVER);
 +
 +      return ret;
 +}
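A hypothetical userspace sketch of driving this ioctl is shown below: two objects are submitted, with the batch buffer last as the code above requires, and batch_len kept 8-byte aligned to satisfy the dispatch check. The DRM_IOCTL_I915_GEM_EXECBUFFER macro name and the include path are assumptions; the structure fields match those read by the ioctl.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int submit_batch(int fd, uint32_t target_handle, uint32_t batch_handle,
			struct drm_i915_gem_relocation_entry *relocs,
			uint32_t nrelocs, uint32_t batch_len)
{
	struct drm_i915_gem_exec_object objs[2];
	struct drm_i915_gem_execbuffer execbuf;

	memset(objs, 0, sizeof(objs));
	objs[0].handle = target_handle;		/* referenced by the relocations */
	objs[1].handle = batch_handle;		/* batch buffer must come last */
	objs[1].relocation_count = nrelocs;
	objs[1].relocs_ptr = (uintptr_t)relocs;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = 2;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;		/* must be 8-byte aligned */

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
}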
 +
 +int
 +i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 +{
 +      struct drm_device *dev = obj->dev;
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +      int ret;
 +
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +      if (obj_priv->gtt_space == NULL) {
 +              ret = i915_gem_object_bind_to_gtt(obj, alignment);
 +              if (ret != 0) {
 +                      DRM_ERROR("Failure to bind: %d", ret);
 +                      return ret;
 +              }
 +      }
 +      obj_priv->pin_count++;
 +
 +      /* If the object is not active and not pending a flush,
 +       * remove it from the inactive list
 +       */
 +      if (obj_priv->pin_count == 1) {
 +              atomic_inc(&dev->pin_count);
 +              atomic_add(obj->size, &dev->pin_memory);
 +              if (!obj_priv->active &&
 +                  (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
 +                                         I915_GEM_DOMAIN_GTT)) == 0 &&
 +                  !list_empty(&obj_priv->list))
 +                      list_del_init(&obj_priv->list);
 +      }
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +
 +      return 0;
 +}
 +
 +void
 +i915_gem_object_unpin(struct drm_gem_object *obj)
 +{
 +      struct drm_device *dev = obj->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +      obj_priv->pin_count--;
 +      BUG_ON(obj_priv->pin_count < 0);
 +      BUG_ON(obj_priv->gtt_space == NULL);
 +
 +      /* If the object is no longer pinned, and is
 +       * neither active nor being flushed, then stick it on
 +       * the inactive list
 +       */
 +      if (obj_priv->pin_count == 0) {
 +              if (!obj_priv->active &&
 +                  (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
 +                                         I915_GEM_DOMAIN_GTT)) == 0)
 +                      list_move_tail(&obj_priv->list,
 +                                     &dev_priv->mm.inactive_list);
 +              atomic_dec(&dev->pin_count);
 +              atomic_sub(obj->size, &dev->pin_memory);
 +      }
 +      i915_verify_inactive(dev, __FILE__, __LINE__);
 +}
 +
 +int
 +i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 +                 struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_pin *args = data;
 +      struct drm_gem_object *obj;
 +      struct drm_i915_gem_object *obj_priv;
 +      int ret;
 +
 +      mutex_lock(&dev->struct_mutex);
 +
 +      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 +      if (obj == NULL) {
 +              DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
 +                        args->handle);
 +              mutex_unlock(&dev->struct_mutex);
 +              return -EBADF;
 +      }
 +      obj_priv = obj->driver_private;
 +
 +      ret = i915_gem_object_pin(obj, args->alignment);
 +      if (ret != 0) {
 +              drm_gem_object_unreference(obj);
 +              mutex_unlock(&dev->struct_mutex);
 +              return ret;
 +      }
 +
 +      /* XXX - flush the CPU caches for pinned objects
 +       * as the X server doesn't manage domains yet
 +       */
 +      if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
 +              i915_gem_clflush_object(obj);
 +              drm_agp_chipset_flush(dev);
 +              obj->write_domain = 0;
 +      }
 +      args->offset = obj_priv->gtt_offset;
 +      drm_gem_object_unreference(obj);
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return 0;
 +}
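A hypothetical userspace sketch of the pin path (e.g. an X server pinning a scanout buffer) is below; the drm_i915_gem_pin fields match those used above, while the DRM_IOCTL_I915_GEM_PIN macro name and include path are assumptions.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int pin_scanout(int fd, uint32_t handle, uint64_t *gtt_offset)
{
	struct drm_i915_gem_pin pin;
	int ret;

	memset(&pin, 0, sizeof(pin));
	pin.handle = handle;
	pin.alignment = 4096;			/* page-align the GTT placement */

	ret = ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin);
	if (ret == 0)
		*gtt_offset = pin.offset;	/* GTT offset written back by the kernel */
	return ret;
}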
 +
 +int
 +i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 +                   struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_pin *args = data;
 +      struct drm_gem_object *obj;
 +
 +      mutex_lock(&dev->struct_mutex);
 +
 +      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 +      if (obj == NULL) {
 +              DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
 +                        args->handle);
 +              mutex_unlock(&dev->struct_mutex);
 +              return -EBADF;
 +      }
 +
 +      i915_gem_object_unpin(obj);
 +
 +      drm_gem_object_unreference(obj);
 +      mutex_unlock(&dev->struct_mutex);
 +      return 0;
 +}
 +
 +int
 +i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 +                  struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_busy *args = data;
 +      struct drm_gem_object *obj;
 +      struct drm_i915_gem_object *obj_priv;
 +
 +      mutex_lock(&dev->struct_mutex);
 +      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 +      if (obj == NULL) {
 +              DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
 +                        args->handle);
 +              mutex_unlock(&dev->struct_mutex);
 +              return -EBADF;
 +      }
 +
 +      obj_priv = obj->driver_private;
 +      args->busy = obj_priv->active;
 +
 +      drm_gem_object_unreference(obj);
 +      mutex_unlock(&dev->struct_mutex);
 +      return 0;
 +}
 +
 +int
 +i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv)
 +{
 +      return i915_gem_ring_throttle(dev, file_priv);
 +}
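A hypothetical userspace sketch of using the throttle ioctl between frames follows; DRM_IOCTL_I915_GEM_THROTTLE is assumed to be the corresponding request macro, and the ioctl takes no argument structure.

#include <sys/ioctl.h>
#include "i915_drm.h"

/* Call once per frame: blocks until the requests outstanding as of the
 * previous throttle call have retired, bounding how far the CPU can run
 * ahead of the GPU. */
static int throttle(int fd)
{
	return ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
}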
 +
 +int i915_gem_init_object(struct drm_gem_object *obj)
 +{
 +      struct drm_i915_gem_object *obj_priv;
 +
 +      obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
 +      if (obj_priv == NULL)
 +              return -ENOMEM;
 +
 +      /*
 +       * We've just allocated pages from the kernel,
 +       * so they've just been written by the CPU with
 +       * zeros. They'll need to be clflushed before we
 +       * use them with the GPU.
 +       */
 +      obj->write_domain = I915_GEM_DOMAIN_CPU;
 +      obj->read_domains = I915_GEM_DOMAIN_CPU;
 +
 +      obj->driver_private = obj_priv;
 +      obj_priv->obj = obj;
 +      INIT_LIST_HEAD(&obj_priv->list);
 +      return 0;
 +}
 +
 +void i915_gem_free_object(struct drm_gem_object *obj)
 +{
 +      struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +
 +      while (obj_priv->pin_count > 0)
 +              i915_gem_object_unpin(obj);
 +
 +      i915_gem_object_unbind(obj);
 +
 +      drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 +      drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 +}
 +
 +int
 +i915_gem_set_domain(struct drm_gem_object *obj,
 +                  struct drm_file *file_priv,
 +                  uint32_t read_domains,
 +                  uint32_t write_domain)
 +{
 +      struct drm_device *dev = obj->dev;
 +      int ret;
 +      uint32_t flush_domains;
 +
 +      BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 +
 +      ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
 +      if (ret)
 +              return ret;
 +      flush_domains = i915_gem_dev_set_domain(obj->dev);
 +
 +      if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
 +              (void) i915_add_request(dev, flush_domains);
 +
 +      return 0;
 +}
 +
 +/** Unbinds all objects that are on the given buffer list. */
 +static int
 +i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
 +{
 +      struct drm_gem_object *obj;
 +      struct drm_i915_gem_object *obj_priv;
 +      int ret;
 +
 +      while (!list_empty(head)) {
 +              obj_priv = list_first_entry(head,
 +                                          struct drm_i915_gem_object,
 +                                          list);
 +              obj = obj_priv->obj;
 +
 +              if (obj_priv->pin_count != 0) {
 +                      DRM_ERROR("Pinned object in unbind list\n");
 +                      mutex_unlock(&dev->struct_mutex);
 +                      return -EINVAL;
 +              }
 +
 +              ret = i915_gem_object_unbind(obj);
 +              if (ret != 0) {
 +                      DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
 +                                ret);
 +                      mutex_unlock(&dev->struct_mutex);
 +                      return ret;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +static int
 +i915_gem_idle(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      uint32_t seqno, cur_seqno, last_seqno;
 +      int stuck;
 +
 +      if (dev_priv->mm.suspended)
 +              return 0;
 +
 +      /* Hack!  Don't let anybody do execbuf while we don't control the chip.
 +       * We need to replace this with a semaphore, or something.
 +       */
 +      dev_priv->mm.suspended = 1;
 +
 +      i915_kernel_lost_context(dev);
 +
 +      /* Flush the GPU along with all non-CPU write domains. */
 +      i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
 +                     ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
 +      seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
 +                                      I915_GEM_DOMAIN_GTT));
 +
 +      if (seqno == 0) {
 +              mutex_unlock(&dev->struct_mutex);
 +              return -ENOMEM;
 +      }
 +
 +      dev_priv->mm.waiting_gem_seqno = seqno;
 +      last_seqno = 0;
 +      stuck = 0;
 +      for (;;) {
 +              cur_seqno = i915_get_gem_seqno(dev);
 +              if (i915_seqno_passed(cur_seqno, seqno))
 +                      break;
 +              if (last_seqno == cur_seqno) {
 +                      if (stuck++ > 100) {
 +                              DRM_ERROR("hardware wedged\n");
 +                              dev_priv->mm.wedged = 1;
 +                              DRM_WAKEUP(&dev_priv->irq_queue);
 +                              break;
 +                      }
 +              }
 +              msleep(10);
 +              last_seqno = cur_seqno;
 +      }
 +      dev_priv->mm.waiting_gem_seqno = 0;
 +
 +      i915_gem_retire_requests(dev);
 +
 +      /* Active and flushing should now be empty as we've
 +       * waited for a sequence higher than any pending execbuffer
 +       */
 +      BUG_ON(!list_empty(&dev_priv->mm.active_list));
 +      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 +
 +      /* Request should now be empty as we've also waited
 +       * for the last request in the list
 +       */
 +      BUG_ON(!list_empty(&dev_priv->mm.request_list));
 +
 +      /* Move all buffers out of the GTT. */
 +      i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
 +
 +      BUG_ON(!list_empty(&dev_priv->mm.active_list));
 +      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 +      BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 +      BUG_ON(!list_empty(&dev_priv->mm.request_list));
 +      return 0;
 +}
 +
 +static int
 +i915_gem_init_hws(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_gem_object *obj;
 +      struct drm_i915_gem_object *obj_priv;
 +      int ret;
 +
 +      /* If we need a physical address for the status page, it's already
 +       * initialized at driver load time.
 +       */
 +      if (!I915_NEED_GFX_HWS(dev))
 +              return 0;
 +
 +      obj = drm_gem_object_alloc(dev, 4096);
 +      if (obj == NULL) {
 +              DRM_ERROR("Failed to allocate status page\n");
 +              return -ENOMEM;
 +      }
 +      obj_priv = obj->driver_private;
 +
 +      ret = i915_gem_object_pin(obj, 4096);
 +      if (ret != 0) {
 +              drm_gem_object_unreference(obj);
 +              return ret;
 +      }
 +
 +      dev_priv->status_gfx_addr = obj_priv->gtt_offset;
 +      dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
 +      dev_priv->hws_map.size = 4096;
 +      dev_priv->hws_map.type = 0;
 +      dev_priv->hws_map.flags = 0;
 +      dev_priv->hws_map.mtrr = 0;
 +
 +      drm_core_ioremap(&dev_priv->hws_map, dev);
 +      if (dev_priv->hws_map.handle == NULL) {
 +              DRM_ERROR("Failed to map status page.\n");
 +              memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 +              drm_gem_object_unreference(obj);
 +              return -EINVAL;
 +      }
 +      dev_priv->hws_obj = obj;
 +      dev_priv->hw_status_page = dev_priv->hws_map.handle;
 +      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 +      I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 +      DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 +
 +      return 0;
 +}
 +
 +static int
 +i915_gem_init_ringbuffer(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_gem_object *obj;
 +      struct drm_i915_gem_object *obj_priv;
 +      int ret;
 +
 +      ret = i915_gem_init_hws(dev);
 +      if (ret != 0)
 +              return ret;
 +
 +      obj = drm_gem_object_alloc(dev, 128 * 1024);
 +      if (obj == NULL) {
 +              DRM_ERROR("Failed to allocate ringbuffer\n");
 +              return -ENOMEM;
 +      }
 +      obj_priv = obj->driver_private;
 +
 +      ret = i915_gem_object_pin(obj, 4096);
 +      if (ret != 0) {
 +              drm_gem_object_unreference(obj);
 +              return ret;
 +      }
 +
 +      /* Set up the kernel mapping for the ring. */
 +      dev_priv->ring.Size = obj->size;
 +      dev_priv->ring.tail_mask = obj->size - 1;
 +
 +      dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
 +      dev_priv->ring.map.size = obj->size;
 +      dev_priv->ring.map.type = 0;
 +      dev_priv->ring.map.flags = 0;
 +      dev_priv->ring.map.mtrr = 0;
 +
 +      drm_core_ioremap(&dev_priv->ring.map, dev);
 +      if (dev_priv->ring.map.handle == NULL) {
 +              DRM_ERROR("Failed to map ringbuffer.\n");
 +              memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 +              drm_gem_object_unreference(obj);
 +              return -EINVAL;
 +      }
 +      dev_priv->ring.ring_obj = obj;
 +      dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
 +
 +      /* Stop the ring if it's running. */
-       I915_WRITE(LP_RING + RING_START, obj_priv->gtt_offset);
-       I915_WRITE(LP_RING + RING_LEN,
++      I915_WRITE(PRB0_CTL, 0);
++      I915_WRITE(PRB0_HEAD, 0);
++      I915_WRITE(PRB0_TAIL, 0);
++      I915_WRITE(PRB0_START, 0);
 +
 +      /* Initialize the ring. */
++      I915_WRITE(PRB0_START, obj_priv->gtt_offset);
++      I915_WRITE(PRB0_CTL,
 +                 ((obj->size - 4096) & RING_NR_PAGES) |
 +                 RING_NO_REPORT |
 +                 RING_VALID);
 +
 +      /* Update our cache of the ring state */
 +      i915_kernel_lost_context(dev);
 +
 +      return 0;
 +}
 +
 +static void
 +i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +
 +      if (dev_priv->ring.ring_obj == NULL)
 +              return;
 +
 +      drm_core_ioremapfree(&dev_priv->ring.map, dev);
 +
 +      i915_gem_object_unpin(dev_priv->ring.ring_obj);
 +      drm_gem_object_unreference(dev_priv->ring.ring_obj);
 +      dev_priv->ring.ring_obj = NULL;
 +      memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 +
 +      if (dev_priv->hws_obj != NULL) {
 +              i915_gem_object_unpin(dev_priv->hws_obj);
 +              drm_gem_object_unreference(dev_priv->hws_obj);
 +              dev_priv->hws_obj = NULL;
 +              memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 +
 +              /* Write high address into HWS_PGA when disabling. */
 +              I915_WRITE(HWS_PGA, 0x1ffff000);
 +      }
 +}
 +
 +int
 +i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 +                     struct drm_file *file_priv)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      int ret;
 +
 +      if (dev_priv->mm.wedged) {
 +              DRM_ERROR("Reenabling wedged hardware, good luck\n");
 +              dev_priv->mm.wedged = 0;
 +      }
 +
 +      ret = i915_gem_init_ringbuffer(dev);
 +      if (ret != 0)
 +              return ret;
 +
 +      mutex_lock(&dev->struct_mutex);
 +      BUG_ON(!list_empty(&dev_priv->mm.active_list));
 +      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 +      BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 +      BUG_ON(!list_empty(&dev_priv->mm.request_list));
 +      dev_priv->mm.suspended = 0;
 +      mutex_unlock(&dev->struct_mutex);
 +      return 0;
 +}
 +
 +int
 +i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 +                     struct drm_file *file_priv)
 +{
 +      int ret;
 +
 +      mutex_lock(&dev->struct_mutex);
 +      ret = i915_gem_idle(dev);
 +      if (ret == 0)
 +              i915_gem_cleanup_ringbuffer(dev);
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return 0;
 +}
 +
 +void
 +i915_gem_lastclose(struct drm_device *dev)
 +{
 +      int ret;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +
 +      mutex_lock(&dev->struct_mutex);
 +
 +      if (dev_priv->ring.ring_obj != NULL) {
 +              ret = i915_gem_idle(dev);
 +              if (ret)
 +                      DRM_ERROR("failed to idle hardware: %d\n", ret);
 +
 +              i915_gem_cleanup_ringbuffer(dev);
 +      }
 +
 +      mutex_unlock(&dev->struct_mutex);
 +}
 +
 +void i915_gem_load(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +
 +      INIT_LIST_HEAD(&dev_priv->mm.active_list);
 +      INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 +      INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 +      INIT_LIST_HEAD(&dev_priv->mm.request_list);
 +      INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 +                        i915_gem_retire_work_handler);
 +      dev_priv->mm.next_gem_seqno = 1;
 +
 +      i915_gem_detect_bit_6_swizzle(dev);
 +}
index 90351ac,0000000..132eb3d
mode 100644,000000..100644
--- /dev/null
@@@ -1,293 -1,0 +1,293 @@@
-                      I915_READ(I915REG_INT_ENABLE_R));
 +/*
 + * Copyright © 2008 Intel Corporation
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the next
 + * paragraph) shall be included in all copies or substantial portions of the
 + * Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 + * IN THE SOFTWARE.
 + *
 + * Authors:
 + *    Eric Anholt <eric@anholt.net>
 + *    Keith Packard <keithp@keithp.com>
 + *
 + */
 +
 +#include "drmP.h"
 +#include "drm.h"
 +#include "drm_compat.h"
 +#include "i915_drm.h"
 +#include "i915_drv.h"
 +
 +static int i915_gem_active_info(char *buf, char **start, off_t offset,
 +                              int request, int *eof, void *data)
 +{
 +      struct drm_minor *minor = (struct drm_minor *) data;
 +      struct drm_device *dev = minor->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_object *obj_priv;
 +      int len = 0;
 +
 +      if (offset > DRM_PROC_LIMIT) {
 +              *eof = 1;
 +              return 0;
 +      }
 +
 +      *start = &buf[offset];
 +      *eof = 0;
 +      DRM_PROC_PRINT("Active:\n");
 +      list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
 +                          list)
 +      {
 +              struct drm_gem_object *obj = obj_priv->obj;
 +              if (obj->name) {
 +                      DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
 +                                     obj, obj->name,
 +                                     obj->read_domains, obj->write_domain,
 +                                     obj_priv->last_rendering_seqno);
 +              } else {
 +                      DRM_PROC_PRINT("       %p: %08x %08x %d\n",
 +                                     obj,
 +                                     obj->read_domains, obj->write_domain,
 +                                     obj_priv->last_rendering_seqno);
 +              }
 +      }
 +      if (len > request + offset)
 +              return request;
 +      *eof = 1;
 +      return len - offset;
 +}
 +
 +static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
 +                                int request, int *eof, void *data)
 +{
 +      struct drm_minor *minor = (struct drm_minor *) data;
 +      struct drm_device *dev = minor->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_object *obj_priv;
 +      int len = 0;
 +
 +      if (offset > DRM_PROC_LIMIT) {
 +              *eof = 1;
 +              return 0;
 +      }
 +
 +      *start = &buf[offset];
 +      *eof = 0;
 +      DRM_PROC_PRINT("Flushing:\n");
 +      list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
 +                          list)
 +      {
 +              struct drm_gem_object *obj = obj_priv->obj;
 +              if (obj->name) {
 +                      DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
 +                                     obj, obj->name,
 +                                     obj->read_domains, obj->write_domain,
 +                                     obj_priv->last_rendering_seqno);
 +              } else {
 +                      DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
 +                                     obj->read_domains, obj->write_domain,
 +                                     obj_priv->last_rendering_seqno);
 +              }
 +      }
 +      if (len > request + offset)
 +              return request;
 +      *eof = 1;
 +      return len - offset;
 +}
 +
 +static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
 +                                int request, int *eof, void *data)
 +{
 +      struct drm_minor *minor = (struct drm_minor *) data;
 +      struct drm_device *dev = minor->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_object *obj_priv;
 +      int len = 0;
 +
 +      if (offset > DRM_PROC_LIMIT) {
 +              *eof = 1;
 +              return 0;
 +      }
 +
 +      *start = &buf[offset];
 +      *eof = 0;
 +      DRM_PROC_PRINT("Inactive:\n");
 +      list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
 +                          list)
 +      {
 +              struct drm_gem_object *obj = obj_priv->obj;
 +              if (obj->name) {
 +                      DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
 +                                     obj, obj->name,
 +                                     obj->read_domains, obj->write_domain,
 +                                     obj_priv->last_rendering_seqno);
 +              } else {
 +                      DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
 +                                     obj->read_domains, obj->write_domain,
 +                                     obj_priv->last_rendering_seqno);
 +              }
 +      }
 +      if (len > request + offset)
 +              return request;
 +      *eof = 1;
 +      return len - offset;
 +}
 +
 +static int i915_gem_request_info(char *buf, char **start, off_t offset,
 +                               int request, int *eof, void *data)
 +{
 +      struct drm_minor *minor = (struct drm_minor *) data;
 +      struct drm_device *dev = minor->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_request *gem_request;
 +      int len = 0;
 +
 +      if (offset > DRM_PROC_LIMIT) {
 +              *eof = 1;
 +              return 0;
 +      }
 +
 +      *start = &buf[offset];
 +      *eof = 0;
 +      DRM_PROC_PRINT("Request:\n");
 +      list_for_each_entry(gem_request, &dev_priv->mm.request_list,
 +                          list)
 +      {
 +              DRM_PROC_PRINT("    %d @ %d %08x\n",
 +                             gem_request->seqno,
 +                             (int) (jiffies - gem_request->emitted_jiffies),
 +                             gem_request->flush_domains);
 +      }
 +      if (len > request + offset)
 +              return request;
 +      *eof = 1;
 +      return len - offset;
 +}
 +
 +static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
 +                             int request, int *eof, void *data)
 +{
 +      struct drm_minor *minor = (struct drm_minor *) data;
 +      struct drm_device *dev = minor->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      int len = 0;
 +
 +      if (offset > DRM_PROC_LIMIT) {
 +              *eof = 1;
 +              return 0;
 +      }
 +
 +      *start = &buf[offset];
 +      *eof = 0;
 +      DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
 +      DRM_PROC_PRINT("Waiter sequence:  %d\n",
 +                     dev_priv->mm.waiting_gem_seqno);
 +      DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
 +      if (len > request + offset)
 +              return request;
 +      *eof = 1;
 +      return len - offset;
 +}
 +
 +
 +static int i915_interrupt_info(char *buf, char **start, off_t offset,
 +                             int request, int *eof, void *data)
 +{
 +      struct drm_minor *minor = (struct drm_minor *) data;
 +      struct drm_device *dev = minor->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      int len = 0;
 +
 +      if (offset > DRM_PROC_LIMIT) {
 +              *eof = 1;
 +              return 0;
 +      }
 +
 +      *start = &buf[offset];
 +      *eof = 0;
 +      DRM_PROC_PRINT("Interrupt enable:    %08x\n",
-                      I915_READ(I915REG_INT_IDENTITY_R));
++                     I915_READ(IER));
 +      DRM_PROC_PRINT("Interrupt identity:  %08x\n",
-                      I915_READ(I915REG_INT_MASK_R));
++                     I915_READ(IIR));
 +      DRM_PROC_PRINT("Interrupt mask:      %08x\n",
-                      I915_READ(I915REG_PIPEASTAT));
++                     I915_READ(IMR));
 +      DRM_PROC_PRINT("Pipe A stat:         %08x\n",
-                      I915_READ(I915REG_PIPEBSTAT));
++                     I915_READ(PIPEASTAT));
 +      DRM_PROC_PRINT("Pipe B stat:         %08x\n",
++                     I915_READ(PIPEBSTAT));
 +      DRM_PROC_PRINT("Interrupts received: %d\n",
 +                     atomic_read(&dev_priv->irq_received));
 +      DRM_PROC_PRINT("Current sequence:    %d\n",
 +                     i915_get_gem_seqno(dev));
 +      DRM_PROC_PRINT("Waiter sequence:     %d\n",
 +                     dev_priv->mm.waiting_gem_seqno);
 +      DRM_PROC_PRINT("IRQ sequence:        %d\n",
 +                     dev_priv->mm.irq_gem_seqno);
 +      if (len > request + offset)
 +              return request;
 +      *eof = 1;
 +      return len - offset;
 +}
 +
 +static struct drm_proc_list {
 +      /** file name */
 +      const char *name;
 +      /** proc callback */
 +      int (*f) (char *, char **, off_t, int, int *, void *);
 +} i915_gem_proc_list[] = {
 +      {"i915_gem_active", i915_gem_active_info},
 +      {"i915_gem_flushing", i915_gem_flushing_info},
 +      {"i915_gem_inactive", i915_gem_inactive_info},
 +      {"i915_gem_request", i915_gem_request_info},
 +      {"i915_gem_seqno", i915_gem_seqno_info},
 +      {"i915_gem_interrupt", i915_interrupt_info},
 +};
 +
 +#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
 +
 +int i915_gem_proc_init(struct drm_minor *minor)
 +{
 +      struct proc_dir_entry *ent;
 +      int i, j;
 +
 +      for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
 +              ent = create_proc_entry(i915_gem_proc_list[i].name,
 +                                      S_IFREG | S_IRUGO, minor->dev_root);
 +              if (!ent) {
 +                      DRM_ERROR("Cannot create /proc/dri/.../%s\n",
 +                                i915_gem_proc_list[i].name);
 +                      for (j = 0; j < i; j++)
 +                              remove_proc_entry(i915_gem_proc_list[j].name,
 +                                                minor->dev_root);
 +                      return -1;
 +              }
 +              ent->read_proc = i915_gem_proc_list[i].f;
 +              ent->data = minor;
 +      }
 +      return 0;
 +}
 +
 +void i915_gem_proc_cleanup(struct drm_minor *minor)
 +{
 +      int i;
 +
 +      if (!minor->dev_root)
 +              return;
 +
 +      for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
 +              remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
 +}
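These entries land under the DRM proc directory for the minor, e.g. /proc/dri/0/. A hypothetical sketch of dumping one of them follows (the exact minor number depends on the system):

#include <stdio.h>

static void dump_gem_seqno(void)
{
	char buf[256];
	FILE *f = fopen("/proc/dri/0/i915_gem_seqno", "r");

	if (!f)
		return;
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* current / waiter / IRQ sequence numbers */
	fclose(f);
}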
index 0000000,e691571..015376f
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,386 +1,387 @@@
 -              u32 pipeb_stats = I915_READ(PIPEBSTAT);
+ /*
+  *
+  * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+  * Copyright 2008 Red Hat <mjg@redhat.com>
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the
+  * "Software"), to deal in the Software without restriction, including
+  * without limitation the rights to use, copy, modify, merge, publish,
+  * distribute, sub license, and/or sell copies of the Software, and to
+  * permit persons to whom the Software is furnished to do so, subject to
+  * the following conditions:
+  *
+  * The above copyright notice and this permission notice (including the
+  * next paragraph) shall be included in all copies or substantial portions
+  * of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  * SOFTWARE.
+  *
+  */
+ #include <linux/acpi.h>
+ #include "drmP.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+ #define PCI_ASLE 0xe4
+ #define PCI_ASLS 0xfc
+ #define OPREGION_SZ            (8*1024)
+ #define OPREGION_HEADER_OFFSET 0
+ #define OPREGION_ACPI_OFFSET   0x100
+ #define OPREGION_SWSCI_OFFSET  0x200
+ #define OPREGION_ASLE_OFFSET   0x300
+ #define OPREGION_VBT_OFFSET    0x1000
+ #define OPREGION_SIGNATURE "IntelGraphicsMem"
+ #define MBOX_ACPI      (1<<0)
+ #define MBOX_SWSCI     (1<<1)
+ #define MBOX_ASLE      (1<<2)
+ /* _DOD id definitions */
+ #define OUTPUT_CONNECTOR_MSK   0xf000
+ #define OUTPUT_CONNECTOR_OFFSET        12
+ #define OUTPUT_PORT_MSK                0x00f0
+ #define OUTPUT_PORT_OFFSET     4
+   #define OUTPUT_PORT_ANALOG   0
+   #define OUTPUT_PORT_LVDS     1
+   #define OUTPUT_PORT_SDVOB    2
+   #define OUTPUT_PORT_SDVOC    3
+   #define OUTPUT_PORT_TV       4
+ #define OUTPUT_DISPLAY_MSK     0x0f00
+ #define OUTPUT_DISPLAY_OFFSET  8
+   #define OUTPUT_DISPLAY_OTHER         0
+   #define OUTPUT_DISPLAY_VGA           1
+   #define OUTPUT_DISPLAY_TV            2
+   #define OUTPUT_DISPLAY_DIGI          3
+   #define OUTPUT_DISPLAY_FLAT_PANEL    4
+ /* predefined id for integrated LVDS and VGA connector */
+ #define OUTPUT_INT_LVDS        0x00000110
+ #define OUTPUT_INT_VGA 0x80000100
+ struct opregion_header {
+        u8 signature[16];
+        u32 size;
+        u32 opregion_ver;
+        u8 bios_ver[32];
+        u8 vbios_ver[16];
+        u8 driver_ver[16];
+        u32 mboxes;
+        u8 reserved[164];
+ } __attribute__((packed));
+ /* OpRegion mailbox #1: public ACPI methods */
+ struct opregion_acpi {
+        u32 drdy;       /* driver readiness */
+        u32 csts;       /* notification status */
+        u32 cevt;       /* current event */
+        u8 rsvd1[20];
+        u32 didl[8];    /* supported display devices ID list */
+        u32 cpdl[8];    /* currently presented display list */
+        u32 cadl[8];    /* currently active display list */
+        u32 nadl[8];    /* next active devices list */
+        u32 aslp;       /* ASL sleep time-out */
+        u32 tidx;       /* toggle table index */
+        u32 chpd;       /* current hotplug enable indicator */
+        u32 clid;       /* current lid state*/
+        u32 cdck;       /* current docking state */
+        u32 sxsw;       /* Sx state resume */
+        u32 evts;       /* ASL supported events */
+        u32 cnot;       /* current OS notification */
+        u32 nrdy;       /* driver status */
+        u8 rsvd2[60];
+ } __attribute__((packed));
+ /* OpRegion mailbox #2: SWSCI */
+ struct opregion_swsci {
+        u32 scic;       /* SWSCI command|status|data */
+        u32 parm;       /* command parameters */
+        u32 dslp;       /* driver sleep time-out */
+        u8 rsvd[244];
+ } __attribute__((packed));
+ /* OpRegion mailbox #3: ASLE */
+ struct opregion_asle {
+        u32 ardy;       /* driver readiness */
+        u32 aslc;       /* ASLE interrupt command */
+        u32 tche;       /* technology enabled indicator */
+        u32 alsi;       /* current ALS illuminance reading */
+        u32 bclp;       /* backlight brightness to set */
+        u32 pfit;       /* panel fitting state */
+        u32 cblv;       /* current brightness level */
+        u16 bclm[20];   /* backlight level duty cycle mapping table */
+        u32 cpfm;       /* current panel fitting mode */
+        u32 epfm;       /* enabled panel fitting modes */
+        u8 plut[74];    /* panel LUT and identifier */
+        u32 pfmb;       /* PWM freq and min brightness */
+        u8 rsvd[102];
+ } __attribute__((packed));
+ /* ASLE irq request bits */
+ #define ASLE_SET_ALS_ILLUM     (1 << 0)
+ #define ASLE_SET_BACKLIGHT     (1 << 1)
+ #define ASLE_SET_PFIT          (1 << 2)
+ #define ASLE_SET_PWM_FREQ      (1 << 3)
+ #define ASLE_REQ_MSK           0xf
+ /* response bits of ASLE irq request */
+ #define ASLE_ALS_ILLUM_FAIL    (2<<10)
+ #define ASLE_BACKLIGHT_FAIL    (2<<12)
+ #define ASLE_PFIT_FAIL         (2<<14)
+ #define ASLE_PWM_FREQ_FAIL     (2<<16)
+ /* ASLE backlight brightness to set */
+ #define ASLE_BCLP_VALID                (1<<31)
+ #define ASLE_BCLP_MSK          (~(1<<31))
+ /* ASLE panel fitting request */
+ #define ASLE_PFIT_VALID         (1<<31)
+ #define ASLE_PFIT_CENTER (1<<0)
+ #define ASLE_PFIT_STRETCH_TEXT (1<<1)
+ #define ASLE_PFIT_STRETCH_GFX (1<<2)
+ /* PWM frequency and minimum brightness */
+ #define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+ #define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+ #define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+ #define ASLE_PFMB_PWM_VALID (1<<31)
+ #define ASLE_CBLV_VALID         (1<<31)
+ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct opregion_asle *asle = dev_priv->opregion.asle;
+       u32 blc_pwm_ctl;
+       
+       if (!(bclp & ASLE_BCLP_VALID))
+               return ASLE_BACKLIGHT_FAIL;
+       
+       bclp &= ASLE_BCLP_MSK;
+       if (bclp < 0 || bclp > 255)
+               return ASLE_BACKLIGHT_FAIL;
+       
+       blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+       blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+       I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) -1));
+       asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+       
+       return 0;
+ }
+ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+ {
+       return 0;
+ }
+ static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (pfmb & ASLE_PFMB_PWM_VALID) {
+               u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+               u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
+               blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
+               pwm = pwm >> 9;
+               // FIXME - what do we do with the PWM?
+       }
+       return 0;
+ }
+ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+ {
+       if (!(pfit & ASLE_PFIT_VALID))
+               return ASLE_PFIT_FAIL;
+       return 0;
+ }
+ void opregion_asle_intr(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct opregion_asle *asle = dev_priv->opregion.asle;
+       u32 asle_stat = 0;
+       u32 asle_req;
+       if (!asle)
+               return;
+       asle_req = asle->aslc & ASLE_REQ_MSK;
+       
+       if (!asle_req) {
+               DRM_DEBUG("non asle set request??\n");
+               return;
+       }
+       if (asle_req & ASLE_SET_ALS_ILLUM)
+               asle_stat |= asle_set_als_illum(dev, asle->alsi);
+       
+       if (asle_req & ASLE_SET_BACKLIGHT)
+               asle_stat |= asle_set_backlight(dev, asle->bclp);
+       
+       if (asle_req & ASLE_SET_PFIT)
+               asle_stat |= asle_set_pfit(dev, asle->pfit);
+       
+       if (asle_req & ASLE_SET_PWM_FREQ)
+               asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+       
+       asle->aslc = asle_stat;
+ }
+ #define ASLE_ALS_EN    (1<<0)
+ #define ASLE_BLC_EN    (1<<1)
+ #define ASLE_PFIT_EN   (1<<2)
+ #define ASLE_PFMB_EN   (1<<3)
+ void opregion_enable_asle(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct opregion_asle *asle = dev_priv->opregion.asle;
+       if (asle) {
 -                      I915_WRITE(PIPEBSTAT, pipeb_stats |= 
 -                                 I915_LEGACY_BLC_EVENT_ENABLE);
 -                      dev_priv->irq_enable_reg |= 
 -                              (I915_ASLE_INTERRUPT 
 -                               | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
 -              } else
 -                      dev_priv->irq_enable_reg |= I915_ASLE_INTERRUPT;
 -              
+               if (IS_MOBILE(dev)) {
++                      u32 pipeb_stats = I915_READ(PIPEBSTAT);
+                       /* Some hardware uses the legacy backlight controller
+                          to signal interrupts, so we need to set up pipe B
+                          to generate an IRQ on writes */
++                      pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE;
++                      I915_WRITE(PIPEBSTAT, pipeb_stats);
++
++                      dev_priv->irq_mask_reg &=
++                              ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
++              }
++
++              dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT;
++
+               asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 
+                       ASLE_PFMB_EN;
+               asle->ardy = 1;
+       }
+ }
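The enable path above pairs with the IRQ handler changes later in this diff; roughly, the resulting dispatch is (sketch assembled from the i915_irq.c hunk below):

        /* Sketch only -- see the i915_irq.c hunk later in this diff. */
        iir = I915_READ(IIR);
        if (iir & I915_ASLE_INTERRUPT)
                opregion_asle_intr(dev);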
+ #define ACPI_EV_DISPLAY_SWITCH (1<<0)
+ #define ACPI_EV_LID            (1<<1)
+ #define ACPI_EV_DOCK           (1<<2)
+ static struct intel_opregion *system_opregion;
+ int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
+                                void *data)
+ {
+       /* The only video events relevant to opregion are 0x80. These indicate
+          either a docking event, lid switch or display switch request. In
+          Linux, these are handled by the dock, button and video drivers.
+          We might want to fix the video driver to be opregion-aware in the
+          future, but right now we just indicate to the firmware that the
+          request has been handled */
+       
+       struct opregion_acpi *acpi;
+       if (!system_opregion)
+               return NOTIFY_DONE;
+       
+       acpi = system_opregion->acpi;
+       acpi->csts = 0;
+       return NOTIFY_OK;
+ }
+ static struct notifier_block intel_opregion_notifier = {
+       .notifier_call = intel_opregion_video_event,
+ };
+ int intel_opregion_init(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       void *base;
+       u32 asls, mboxes;
+       int err = 0;
+       
+       pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+       DRM_DEBUG("graphics opregion physical addr: 0x%x\n", asls);
+       if (asls == 0) {
+               DRM_DEBUG("ACPI OpRegion not supported!\n");
+               return -ENOTSUPP;
+       }
+       
+       base = ioremap(asls, OPREGION_SZ);
+       if (!base)
+               return -ENOMEM;
+       
+       opregion->header = base;
+       if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
+               DRM_DEBUG("opregion signature mismatch\n");
+               err = -EINVAL;
+               goto err_out;
+       }
+       
+       mboxes = opregion->header->mboxes;
+       if (mboxes & MBOX_ACPI) {
+               DRM_DEBUG("Public ACPI methods supported\n");
+               opregion->acpi = base + OPREGION_ACPI_OFFSET;
+       } else {
+               DRM_DEBUG("Public ACPI methods not supported\n");
+               err = -ENOTSUPP;
+               goto err_out;
+       }
+       opregion->enabled = 1;
+       
+       if (mboxes & MBOX_SWSCI) {
+               DRM_DEBUG("SWSCI supported\n");
+               opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+       }
+       if (mboxes & MBOX_ASLE) {
+               DRM_DEBUG("ASLE supported\n");
+               opregion->asle = base + OPREGION_ASLE_OFFSET;
+       }
+       
+       /* Notify BIOS we are ready to handle ACPI video ext notifs.
+        * Right now, all the events are handled by the ACPI video module.
+        * We don't actually need to do anything with them. */
+       opregion->acpi->csts = 0;
+       opregion->acpi->drdy = 1;
+       system_opregion = opregion;
+       register_acpi_notifier(&intel_opregion_notifier);
+       
+       return 0;
+       
+ err_out:
+       iounmap(opregion->header);
+       opregion->header = NULL;
+       return err;
+ }
+ void intel_opregion_free(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       
+       if (!opregion->enabled)
+               return;
+       
+       opregion->acpi->drdy = 0;
+       
+       system_opregion = NULL;
+       unregister_acpi_notifier(&intel_opregion_notifier);
+       
+       /* just clear all opregion memory pointers now */
+       iounmap(opregion->header);
+       opregion->header = NULL;
+       opregion->acpi = NULL;
+       opregion->swsci = NULL;
+       opregion->asle = NULL;
+       
+       opregion->enabled = 0;
+ }
Simple merge
@@@ -40,15 -40,11 +40,15 @@@ int i915_wait_ring(struct drm_device * 
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-       u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-       u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
+       u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++      u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
 +      u32 last_acthd = I915_READ(acthd_reg);
 +      u32 acthd;
        int i;
  
 -      for (i = 0; i < 10000; i++) {
 +      for (i = 0; i < 100000; i++) {
-               ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 +              acthd = I915_READ(acthd_reg);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->Size;
@@@ -542,8 -516,8 +542,8 @@@ void i915_emit_breadcrumb(struct drm_de
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
  
        BEGIN_LP_RING(4);
-       OUT_RING(CMD_STORE_DWORD_IDX);
-       OUT_RING(5 << STORE_DWORD_INDEX_SHIFT);
+       OUT_RING(MI_STORE_DWORD_INDEX);
 -      OUT_RING(20);
++      OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();
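A quick check that the merged operand matches the old literal, using the MI_STORE_DWORD_INDEX_SHIFT define introduced later in this diff:

        /*
         * 5 << MI_STORE_DWORD_INDEX_SHIFT  =  5 << 2  =  20
         * i.e. byte offset 20 = dword 5 of the hardware status page, which is
         * exactly the dword READ_BREADCRUMB() reads back.
         */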
@@@ -1074,27 -1038,68 +1074,76 @@@ int i915_driver_load(struct drm_device 
  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
        intel_init_chipset_flush_compat(dev);
  #endif
+       intel_opregion_init(dev);
  #endif
  
-       /* Init HWS
-        */
-       if (!I915_NEED_GFX_HWS(dev)) {  
++      /* Init HWS */
++      if (!I915_NEED_GFX_HWS(dev)) {
 +              ret = i915_init_hardware_status(dev);
 +              if (ret)
 +                      return ret;
 +      }
 +
+       I915_WRITE16(HWSTAM, 0xeffe);
+       I915_WRITE16(IMR, 0x0);
+       I915_WRITE16(IER, 0x0);
+       DRM_SPININIT(&dev_priv->swaps_lock, "swap");
+       INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+       dev_priv->swaps_pending = 0;
+       DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
+       dev_priv->user_irq_refcount = 0;
 -      dev_priv->irq_enable_reg = 0;
++      dev_priv->irq_mask_reg = ~0;
+       ret = drm_vblank_init(dev, num_pipes);
+       if (ret)
+               return ret;
+       dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+       i915_enable_interrupt(dev);
+       DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+       /*
+        * Initialize the hardware status page IRQ location.
+        */
+       I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
        return ret;
  }
  
  int i915_driver_unload(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 temp;
+       if (dev_priv) {
+               dev_priv->vblank_pipe = 0;
+               dev_priv->irq_enabled = 0;
+               I915_WRITE(HWSTAM, 0xffffffff);
+               I915_WRITE(IMR, 0xffffffff);
+               I915_WRITE(IER, 0x0);
+               temp = I915_READ(PIPEASTAT);
+               I915_WRITE(PIPEASTAT, temp);
+               temp = I915_READ(PIPEBSTAT);
+               I915_WRITE(PIPEBSTAT, temp);
+               temp = I915_READ(IIR);
+               I915_WRITE(IIR, temp);
+       }
  
 -      if (dev_priv->mmio_map)
 -              drm_rmmap(dev, dev_priv->mmio_map);
 +      i915_free_hardware_status(dev);
 +
 +      drm_rmmap(dev, dev_priv->mmio_map);
  
+ #ifdef __linux__
+       intel_opregion_free(dev);
+ #endif
        drm_free(dev->dev_private, sizeof(drm_i915_private_t),
                 DRM_MEM_DRIVER);
  #ifdef __linux__
@@@ -107,9 -105,23 +112,25 @@@ typedef struct _drm_i915_vbl_swap 
        int flip;
  } drm_i915_vbl_swap_t;
  
+ #ifdef __linux__
+ struct opregion_header;
+ struct opregion_acpi;
+ struct opregion_swsci;
+ struct opregion_asle;
+ struct intel_opregion {
+       struct opregion_header *header;
+       struct opregion_acpi *acpi;
+       struct opregion_swsci *swsci;
+       struct opregion_asle *asle;
+       int enabled;
+ };
+ #endif
  typedef struct drm_i915_private {
 +      struct drm_device *dev;
 +
        drm_local_map_t *sarea;
        drm_local_map_t *mmio_map;
  
@@@ -511,70 -361,18 +543,78 @@@ void i915_flush_ttm(struct drm_ttm *ttm
  /* i915_execbuf.c */
  int i915_execbuffer(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
 +/* i915_gem.c */
 +int i915_gem_init_ioctl(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv);
 +int i915_gem_create_ioctl(struct drm_device *dev, void *data,
 +                        struct drm_file *file_priv);
 +int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 +                       struct drm_file *file_priv);
 +int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 +                        struct drm_file *file_priv);
 +int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv);
 +int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 +                            struct drm_file *file_priv);
 +int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 +                           struct drm_file *file_priv);
 +int i915_gem_execbuffer(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv);
 +int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 +                     struct drm_file *file_priv);
 +int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 +                       struct drm_file *file_priv);
 +int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv);
 +int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 +                          struct drm_file *file_priv);
 +int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 +                         struct drm_file *file_priv);
 +int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 +                         struct drm_file *file_priv);
 +int i915_gem_set_tiling(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv);
 +int i915_gem_get_tiling(struct drm_device *dev, void *data,
 +                      struct drm_file *file_priv);
 +void i915_gem_load(struct drm_device *dev);
 +int i915_gem_proc_init(struct drm_minor *minor);
 +void i915_gem_proc_cleanup(struct drm_minor *minor);
 +int i915_gem_init_object(struct drm_gem_object *obj);
 +void i915_gem_free_object(struct drm_gem_object *obj);
 +int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
 +void i915_gem_object_unpin(struct drm_gem_object *obj);
 +void i915_gem_lastclose(struct drm_device *dev);
 +uint32_t i915_get_gem_seqno(struct drm_device *dev);
 +void i915_gem_retire_requests(struct drm_device *dev);
 +void i915_gem_retire_work_handler(struct work_struct *work);
 +void i915_gem_clflush_object(struct drm_gem_object *obj);
 +#endif
  
 +/* i915_gem_tiling.c */
 +void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 +
 +/* i915_gem_debug.c */
 +void i915_gem_dump_object(struct drm_gem_object *obj, int len,
 +                        const char *where, uint32_t mark);
 +#if WATCH_INACTIVE
 +void i915_verify_inactive(struct drm_device *dev, char *file, int line);
 +#else
 +#define i915_verify_inactive(dev,file,line)
  #endif
 +void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
 +void i915_gem_dump_object(struct drm_gem_object *obj, int len,
 +                        const char *where, uint32_t mark);
 +void i915_dump_lru(struct drm_device *dev, const char *where);
  
  #ifdef __linux__
+ /* i915_opregion.c */
+ extern int intel_opregion_init(struct drm_device *dev);
+ extern void intel_opregion_free(struct drm_device *dev);
+ extern void opregion_asle_intr(struct drm_device *dev);
+ extern void opregion_enable_asle(struct drm_device *dev);
+ #endif
+ #ifdef __linux__
  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
  extern void intel_init_chipset_flush_compat(struct drm_device *dev);
  extern void intel_fini_chipset_flush_compat(struct drm_device *dev);
  #define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
  #define I915_READ16(reg)      DRM_READ16(dev_priv->mmio_map, (reg))
  #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
+ #define I915_READ8(reg)               DRM_READ8(dev_priv->mmio_map, (reg))
+ #define I915_WRITE8(reg,val)  DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
+ #if defined(__FreeBSD__)
+ typedef boolean_t bool;
+ #endif
  
  #define I915_VERBOSE 0
 +#define I915_RING_VALIDATE 0
  
+ #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
  #define RING_LOCALS   unsigned int outring, ringmask, outcount; \
                        volatile char *virt;
  
@@@ -622,29 -419,48 +670,82 @@@ void i915_ring_validate(struct drm_devi
  
  #define ADVANCE_LP_RING() do {                                                \
        if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);   \
 +      I915_RING_DO_VALIDATE(dev);                                     \
        dev_priv->ring.tail = outring;                                  \
        dev_priv->ring.space -= outcount * 4;                           \
-       I915_WRITE(LP_RING + RING_TAIL, outring);                       \
+       I915_WRITE(PRB0_TAIL, outring);                 \
  } while(0)
  
  extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
  
- /* Extended config space */
- #define LBB 0xf4
++#define BREADCRUMB_BITS 31
++#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
++
++#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
++/**
++ * Reads a dword out of the status page, which is written to from the command
++ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
++ * MI_STORE_DATA_IMM.
++ *
++ * The following dwords have a reserved meaning:
++ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
++ * 4: ring 0 head pointer
++ * 5: ring 1 head pointer (915-class)
++ * 6: ring 2 head pointer (915-class)
++ *
++ * The area from dword 0x10 to 0x3ff is available for driver usage.
++ */
++#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
++#define I915_GEM_HWS_INDEX            0x10
++
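As an illustration of the helpers above, the driver-reserved area starting at I915_GEM_HWS_INDEX can be read back as shown below (a sketch; the helper name is hypothetical, though i915_get_gem_seqno() declared earlier in this diff presumably reduces to the same read):

        static inline uint32_t example_read_gem_seqno(drm_i915_private_t *dev_priv)
        {
                /* dword 0x10 is inside the driver-usable 0x10..0x3ff range */
                return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
        }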
 +/* MCH MMIO space */
 +/** 915-945 and GM965 MCH register controlling DRAM channel access */
 +#define DCC           0x200
 +#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL            (0 << 0)
 +#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC   (1 << 0)
 +#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED  (2 << 0)
 +#define DCC_ADDRESSING_MODE_MASK                      (3 << 0)
 +#define DCC_CHANNEL_XOR_DISABLE                               (1 << 10)
 +
 +/** 965 MCH register controlling DRAM channel configuration */
 +#define CHDECMISC             0x111
 +#define CHDECMISC_FLEXMEMORY          (1 << 1)
 +
+ /*
+  * The Bridge device's PCI config space has information about the
+  * fb aperture size and the amount of pre-reserved memory.
+  */
+ #define INTEL_GMCH_CTRL               0x52
+ #define INTEL_GMCH_ENABLED    0x4
+ #define INTEL_GMCH_MEM_MASK   0x1
+ #define INTEL_GMCH_MEM_64M    0x1
+ #define INTEL_GMCH_MEM_128M   0
+ #define INTEL_855_GMCH_GMS_MASK               (0x7 << 4)
+ #define INTEL_855_GMCH_GMS_DISABLED   (0x0 << 4)
+ #define INTEL_855_GMCH_GMS_STOLEN_1M  (0x1 << 4)
+ #define INTEL_855_GMCH_GMS_STOLEN_4M  (0x2 << 4)
+ #define INTEL_855_GMCH_GMS_STOLEN_8M  (0x3 << 4)
+ #define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+ #define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+ #define INTEL_915G_GMCH_GMS_STOLEN_48M        (0x6 << 4)
+ #define INTEL_915G_GMCH_GMS_STOLEN_64M        (0x7 << 4)
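A minimal sketch of how these bridge-device config bits could be consumed; `bridge_dev` and the surrounding probe logic are assumptions and not part of this patch:

        u16 gmch_ctrl;
        unsigned long stolen = 0;

        /* INTEL_GMCH_CTRL lives in the bridge device's PCI config space */
        pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
        switch (gmch_ctrl & INTEL_855_GMCH_GMS_MASK) {
        case INTEL_855_GMCH_GMS_STOLEN_1M:
                stolen = 1 * 1024 * 1024;
                break;
        case INTEL_855_GMCH_GMS_STOLEN_8M:
                stolen = 8 * 1024 * 1024;
                break;
        /* ... the remaining GMS values decode the same way ... */
        default:
                break;
        }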
+ /* PCI config space */
+ #define HPLLCC        0xc0 /* 855 only */
+ #define   GC_CLOCK_CONTROL_MASK               (3 << 0)
+ #define   GC_CLOCK_133_200            (0 << 0)
+ #define   GC_CLOCK_100_200            (1 << 0)
+ #define   GC_CLOCK_100_133            (2 << 0)
+ #define   GC_CLOCK_166_250            (3 << 0)
+ #define GCFGC 0xf0 /* 915+ only */
+ #define   GC_LOW_FREQUENCY_ENABLE     (1 << 7)
+ #define   GC_DISPLAY_CLOCK_190_200_MHZ        (0 << 4)
+ #define   GC_DISPLAY_CLOCK_333_MHZ    (4 << 4)
+ #define   GC_DISPLAY_CLOCK_MASK               (7 << 4)
+ #define LBB   0xf4
  
  /* VGA stuff */
  
  #define VGA_CR_INDEX_CGA 0x3d4
  #define VGA_CR_DATA_CGA 0x3d5
  
- #define GFX_OP_USER_INTERRUPT         ((0<<29)|(2<<23))
- #define GFX_OP_BREAKPOINT_INTERRUPT   ((0<<29)|(1<<23))
- #define CMD_REPORT_HEAD                       (7<<23)
- #define CMD_STORE_DWORD_IMM             ((0x20<<23) | (0x1 << 22) | 0x1)
- /**
-  * Stores a 32-bit integer to the status page at the dword index given.
+ /*
+  * Memory interface instructions used by the kernel
   */
- #define CMD_STORE_DWORD_IDX           ((0x21<<23) | 0x1)
- # define STORE_DWORD_INDEX_SHIFT              2
+ #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+ #define MI_NOOP                       MI_INSTR(0, 0)
+ #define MI_USER_INTERRUPT     MI_INSTR(0x02, 0)
+ #define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
+ #define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+ #define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+ #define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+ #define MI_FLUSH              MI_INSTR(0x04, 0)
+ #define   MI_READ_FLUSH               (1 << 0)
+ #define   MI_EXE_FLUSH                (1 << 1)
+ #define   MI_NO_WRITE_FLUSH   (1 << 2)
+ #define   MI_SCENE_COUNT      (1 << 3) /* just increment scene count */
+ #define   MI_END_SCENE                (1 << 4) /* flush binner and incr scene count */
+ #define MI_BATCH_BUFFER_END   MI_INSTR(0x0a, 0)
+ #define MI_REPORT_HEAD                MI_INSTR(0x07, 0)
+ #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+ #define MI_STORE_DWORD_IMM    MI_INSTR(0x20, 1)
+ #define   MI_MEM_VIRTUAL      (1 << 22) /* 965+ only */
+ #define MI_STORE_DWORD_INDEX  MI_INSTR(0x21, 1)
++#define   MI_STORE_DWORD_INDEX_SHIFT 2
+ #define MI_LOAD_REGISTER_IMM  MI_INSTR(0x22, 1)
+ #define MI_BATCH_BUFFER               MI_INSTR(0x30, 1)
+ #define   MI_BATCH_NON_SECURE (1)
+ #define   MI_BATCH_NON_SECURE_I965 (1<<8)
+ #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
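For illustration, these MI opcodes are emitted through the LP_RING macros defined elsewhere in this header; a typical flush looks roughly like this (sketch, not part of the patch):

        BEGIN_LP_RING(2);
        OUT_RING(MI_FLUSH | MI_READ_FLUSH | MI_EXE_FLUSH);
        OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();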
  
- #define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
 -#define BREADCRUMB_BITS 31
 -#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
 -
 -#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
 -
 -/**
 - * Reads a dword out of the status page, which is written to from the command
 - * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 - * MI_STORE_DATA_IMM.
 - *
 - * The following dwords have a reserved meaning:
 - * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 - * 4: ring 0 head pointer
 - * 5: ring 1 head pointer (915-class)
 - * 6: ring 2 head pointer (915-class)
 - *
 - * The area from dword 0x10 to 0x3ff is available for driver usage.
 - */
 -#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
 -#define I915_GEM_HWS_INDEX            0x10
 -
+ /*
+  * 3D instructions used by the kernel
+  */
+ #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
  
- #define CMD_MI_FLUSH         (0x04 << 23)
- #define MI_NO_WRITE_FLUSH    (1 << 2)
- #define MI_READ_FLUSH        (1 << 0)
- #define MI_EXE_FLUSH         (1 << 1)
- #define MI_END_SCENE         (1 << 4) /* flush binner and incr scene count */
- #define MI_SCENE_COUNT       (1 << 3) /* just increment scene count */
+ #define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
+ #define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+ #define   SC_UPDATE_SCISSOR       (0x1<<1)
+ #define   SC_ENABLE_MASK          (0x1<<0)
+ #define   SC_ENABLE               (0x1<<0)
+ #define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+ #define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+ #define   SCI_YMIN_MASK      (0xffff<<16)
+ #define   SCI_XMIN_MASK      (0xffff<<0)
+ #define   SCI_YMAX_MASK      (0xffff<<16)
+ #define   SCI_XMAX_MASK      (0xffff<<0)
+ #define GFX_OP_SCISSOR_ENABLE  ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+ #define GFX_OP_SCISSOR_RECT    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+ #define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+ #define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+ #define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+ #define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
++#define GFX_OP_DESTBUFFER_INFO         ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+ #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+ #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
+ #define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+ #define XY_SRC_COPY_BLT_CMD           ((2<<29)|(0x53<<22)|6)
+ #define XY_MONO_SRC_COPY_IMM_BLT      ((2<<29)|(0x71<<22)|5)
+ #define XY_SRC_COPY_BLT_WRITE_ALPHA   (1<<21)
+ #define XY_SRC_COPY_BLT_WRITE_RGB     (1<<20)
+ #define   BLT_DEPTH_8                 (0<<24)
+ #define   BLT_DEPTH_16_565            (1<<24)
+ #define   BLT_DEPTH_16_1555           (2<<24)
+ #define   BLT_DEPTH_32                        (3<<24)
+ #define   BLT_ROP_GXCOPY              (0xcc<<16)
+ #define XY_SRC_COPY_BLT_SRC_TILED     (1<<15) /* 965+ only */
+ #define XY_SRC_COPY_BLT_DST_TILED     (1<<11) /* 965+ only */
+ #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+ #define   ASYNC_FLIP                (1<<22)
+ #define   DISPLAY_PLANE_A           (0<<20)
+ #define   DISPLAY_PLANE_B           (1<<20)
  
- /* Packet to load a register value from the ring/batch command stream:
+ /*
+  * Instruction and interrupt control regs
   */
- #define CMD_MI_LOAD_REGISTER_IMM      ((0x22 << 23)|0x1)
  
- #define BB1_START_ADDR_MASK   (~0x7)
- #define BB1_PROTECTED         (1<<0)
- #define BB1_UNPROTECTED       (0<<0)
- #define BB2_END_ADDR_MASK     (~0x7)
+ #define PRB0_TAIL     0x02030
+ #define PRB0_HEAD     0x02034
+ #define PRB0_START    0x02038
+ #define PRB0_CTL      0x0203c
+ #define   TAIL_ADDR           0x001FFFF8
+ #define   HEAD_WRAP_COUNT     0xFFE00000
+ #define   HEAD_WRAP_ONE               0x00200000
+ #define   HEAD_ADDR           0x001FFFFC
+ #define   RING_NR_PAGES               0x001FF000
+ #define   RING_REPORT_MASK    0x00000006
+ #define   RING_REPORT_64K     0x00000002
+ #define   RING_REPORT_128K    0x00000004
+ #define   RING_NO_REPORT      0x00000000
+ #define   RING_VALID_MASK     0x00000001
+ #define   RING_VALID          0x00000001
+ #define   RING_INVALID                0x00000000
+ #define PRB1_TAIL     0x02040 /* 915+ only */
+ #define PRB1_HEAD     0x02044 /* 915+ only */
+ #define PRB1_START    0x02048 /* 915+ only */
+ #define PRB1_CTL      0x0204c /* 915+ only */
++#define ACTHD_I965    0x02074
+ #define HWS_PGA               0x02080
++#define HWS_ADDRESS_MASK      0xfffff000
++#define HWS_START_ADDRESS_SHIFT       4
+ #define IPEIR         0x02088
+ #define NOPID         0x02094
+ #define HWSTAM                0x02098
+ #define SCPD0         0x0209c /* 915+ only */
+ #define IER           0x020a0
+ #define IIR           0x020a4
+ #define IMR           0x020a8
+ #define ISR           0x020ac
+ #define   I915_PIPE_CONTROL_NOTIFY_INTERRUPT          (1<<18)
+ #define   I915_DISPLAY_PORT_INTERRUPT                 (1<<17)
+ #define   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT  (1<<15)
+ #define   I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT    (1<<14)
+ #define   I915_HWB_OOM_INTERRUPT                      (1<<13)
+ #define   I915_SYNC_STATUS_INTERRUPT                  (1<<12)
+ #define   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+ #define   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+ #define   I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT   (1<<9)
+ #define   I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
+ #define   I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT                (1<<7)
+ #define   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT         (1<<6)
+ #define   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT                (1<<5)
+ #define   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT         (1<<4)
+ #define   I915_DEBUG_INTERRUPT                                (1<<2)
+ #define   I915_USER_INTERRUPT                         (1<<1)
+ #define   I915_ASLE_INTERRUPT                         (1<<0)
+ #define EIR           0x020b0
+ #define EMR           0x020b4
+ #define ESR           0x020b8
+ #define INSTPM                0x020c0
++#define ACTHD         0x020c8
+ #define FW_BLC                0x020d8
+ #define FW_BLC_SELF   0x020e0 /* 915+ only */
+ #define MI_ARB_STATE  0x020e4 /* 915+ only */
+ #define CACHE_MODE_0  0x02120 /* 915+ only */
+ #define   CM0_MASK_SHIFT          16
+ #define   CM0_IZ_OPT_DISABLE      (1<<6)
+ #define   CM0_ZR_OPT_DISABLE      (1<<5)
+ #define   CM0_DEPTH_EVICT_DISABLE (1<<4)
+ #define   CM0_COLOR_EVICT_DISABLE (1<<3)
+ #define   CM0_DEPTH_WRITE_DISABLE (1<<1)
+ #define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
+ #define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+ /*
+  * Framebuffer compression (915+ only)
+  */
  
- /* Framebuffer compression */
  #define FBC_CFB_BASE          0x03200 /* 4k page aligned */
  #define FBC_LL_BASE           0x03204 /* 4k page aligned */
  #define FBC_CONTROL           0x03208
  #define FBC_FENCE_OFF         0x0321b
  
  #define FBC_LL_SIZE           (1536)
- #define FBC_LL_PAD            (32)
- /* Interrupt bits:
-  */
- #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT            (1<<18)
- #define I915_DISPLAY_PORT_INTERRUPT                   (1<<17)
- #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT    (1<<15)
- #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT      (1<<14)
- #define I915_HWB_OOM_INTERRUPT                                (1<<13) /* binner out of memory */
- #define I915_SYNC_STATUS_INTERRUPT                    (1<<12)
- #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT   (1<<11)
- #define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT   (1<<10)
- #define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT     (1<<9)
- #define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT   (1<<8)
- #define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT          (1<<7)
- #define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT           (1<<6)
- #define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT          (1<<5)
- #define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT           (1<<4)
- #define I915_DEBUG_INTERRUPT                          (1<<2)
- #define I915_USER_INTERRUPT                           (1<<1)
- #define I915REG_HWSTAM                0x02098
- #define I915REG_INT_IDENTITY_R        0x020a4
- #define I915REG_INT_MASK_R    0x020a8
- #define I915REG_INT_ENABLE_R  0x020a0
- #define I915REG_INSTPM                0x020c0
- #define I965REG_ACTHD         0x02074
- #define I915REG_ACTHD         0x020C8
- #define PIPEADSL              0x70000
- #define PIPEBDSL              0x71000
  
- #define I915REG_PIPEASTAT     0x70024
- #define I915REG_PIPEBSTAT     0x71024
  /*
-  * The two pipe frame counter registers are not synchronized, so
-  * reading a stable value is somewhat tricky. The following code 
-  * should work:
-  *
-  *  do {
-  *    high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
-  *             PIPE_FRAME_HIGH_SHIFT;
-  *    low1 =  ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
-  *             PIPE_FRAME_LOW_SHIFT);
-  *    high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
-  *             PIPE_FRAME_HIGH_SHIFT);
-  *  } while (high1 != high2);
-  *  frame = (high1 << 8) | low1;
+  * GPIO regs
   */
- #define PIPEAFRAMEHIGH          0x70040
- #define PIPEBFRAMEHIGH                0x71040
- #define PIPE_FRAME_HIGH_MASK    0x0000ffff
- #define PIPE_FRAME_HIGH_SHIFT   0
- #define PIPEAFRAMEPIXEL         0x70044
- #define PIPEBFRAMEPIXEL               0x71044
 -
+ #define GPIOA                 0x5010
+ #define GPIOB                 0x5014
+ #define GPIOC                 0x5018
+ #define GPIOD                 0x501c
+ #define GPIOE                 0x5020
+ #define GPIOF                 0x5024
+ #define GPIOG                 0x5028
+ #define GPIOH                 0x502c
+ # define GPIO_CLOCK_DIR_MASK          (1 << 0)
+ # define GPIO_CLOCK_DIR_IN            (0 << 1)
+ # define GPIO_CLOCK_DIR_OUT           (1 << 1)
+ # define GPIO_CLOCK_VAL_MASK          (1 << 2)
+ # define GPIO_CLOCK_VAL_OUT           (1 << 3)
+ # define GPIO_CLOCK_VAL_IN            (1 << 4)
+ # define GPIO_CLOCK_PULLUP_DISABLE    (1 << 5)
+ # define GPIO_DATA_DIR_MASK           (1 << 8)
+ # define GPIO_DATA_DIR_IN             (0 << 9)
+ # define GPIO_DATA_DIR_OUT            (1 << 9)
+ # define GPIO_DATA_VAL_MASK           (1 << 10)
+ # define GPIO_DATA_VAL_OUT            (1 << 11)
+ # define GPIO_DATA_VAL_IN             (1 << 12)
+ # define GPIO_DATA_PULLUP_DISABLE     (1 << 13)
  
- #define PIPE_FRAME_LOW_MASK     0xff000000
- #define PIPE_FRAME_LOW_SHIFT    24
  /*
-  * Pixel within the current frame is counted in the PIPEAFRAMEPIXEL register
-  * and is 24 bits wide.
+  * Clock control & power management
   */
- #define PIPE_PIXEL_MASK         0x00ffffff
- #define PIPE_PIXEL_SHIFT        0
+ #define VGA0  0x6000
+ #define VGA1  0x6004
+ #define VGA_PD        0x6010
+ #define   VGA0_PD_P2_DIV_4    (1 << 7)
+ #define   VGA0_PD_P1_DIV_2    (1 << 5)
+ #define   VGA0_PD_P1_SHIFT    0
+ #define   VGA0_PD_P1_MASK     (0x1f << 0)
+ #define   VGA1_PD_P2_DIV_4    (1 << 15)
+ #define   VGA1_PD_P1_DIV_2    (1 << 13)
+ #define   VGA1_PD_P1_SHIFT    8
+ #define   VGA1_PD_P1_MASK     (0x1f << 8)
+ #define DPLL_A        0x06014
+ #define DPLL_B        0x06018
+ #define   DPLL_VCO_ENABLE             (1 << 31)
+ #define   DPLL_DVO_HIGH_SPEED         (1 << 30)
+ #define   DPLL_SYNCLOCK_ENABLE                (1 << 29)
+ #define   DPLL_VGA_MODE_DIS           (1 << 28)
+ #define   DPLLB_MODE_DAC_SERIAL               (1 << 26) /* i915 */
+ #define   DPLLB_MODE_LVDS             (2 << 26) /* i915 */
+ #define   DPLL_MODE_MASK              (3 << 26)
+ #define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+ #define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+ #define   DPLLB_LVDS_P2_CLOCK_DIV_14  (0 << 24) /* i915 */
+ #define   DPLLB_LVDS_P2_CLOCK_DIV_7   (1 << 24) /* i915 */
+ #define   DPLL_P2_CLOCK_DIV_MASK      0x03000000 /* i915 */
+ #define   DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
  
  #define I915_FIFO_UNDERRUN_STATUS             (1UL<<31)
  #define I915_CRC_ERROR_ENABLE                 (1UL<<29)
  
  #define MAX_NOPID ((u32)~0)
  
-               I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
-               (void) I915_READ(I915REG_INT_MASK_R);
 +/*
 + * These are the interrupts used by the driver
 + */
 +#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
 +                                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
 +                                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
 +
 +static inline void
 +i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
 +{
 +      if ((dev_priv->irq_mask_reg & mask) != 0) {
 +              dev_priv->irq_mask_reg &= ~mask;
-               I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
-               (void) I915_READ(I915REG_INT_MASK_R);
++              I915_WRITE(IMR, dev_priv->irq_mask_reg);
++              (void) I915_READ(IMR);
 +      }
 +}
 +
 +static inline void
 +i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
 +{
 +      if ((dev_priv->irq_mask_reg & mask) != mask) {
 +              dev_priv->irq_mask_reg |= mask;
++              I915_WRITE(IMR, dev_priv->irq_mask_reg);
++              (void) I915_READ(IMR);
 +      }
 +}
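Callers serialize updates to the cached mask with the user_irq_lock spinlock, as the vblank hooks below do; a typical call site looks like this (sketch):

        DRM_SPINLOCK(&dev_priv->user_irq_lock);
        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        DRM_SPINUNLOCK(&dev_priv->user_irq_lock);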
 +
  /**
   * i915_get_pipe - return the pipe associated with a given plane
   * @dev: DRM device
@@@ -467,54 -403,64 +430,77 @@@ irqreturn_t i915_driver_irq_handler(DRM
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir;
 -      u32 pipea_stats, pipeb_stats;
 +      u32 pipea_stats = 0, pipeb_stats = 0;
        int vblank = 0;
  
-               I915_WRITE(I915REG_INT_MASK_R, ~0);
-       iir = I915_READ(I915REG_INT_IDENTITY_R);
 +      if (dev->pdev->msi_enabled)
 -      if (iir == 0)
++              I915_WRITE(IMR, ~0);
+       iir = I915_READ(IIR);
-               DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
-                          iir,
-                          I915_READ(I915REG_INT_MASK_R),
-                          I915_READ(I915REG_INT_ENABLE_R),
-                          I915_READ(I915REG_PIPEASTAT),
-                          I915_READ(I915REG_PIPEBSTAT));
 +#if 0
 +      DRM_DEBUG("flag=%08x\n", iir);
 +#endif
 +      atomic_inc(&dev_priv->irq_received);
 +      if (iir == 0) {
-                       I915_WRITE(I915REG_INT_MASK_R,
-                                  dev_priv->irq_mask_reg);
-                       (void) I915_READ(I915REG_INT_MASK_R);
 +              if (dev->pdev->msi_enabled) {
++                      I915_WRITE(IMR, dev_priv->irq_mask_reg);
++                      (void) I915_READ(IMR);
 +              }
                return IRQ_NONE;
 +      }
  
        /*
         * Clear the PIPE(A|B)STAT regs before the IIR otherwise
         * we may get extra interrupts.
         */
        if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
-               pipea_stats = I915_READ(I915REG_PIPEASTAT);
-               I915_WRITE(I915REG_PIPEASTAT, pipea_stats);
+               pipea_stats = I915_READ(PIPEASTAT);
+               if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+                                  PIPE_VBLANK_INTERRUPT_STATUS))
+               {
+                       vblank++;
+                       drm_handle_vblank(dev, i915_get_plane(dev, 0));
+               }
 -
+               I915_WRITE(PIPEASTAT, pipea_stats);
        }
        if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
-               pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
-               I915_WRITE(I915REG_PIPEBSTAT, pipeb_stats);
+               pipeb_stats = I915_READ(PIPEBSTAT);
+               /* Ack the event */
+               I915_WRITE(PIPEBSTAT, pipeb_stats);
+               /* The vblank interrupt gets enabled even if we didn't ask for
+                  it, so make sure it's shut down again */
+               if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
+                       pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE);
+               if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+                                  PIPE_VBLANK_INTERRUPT_STATUS))
+               {
+                       vblank++;
+                       drm_handle_vblank(dev, i915_get_plane(dev, 1));
+               }
+ #ifdef __linux__
+               if (pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE)
+                       opregion_asle_intr(dev);
+ #endif
+               I915_WRITE(PIPEBSTAT, pipeb_stats);
        }
  
-       I915_WRITE(I915REG_INT_IDENTITY_R, iir);
-       if (dev->pdev->msi_enabled)
-               I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
-       (void) I915_READ(I915REG_INT_IDENTITY_R); /* Flush posted writes */
+ #ifdef __linux__
+       if (iir & I915_ASLE_INTERRUPT)
+               opregion_asle_intr(dev);
+ #endif
  
        if (dev_priv->sarea_priv)
            dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
  
 -      (void) I915_READ(IIR);
+       I915_WRITE(IIR, iir);
++      if (dev->pdev->msi_enabled)
++              I915_WRITE(IMR, dev_priv->irq_mask_reg);
++      (void) I915_READ(IIR); /* Flush posted writes */
        if (iir & I915_USER_INTERRUPT) {
 +              dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
                DRM_WAKEUP(&dev_priv->irq_queue);
  #ifdef I915_HAVE_FENCE
                i915_fence_handler(dev);
@@@ -664,12 -594,12 +640,12 @@@ int i915_enable_vblank(struct drm_devic
  
        switch (pipe) {
        case 0:
-               pipestat_reg = I915REG_PIPEASTAT;
+               pipestat_reg = PIPEASTAT;
 -              dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
 +              mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                break;
        case 1:
-               pipestat_reg = I915REG_PIPEBSTAT;
+               pipestat_reg = PIPEBSTAT;
 -              dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 +              mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                break;
        default:
                DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
                /*
                 * Clear any pending status
                 */
-               pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
-                            I915_VBLANK_INTERRUPT_STATUS);
+               pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+                            PIPE_VBLANK_INTERRUPT_STATUS);
                I915_WRITE(pipestat_reg, pipestat);
        }
 -      I915_WRITE(IER, dev_priv->irq_enable_reg);
 +      DRM_SPINLOCK(&dev_priv->user_irq_lock);
 +      i915_enable_irq(dev_priv, mask_reg);
 +      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
  
        return 0;
  }
@@@ -712,12 -639,12 +688,12 @@@ void i915_disable_vblank(struct drm_dev
  
        switch (pipe) {
        case 0:
-               pipestat_reg = I915REG_PIPEASTAT;
+               pipestat_reg = PIPEASTAT;
 -              dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
 +              mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                break;
        case 1:
-               pipestat_reg = I915REG_PIPEBSTAT;
+               pipestat_reg = PIPEBSTAT;
 -              dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 +              mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                break;
        default:
                DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
                break;
        }
  
 -      I915_WRITE(IER, dev_priv->irq_enable_reg);
 +      DRM_SPINLOCK(&dev_priv->user_irq_lock);
 +      i915_disable_irq(dev_priv, mask_reg);
 +      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
        if (pipestat_reg)
        {
                pipestat = I915_READ (pipestat_reg);
                /*
                 * Clear any pending status
                 */
-               pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
-                            I915_VBLANK_INTERRUPT_STATUS);
+               pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+                            PIPE_VBLANK_INTERRUPT_STATUS);
                I915_WRITE(pipestat_reg, pipestat);
 +              (void) I915_READ(pipestat_reg);
        }
  }
  
static void i915_enable_interrupt (struct drm_device *dev)
+ void i915_enable_interrupt (struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
--      
 -      dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
++
 +      dev_priv->irq_mask_reg = ~0;
-       I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
-       I915_WRITE(I915REG_INT_ENABLE_R, I915_INTERRUPT_ENABLE_MASK);
-       (void) I915_READ (I915REG_INT_ENABLE_R);
-       dev_priv->irq_enabled = 1;
- }
++      I915_WRITE(IMR, dev_priv->irq_mask_reg);
++      I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
++      (void) I915_READ (IER);
  
- static void i915_disable_interrupt (struct drm_device *dev)
- {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       
-       I915_WRITE(I915REG_HWSTAM, 0xffffffff);
-       I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
-       I915_WRITE(I915REG_INT_ENABLE_R, 0);
-       I915_WRITE(I915REG_INT_IDENTITY_R, 0xffffffff);
-       (void) I915_READ (I915REG_INT_IDENTITY_R);
-       dev_priv->irq_enabled = 0;
+ #ifdef __linux__
+       opregion_enable_asle(dev);
+ #endif
 -      I915_WRITE(IER, dev_priv->irq_enable_reg);
+       dev_priv->irq_enabled = 1;
  }
  
  /* Set the vblank monitor pipe