Merge commit 'origin/drm-gem' into modesetting-gem
author    Kristian Høgsberg <krh@redhat.com>
          Fri, 1 Aug 2008 17:35:56 +0000 (13:35 -0400)
committer Kristian Høgsberg <krh@redhat.com>
          Fri, 1 Aug 2008 17:35:56 +0000 (13:35 -0400)
Conflicts:
linux-core/Makefile.kernel
linux-core/drmP.h
linux-core/drm_mm.c
linux-core/drm_stub.c
linux-core/i915_gem.c
linux-core/i915_opregion.c
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/i915_irq.c

20 files changed:
linux-core/Makefile.kernel
linux-core/drmP.h
linux-core/drm_bo.c
linux-core/drm_bo_move.c
linux-core/drm_drv.c
linux-core/drm_mm.c
linux-core/drm_objects.h
linux-core/drm_proc.c
linux-core/drm_stub.c
linux-core/i915_drv.c
linux-core/i915_gem.c
linux-core/i915_gem_proc.c
linux-core/i915_gem_tiling.c
linux-core/i915_opregion.c
linux-core/radeon_buffer.c
shared-core/i915_dma.c
shared-core/i915_drm.h
shared-core/i915_drv.h
shared-core/i915_init.c
shared-core/i915_irq.c

@@@ -12,9 -12,8 +12,9 @@@ drm-objs    := drm_auth.o drm_bufs.o dr
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
                drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
                drm_memory_debug.o ati_pcigart.o drm_sman.o \
-               drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
+               drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
                drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
 +              drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
                drm_regman.o drm_vm_nopage_compat.o drm_gem.o
  tdfx-objs   := tdfx_drv.o
  r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
@@@ -22,11 -21,8 +22,12 @@@ mga-objs    := mga_drv.o mga_dma.o mga_
  i810-objs   := i810_drv.o i810_dma.o
  i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
                i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
-               i915_gem.o i915_opregion.o \
+               i915_opregion.o \
 -              i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
++              i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o \
 +              intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
 +              intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
 +              intel_tv.o intel_dvo.o dvo_ch7xxx.o \
 +              dvo_ch7017.o dvo_ivch.o dvo_tfp410.o dvo_sil164.o
  nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
                nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
                nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
@@@ -584,9 -573,7 +579,9 @@@ struct drm_map_list 
        struct drm_hash_item hash;
        struct drm_map *map;                    /**< mapping */
        uint64_t user_token;
-       struct drm_memrange_node *file_offset_node;
 +      struct drm_master *master; /** if this map is associated with a specific
 +                                     master */
+       struct drm_mm_node *file_offset_node;
  };
  
  typedef struct drm_map drm_local_map_t;
@@@ -1415,33 -1328,25 +1410,29 @@@ struct drm_sysfs_class
  extern struct class *drm_sysfs_create(struct module *owner, char *name);
  extern void drm_sysfs_destroy(void);
  extern int drm_sysfs_device_add(struct drm_minor *minor);
 +extern void drm_sysfs_hotplug_event(struct drm_device *dev);
  extern void drm_sysfs_device_remove(struct drm_minor *minor);
 +extern char *drm_get_connector_status_name(enum drm_connector_status status);
 +extern int drm_sysfs_connector_add(struct drm_connector *connector);
 +extern void drm_sysfs_connector_remove(struct drm_connector *connector);
  
  /*
-  * Basic memory manager support (drm_memrange.c)
+  * Basic memory manager support (drm_mm.c)
   */
  
- extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
-                                                       unsigned long size,
-                                                       unsigned alignment);
- extern void drm_memrange_put_block(struct drm_memrange_node *cur);
- extern struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange *mm,
-                                                         unsigned long size,
-                                                         unsigned alignment, int best_match);
- extern int drm_memrange_init(struct drm_memrange *mm,
-                            unsigned long start, unsigned long size);
- extern void drm_memrange_takedown(struct drm_memrange *mm);
- extern int drm_memrange_clean(struct drm_memrange *mm);
- extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm);
- extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm,
-                                              unsigned long size);
- extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm,
-                                         unsigned long size);
- static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
+ extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
+                                              unsigned alignment);
+ extern void drm_mm_put_block(struct drm_mm_node *cur);
+ extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
+                                               unsigned alignment, int best_match);
+ extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
+ extern void drm_mm_takedown(struct drm_mm *mm);
+ extern int drm_mm_clean(struct drm_mm *mm);
+ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+ extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
+ extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
+ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
  {
        return block->mm;
  }
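
For orientation, the drm_memrange → drm_mm rename above changes names only, not semantics. A minimal sketch of the allocator's call sequence under the new names (hypothetical helper, error handling elided, the 64 MB range purely illustrative):

	static void drm_mm_usage_sketch(void)
	{
		struct drm_mm mm;
		struct drm_mm_node *node;

		/* Manage a 64 MB range of offsets starting at 0. */
		drm_mm_init(&mm, 0, 64 * 1024 * 1024);

		/* Find a hole for a page-aligned 4 KB block, then claim it. */
		node = drm_mm_search_free(&mm, 4096, PAGE_SIZE, 0);
		if (node)
			node = drm_mm_get_block(node, 4096, PAGE_SIZE);

		/* All blocks must be returned before takedown. */
		if (node)
			drm_mm_put_block(node);
		drm_mm_takedown(&mm);
	}
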
Simple merge
Simple merge
@@@ -428,13 -419,10 +428,13 @@@ static void drm_cleanup(struct drm_devi
  
        drm_ctxbitmap_cleanup(dev);
        drm_ht_remove(&dev->map_hash);
-       drm_memrange_takedown(&dev->offset_manager);
+       drm_mm_takedown(&dev->offset_manager);
        drm_ht_remove(&dev->object_hash);
  
 -      drm_put_minor(dev);
 +      drm_put_minor(dev, &dev->primary);
 +      if (drm_core_check_feature(dev, DRIVER_MODESET))
 +              drm_put_minor(dev, &dev->control);
 +
        if (drm_put_dev(dev))
                DRM_ERROR("Cannot unload module\n");
  }
@@@ -294,4 -294,5 +294,4 @@@ void drm_mm_takedown(struct drm_mm * mm
        list_del(&entry->ml_entry);
        drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
  }
- EXPORT_SYMBOL(drm_memrange_takedown);
 -
+ EXPORT_SYMBOL(drm_mm_takedown);
Simple merge
Simple merge
@@@ -198,11 -112,11 +198,10 @@@ static int drm_fill_in_dev(struct drm_d
        dev->irq = pdev->irq;
        dev->irq_enabled = 0;
  
 -      if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
 +      if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
                return -ENOMEM;
-       if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-                             DRM_FILE_PAGE_OFFSET_SIZE)) {
 -      }
+       if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+                       DRM_FILE_PAGE_OFFSET_SIZE)) {
                drm_ht_remove(&dev->map_hash);
                return -ENOMEM;
        }
@@@ -71,204 -61,8 +71,8 @@@ static struct drm_bo_driver i915_bo_dri
        .ttm_cache_flush = i915_flush_ttm,
        .command_stream_barrier = NULL,
  };
 -#endif
 +#endif /* ttm */
  
- static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       if (pipe == PIPE_A)
-               return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
-       else
-               return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
- }
- static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-       u32 *array;
-       int i;
-       if (!i915_pipe_enabled(dev, pipe))
-               return;
-       if (pipe == PIPE_A)
-               array = dev_priv->save_palette_a;
-       else
-               array = dev_priv->save_palette_b;
-       for(i = 0; i < 256; i++)
-               array[i] = I915_READ(reg + (i << 2));
- }
- static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-       u32 *array;
-       int i;
-       if (!i915_pipe_enabled(dev, pipe))
-               return;
-       if (pipe == PIPE_A)
-               array = dev_priv->save_palette_a;
-       else
-               array = dev_priv->save_palette_b;
-       for(i = 0; i < 256; i++)
-               I915_WRITE(reg + (i << 2), array[i]);
- }
- static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
- {
-       outb(reg, index_port);
-       return inb(data_port);
- }
- static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
- {
-       inb(st01);
-       outb(palette_enable | reg, VGA_AR_INDEX);
-       return inb(VGA_AR_DATA_READ);
- }
- static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
- {
-       inb(st01);
-       outb(palette_enable | reg, VGA_AR_INDEX);
-       outb(val, VGA_AR_DATA_WRITE);
- }
- static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
- {
-       outb(reg, index_port);
-       outb(val, data_port);
- }
- static void i915_save_vga(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
-       u16 cr_index, cr_data, st01;
-       /* VGA color palette registers */
-       dev_priv->saveDACMASK = inb(VGA_DACMASK);
-       /* DACCRX automatically increments during read */
-       outb(0, VGA_DACRX);
-       /* Read 3 bytes of color data from each index */
-       for (i = 0; i < 256 * 3; i++)
-               dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
-       /* MSR bits */
-       dev_priv->saveMSR = inb(VGA_MSR_READ);
-       if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-               cr_index = VGA_CR_INDEX_CGA;
-               cr_data = VGA_CR_DATA_CGA;
-               st01 = VGA_ST01_CGA;
-       } else {
-               cr_index = VGA_CR_INDEX_MDA;
-               cr_data = VGA_CR_DATA_MDA;
-               st01 = VGA_ST01_MDA;
-       }
-       /* CRT controller regs */
-       i915_write_indexed(cr_index, cr_data, 0x11,
-                          i915_read_indexed(cr_index, cr_data, 0x11) &
-                          (~0x80));
-       for (i = 0; i <= 0x24; i++)
-               dev_priv->saveCR[i] =
-                       i915_read_indexed(cr_index, cr_data, i);
-       /* Make sure we don't turn off CR group 0 writes */
-       dev_priv->saveCR[0x11] &= ~0x80;
-       /* Attribute controller registers */
-       inb(st01);
-       dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
-       for (i = 0; i <= 0x14; i++)
-               dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
-       inb(st01);
-       outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
-       inb(st01);
-       /* Graphics controller registers */
-       for (i = 0; i < 9; i++)
-               dev_priv->saveGR[i] =
-                       i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
-       dev_priv->saveGR[0x10] =
-               i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
-       dev_priv->saveGR[0x11] =
-               i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
-       dev_priv->saveGR[0x18] =
-               i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-       /* Sequencer registers */
-       for (i = 0; i < 8; i++)
-               dev_priv->saveSR[i] =
-                       i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
- }
- static void i915_restore_vga(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
-       u16 cr_index, cr_data, st01;
-       /* MSR bits */
-       outb(dev_priv->saveMSR, VGA_MSR_WRITE);
-       if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-               cr_index = VGA_CR_INDEX_CGA;
-               cr_data = VGA_CR_DATA_CGA;
-               st01 = VGA_ST01_CGA;
-       } else {
-               cr_index = VGA_CR_INDEX_MDA;
-               cr_data = VGA_CR_DATA_MDA;
-               st01 = VGA_ST01_MDA;
-       }
-       /* Sequencer registers, don't write SR07 */
-       for (i = 0; i < 7; i++)
-               i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
-                                  dev_priv->saveSR[i]);
-       /* CRT controller regs */
-       /* Enable CR group 0 writes */
-       i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
-       for (i = 0; i <= 0x24; i++)
-               i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
-       /* Graphics controller regs */
-       for (i = 0; i < 9; i++)
-               i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
-                                  dev_priv->saveGR[i]);
-       i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
-                          dev_priv->saveGR[0x10]);
-       i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
-                          dev_priv->saveGR[0x11]);
-       i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
-                          dev_priv->saveGR[0x18]);
-       /* Attribute controller registers */
-       inb(st01); /* switch back to index mode */
-       for (i = 0; i <= 0x14; i++)
-               i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
-       inb(st01); /* switch back to index mode */
-       outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
-       inb(st01);
-       /* VGA color palette registers */
-       outb(dev_priv->saveDACMASK, VGA_DACMASK);
-       /* DACCRX automatically increments during read */
-       outb(0, VGA_DACWX);
-       /* Read 3 bytes of color data from each index */
-       for (i = 0; i < 256 * 3; i++)
-               outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
- }
  static int i915_suspend(struct drm_device *dev, pm_message_t state)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -54,41 -47,34 +47,41 @@@ i915_gem_set_domain(struct drm_gem_obje
                    struct drm_file *file_priv,
                    uint32_t read_domains,
                    uint32_t write_domain);
- static void
  i915_gem_clflush_object(struct drm_gem_object *obj);
+ static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+ static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
  static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
  
 -int
 -i915_gem_init_ioctl(struct drm_device *dev, void *data,
 -                  struct drm_file *file_priv)
 +int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 +                   unsigned long end)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct drm_i915_gem_init *args = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
  
 -      mutex_lock(&dev->struct_mutex);
 -
 -      if (args->gtt_start >= args->gtt_end ||
 -          (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
 -          (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
 -              mutex_unlock(&dev->struct_mutex);
 +      if (start >= end ||
 +          (start & (PAGE_SIZE - 1)) != 0 ||
 +          (end & (PAGE_SIZE - 1)) != 0) {
                return -EINVAL;
        }
  
-       drm_memrange_init(&dev_priv->mm.gtt_space, start,
-                         end - start);
 -      drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
 -          args->gtt_end - args->gtt_start);
++      drm_mm_init(&dev_priv->mm.gtt_space, start,
++                  end - start);
  
 -      dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
 +      dev->gtt_total = (uint32_t) (end - start);
 +
 +      return 0;
 +}
 +
 +int
 +i915_gem_init_ioctl(struct drm_device *dev, void *data,
 +                  struct drm_file *file_priv)
 +{
 +      struct drm_i915_gem_init *args = data;
 +      int ret;
  
 +      mutex_lock(&dev->struct_mutex);
 +      ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);
  
 -      return 0;
 +      return ret;
  }
  
  
@@@ -702,11 -745,12 +752,12 @@@ i915_wait_request(struct drm_device *de
  
        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
                dev_priv->mm.waiting_gem_seqno = seqno;
 -              i915_user_irq_on(dev_priv);
 +              i915_user_irq_on(dev);
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
-                                                                seqno) || dev_priv->mm.wedged);
+                                                                seqno) ||
+                                              dev_priv->mm.wedged);
 -              i915_user_irq_off(dev_priv);
 +              i915_user_irq_off(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
        }
        if (dev_priv->mm.wedged)
@@@ -1130,9 -1099,9 +1106,9 @@@ static in
  i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  {
        struct drm_device *dev = obj->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
-       struct drm_memrange_node *free_space;
+       struct drm_mm_node *free_space;
        int page_count, ret;
  
        if (alignment == 0)
@@@ -2317,10 -2283,60 +2290,60 @@@ i915_gem_idle(struct drm_device *dev
        return 0;
  }
  
 -      drm_i915_private_t *dev_priv = dev->dev_private;
+ static int
+ i915_gem_init_hws(struct drm_device *dev)
+ {
 -static int
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+       /* If we need a physical address for the status page, it's already
+        * initialized at driver load time.
+        */
+       if (!I915_NEED_GFX_HWS(dev))
+               return 0;
+       obj = drm_gem_object_alloc(dev, 4096);
+       if (obj == NULL) {
+               DRM_ERROR("Failed to allocate status page\n");
+               return -ENOMEM;
+       }
+       obj_priv = obj->driver_private;
+       ret = i915_gem_object_pin(obj, 4096);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               return ret;
+       }
+       dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+       dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
+       dev_priv->hws_map.size = 4096;
+       dev_priv->hws_map.type = 0;
+       dev_priv->hws_map.flags = 0;
+       dev_priv->hws_map.mtrr = 0;
+       drm_core_ioremap(&dev_priv->hws_map, dev);
+       if (dev_priv->hws_map.handle == NULL) {
+               DRM_ERROR("Failed to map status page.\n");
+               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+               drm_gem_object_unreference(obj);
+               return -EINVAL;
+       }
+       dev_priv->hws_obj = obj;
+       dev_priv->hw_status_page = dev_priv->hws_map.handle;
+       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+       I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+       DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+       return 0;
+ }
 +int
  i915_gem_init_ringbuffer(struct drm_device *dev)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;
  i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
  
++      if (drm_core_check_feature(dev, DRIVER_MODESET))
++              return 0;
++
        if (dev_priv->mm.wedged) {
-               DRM_ERROR("Renabling wedged hardware, good luck\n");
+               DRM_ERROR("Reenabling wedged hardware, good luck\n");
                dev_priv->mm.wedged = 0;
        }
  
@@@ -2424,6 -2455,6 +2465,9 @@@ i915_gem_leavevt_ioctl(struct drm_devic
  {
        int ret;
  
++      if (drm_core_check_feature(dev, DRIVER_MODESET))
++              return 0;
++
        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_idle(dev);
        if (ret == 0)
@@@ -2702,9 -2476,24 +2489,24 @@@ i915_gem_lastclose(struct drm_device *d
                ret = i915_gem_idle(dev);
                if (ret)
                        DRM_ERROR("failed to idle hardware: %d\n", ret);
-       
                i915_gem_cleanup_ringbuffer(dev);
        }
-       
        mutex_unlock(&dev->struct_mutex);
  }
 -      drm_i915_private_t *dev_priv = dev->dev_private;
+ void i915_gem_load(struct drm_device *dev)
+ {
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       INIT_LIST_HEAD(&dev_priv->mm.active_list);
+       INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+       INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->mm.request_list);
+       INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+                         i915_gem_retire_work_handler);
+       dev_priv->mm.next_gem_seqno = 1;
+       i915_gem_detect_bit_6_swizzle(dev);
+ }
index 0000000,132eb3d..2704f92
mode 000000,100644..100644
--- /dev/null
+++ b/linux-core/i915_gem_proc.c
@@@ -1,0 -1,293 +1,293 @@@
+ /*
+  * Copyright © 2008 Intel Corporation
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the "Software"),
+  * to deal in the Software without restriction, including without limitation
+  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+  * and/or sell copies of the Software, and to permit persons to whom the
+  * Software is furnished to do so, subject to the following conditions:
+  *
+  * The above copyright notice and this permission notice (including the next
+  * paragraph) shall be included in all copies or substantial portions of the
+  * Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+  * IN THE SOFTWARE.
+  *
+  * Authors:
+  *    Eric Anholt <eric@anholt.net>
+  *    Keith Packard <keithp@keithp.com>
+  *
+  */
+ #include "drmP.h"
+ #include "drm.h"
+ #include "drm_compat.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+ static int i915_gem_active_info(char *buf, char **start, off_t offset,
+                               int request, int *eof, void *data)
+ {
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       int len = 0;
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Active:\n");
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+                           list)
+       {
+               struct drm_gem_object *obj = obj_priv->obj;
+               if (obj->name) {
+                       DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+                                      obj, obj->name,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               } else {
+                       DRM_PROC_PRINT("       %p: %08x %08x %d\n",
+                                      obj,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               }
+       }
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+ }
+ static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
+                                 int request, int *eof, void *data)
+ {
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       int len = 0;
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Flushing:\n");
+       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+                           list)
+       {
+               struct drm_gem_object *obj = obj_priv->obj;
+               if (obj->name) {
+                       DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+                                      obj, obj->name,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               } else {
+                       DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               }
+       }
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+ }
+ static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
+                                 int request, int *eof, void *data)
+ {
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       int len = 0;
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Inactive:\n");
+       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
+                           list)
+       {
+               struct drm_gem_object *obj = obj_priv->obj;
+               if (obj->name) {
+                       DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+                                      obj, obj->name,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               } else {
+                       DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
+                                      obj->read_domains, obj->write_domain,
+                                      obj_priv->last_rendering_seqno);
+               }
+       }
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+ }
+ static int i915_gem_request_info(char *buf, char **start, off_t offset,
+                                int request, int *eof, void *data)
+ {
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *gem_request;
+       int len = 0;
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Request:\n");
+       list_for_each_entry(gem_request, &dev_priv->mm.request_list,
+                           list)
+       {
+               DRM_PROC_PRINT("    %d @ %d %08x\n",
+                              gem_request->seqno,
+                              (int) (jiffies - gem_request->emitted_jiffies),
+                              gem_request->flush_domains);
+       }
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+ }
+ static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
+                              int request, int *eof, void *data)
+ {
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       int len = 0;
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+       DRM_PROC_PRINT("Waiter sequence:  %d\n",
+                      dev_priv->mm.waiting_gem_seqno);
+       DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+ }
+ static int i915_interrupt_info(char *buf, char **start, off_t offset,
+                              int request, int *eof, void *data)
+ {
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       int len = 0;
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+       *start = &buf[offset];
+       *eof = 0;
+       DRM_PROC_PRINT("Interrupt enable:    %08x\n",
+                      I915_READ(IER));
+       DRM_PROC_PRINT("Interrupt identity:  %08x\n",
+                      I915_READ(IIR));
+       DRM_PROC_PRINT("Interrupt mask:      %08x\n",
+                      I915_READ(IMR));
+       DRM_PROC_PRINT("Pipe A stat:         %08x\n",
+                      I915_READ(PIPEASTAT));
+       DRM_PROC_PRINT("Pipe B stat:         %08x\n",
+                      I915_READ(PIPEBSTAT));
+       DRM_PROC_PRINT("Interrupts received: %d\n",
+                      atomic_read(&dev_priv->irq_received));
+       DRM_PROC_PRINT("Current sequence:    %d\n",
+                      i915_get_gem_seqno(dev));
+       DRM_PROC_PRINT("Waiter sequence:     %d\n",
+                      dev_priv->mm.waiting_gem_seqno);
+       DRM_PROC_PRINT("IRQ sequence:        %d\n",
+                      dev_priv->mm.irq_gem_seqno);
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+ }
+ static struct drm_proc_list {
+       /** file name */
+       const char *name;
+       /** proc callback*/
+       int (*f) (char *, char **, off_t, int, int *, void *);
+ } i915_gem_proc_list[] = {
+       {"i915_gem_active", i915_gem_active_info},
+       {"i915_gem_flushing", i915_gem_flushing_info},
+       {"i915_gem_inactive", i915_gem_inactive_info},
+       {"i915_gem_request", i915_gem_request_info},
+       {"i915_gem_seqno", i915_gem_seqno_info},
+       {"i915_gem_interrupt", i915_interrupt_info},
+ };
+ #define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
+ int i915_gem_proc_init(struct drm_minor *minor)
+ {
+       struct proc_dir_entry *ent;
+       int i, j;
+       for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
+               ent = create_proc_entry(i915_gem_proc_list[i].name,
+                                       S_IFREG | S_IRUGO, minor->dev_root);
+               if (!ent) {
+                       DRM_ERROR("Cannot create /proc/dri/.../%s\n",
+                                 i915_gem_proc_list[i].name);
+                       for (j = 0; j < i; j++)
+                               remove_proc_entry(i915_gem_proc_list[j].name,
+                                                 minor->dev_root);
+                       return -1;
+               }
+               ent->read_proc = i915_gem_proc_list[i].f;
+               ent->data = minor;
+       }
+       return 0;
+ }
+ void i915_gem_proc_cleanup(struct drm_minor *minor)
+ {
+       int i;
+       if (!minor->dev_root)
+               return;
+       for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
+               remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
+ }
index 0000000,a4ff736..c5825fb
mode 000000,100644..100644
--- /dev/null
+++ b/linux-core/i915_gem_tiling.c
@@@ -1,0 -1,309 +1,309 @@@
+ /*
+  * Copyright © 2008 Intel Corporation
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the "Software"),
+  * to deal in the Software without restriction, including without limitation
+  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+  * and/or sell copies of the Software, and to permit persons to whom the
+  * Software is furnished to do so, subject to the following conditions:
+  *
+  * The above copyright notice and this permission notice (including the next
+  * paragraph) shall be included in all copies or substantial portions of the
+  * Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+  * IN THE SOFTWARE.
+  *
+  * Authors:
+  *    Eric Anholt <eric@anholt.net>
+  *
+  */
+ #include "drmP.h"
+ #include "drm.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+ /** @file i915_gem_tiling.c
+  *
+  * Support for managing tiling state of buffer objects.
+  *
+  * The idea behind tiling is to increase cache hit rates by rearranging
+  * pixel data so that a group of pixel accesses are in the same cacheline.
+  * Performance improvements from doing this on the back/depth buffer are on
+  * the order of 30%.
+  *
+  * Intel architectures make this somewhat more complicated, though, by
+  * adjustments made to addressing of data when the memory is in interleaved
+  * mode (matched pairs of DIMMs) to improve memory bandwidth.
+  * For interleaved memory, the CPU sends every sequential 64 bytes
+  * to an alternate memory channel so it can get the bandwidth from both.
+  *
+  * The GPU also rearranges its accesses for increased bandwidth to interleaved
+  * memory, and it matches what the CPU does for non-tiled.  However, when tiled
+  * it does it a little differently, since one walks addresses not just in the
+  * X direction but also Y.  So, along with alternating channels when bit
+  * 6 of the address flips, it also alternates when other bits flip --  Bits 9
+  * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+  * are common to both the 915 and 965-class hardware.
+  *
+  * The CPU also sometimes XORs in higher bits as well, to improve
+  * bandwidth doing strided access like we do so frequently in graphics.  This
+  * is called "Channel XOR Randomization" in the MCH documentation.  The result
+  * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+  * decode.
+  *
+  * All of this bit 6 XORing has an effect on our memory management,
+  * as we need to make sure that the 3d driver can correctly address object
+  * contents.
+  *
+  * If we don't have interleaved memory, all tiling is safe and no swizzling is
+  * required.
+  *
+  * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
+  * 17 is not just a page offset, so as we page an object out and back in,
+  * individual pages in it will have different bit 17 addresses, resulting in
+  * each 64 bytes being swapped with its neighbor!
+  *
+  * Otherwise, if interleaved, we have to tell the 3d driver what address
+  * swizzling it needs to do, since it's writing with the CPU to the pages
+  * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+  * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+  * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+  * to match what the GPU expects.
+  */
+ /**
+  * Detects bit 6 swizzling of address lookup between IGD access and CPU
+  * access through main memory.
+  */
+ void
+ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       struct pci_dev *bridge;
+       uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+       uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+       int mchbar_offset;
+       char __iomem *mchbar;
+       int ret;
+       bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+       if (bridge == NULL) {
+               DRM_ERROR("Couldn't get bridge device\n");
+               return;
+       }
+       ret = pci_enable_device(bridge);
+       if (ret != 0) {
+               DRM_ERROR("pci_enable_device failed: %d\n", ret);
+               return;
+       }
+       if (IS_I965G(dev))
+               mchbar_offset = 0x48;
+       else
+               mchbar_offset = 0x44;
+       /* Use resource 2 for our BAR that's stashed in a nonstandard location,
+        * since the bridge would only ever use standard BARs 0-1 (though it
+        * doesn't anyway)
+        */
+       ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
+       if (ret != 0) {
+               DRM_ERROR("pci_read_base failed: %d\n", ret);
+               return;
+       }
+       mchbar = ioremap(pci_resource_start(bridge, 2),
+                        pci_resource_len(bridge, 2));
+       if (mchbar == NULL) {
+               DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
+               return;
+       }
+       if (IS_I965G(dev) && !IS_I965GM(dev)) {
+               uint32_t chdecmisc;
+               /* On the 965, channel interleave appears to be determined by
+                * the flex bit.  If flex is set, then the ranks (sides of a
+                * DIMM) of memory will be "stacked" (physical addresses walk
+                * through one rank then move on to the next, flipping channels
+                * or not depending on rank configuration).  The GPU in this
+                * case does exactly the same addressing as the CPU.
+                *
+                * Unlike the 945, channel randomization does not appear
+                * to be available.
+                *
+                * XXX: While the G965 doesn't appear to do any interleaving
+                * when the DIMMs are not exactly matched, the G4x chipsets
+                * might be for "L-shaped" configurations, and will need to be
+                * detected.
+                *
+                * L-shaped configuration:
+                *
+                * +-----+
+                * |     |
+                * |DIMM2|         <-- non-interleaved
+                * +-----+
+                * +-----+ +-----+
+                * |     | |     |
+                * |DIMM0| |DIMM1| <-- interleaved area
+                * +-----+ +-----+
+                */
+               chdecmisc = readb(mchbar + CHDECMISC);
+               if (chdecmisc == 0xff) {
+                       DRM_ERROR("Couldn't read from MCHBAR.  "
+                                 "Disabling tiling.\n");
+               } else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
+                       swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+               } else {
+                       swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                       swizzle_y = I915_BIT_6_SWIZZLE_9;
+               }
+       } else if (IS_I9XX(dev)) {
+               uint32_t dcc;
+               /* On 915-945 and GM965, channel interleave by the CPU is
+                * determined by DCC.  The CPU will alternate based on bit 6
+                * in interleaved mode, and the GPU will then also alternate
+                * on bit 6, 9, and 10 for X, but the CPU may also optionally
+                * alternate based on bit 17 (XOR not disabled and XOR
+                * bit == 17).
+                */
+               dcc = readl(mchbar + DCC);
+               switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+               case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+               case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+                       swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+                       break;
+               case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+                       if (IS_I915G(dev) || IS_I915GM(dev) ||
+                           dcc & DCC_CHANNEL_XOR_DISABLE) {
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9;
+                       } else if (IS_I965GM(dev)) {
+                               /* GM965 only does bit 11-based channel
+                                * randomization
+                                */
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+                       } else {
+                               /* Bit 17 or perhaps other swizzling */
+                               swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+                               swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+                       }
+                       break;
+               }
+               if (dcc == 0xffffffff) {
+                       DRM_ERROR("Couldn't read from MCHBAR.  "
+                                 "Disabling tiling.\n");
+                       swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+                       swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+               }
+       } else {
+               /* As far as we know, the 865 doesn't have these bit 6
+                * swizzling issues.
+                */
+               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+       }
+       iounmap(mchbar);
+       dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+       dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+ }
+ /**
+  * Sets the tiling mode of an object, returning the required swizzling of
+  * bit 6 of addresses in the object.
+  */
+ int
+ i915_gem_set_tiling(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+ {
+       struct drm_i915_gem_set_tiling *args = data;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EINVAL;
+       obj_priv = obj->driver_private;
+       mutex_lock(&dev->struct_mutex);
+       if (args->tiling_mode == I915_TILING_NONE) {
+               obj_priv->tiling_mode = I915_TILING_NONE;
+               args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+       } else {
+               if (args->tiling_mode == I915_TILING_X)
+                       args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+               else
+                       args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+               /* If we can't handle the swizzling, make it untiled. */
+               if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+                       args->tiling_mode = I915_TILING_NONE;
+                       args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+               }
+       }
+       obj_priv->tiling_mode = args->tiling_mode;
+       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       return 0;
+ }
+ /**
+  * Returns the current tiling mode and required bit 6 swizzling for the object.
+  */
+ int
+ i915_gem_get_tiling(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+ {
+       struct drm_i915_gem_get_tiling *args = data;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EINVAL;
+       obj_priv = obj->driver_private;
+       mutex_lock(&dev->struct_mutex);
+       args->tiling_mode = obj_priv->tiling_mode;
+       switch (obj_priv->tiling_mode) {
+       case I915_TILING_X:
+               args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+               break;
+       case I915_TILING_Y:
+               args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+               break;
+       case I915_TILING_NONE:
+               args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+               break;
+       default:
+               DRM_ERROR("unknown tiling mode\n");
+       }
+       mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_unreference(obj);
+       return 0;
+ }
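
The swizzle mode reported by i915_gem_set_tiling/i915_gem_get_tiling above tells CPU-side code which address bits to fold into bit 6 before touching tiled pages. A sketch of the I915_BIT_6_SWIZZLE_9_10 case described in the file comment (hypothetical helper, not part of this commit):

	/* Fold bits 9 and 10 into bit 6, matching the GPU's channel
	 * interleave for X-tiled accesses on dual-channel systems. */
	static unsigned long swizzle_offset_9_10(unsigned long offset)
	{
		unsigned long bit9 = (offset >> 9) & 1;
		unsigned long bit10 = (offset >> 10) & 1;

		return offset ^ ((bit9 ^ bit10) << 6);
	}
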
Simple merge
index e9ba11d,0000000..227a2fa
mode 100644,000000..100644
--- /dev/null
+++ b/linux-core/radeon_buffer.c
@@@ -1,266 -1,0 +1,266 @@@
 +/**************************************************************************
 + * 
 + * Copyright 2007 Dave Airlie
 + * All Rights Reserved.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the
 + * "Software"), to deal in the Software without restriction, including
 + * without limitation the rights to use, copy, modify, merge, publish,
 + * distribute, sub license, and/or sell copies of the Software, and to
 + * permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
 + * 
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
 + * USE OR OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * The above copyright notice and this permission notice (including the
 + * next paragraph) shall be included in all copies or substantial portions
 + * of the Software.
 + * 
 + * 
 + **************************************************************************/
 +/*
 + * Authors: Dave Airlie <airlied@linux.ie>
 + */
 +
 +#include "drmP.h"
 +#include "radeon_drm.h"
 +#include "radeon_drv.h"
 +
 +struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device * dev)
 +{
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +
 +      if(dev_priv->flags & RADEON_IS_AGP)
 +              return drm_agp_init_ttm(dev);
 +      else
 +              return ati_pcigart_init_ttm(dev, &dev_priv->gart_info, radeon_gart_flush);
 +}
 +
 +int radeon_fence_types(struct drm_buffer_object *bo, uint32_t * class, uint32_t * type)
 +{
 +      *class = 0;
 +      *type = 1;
 +      return 0;
 +}
 +
 +int radeon_invalidate_caches(struct drm_device * dev, uint64_t flags)
 +{
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +      RING_LOCALS;
 +
 +      BEGIN_RING(4);
 +      RADEON_FLUSH_CACHE();
 +      RADEON_FLUSH_ZCACHE();
 +      ADVANCE_RING();
 +      return 0;
 +}
 +
 +int radeon_init_mem_type(struct drm_device * dev, uint32_t type,
 +                       struct drm_mem_type_manager * man)
 +{
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +
 +      switch (type) {
 +      case DRM_BO_MEM_LOCAL:
 +              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 +                  _DRM_FLAG_MEMTYPE_CACHED;
 +              man->drm_bus_maptype = 0;
 +              break;
 +      case DRM_BO_MEM_VRAM:
 +              man->flags =  _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
 +              man->io_addr = NULL;
 +              man->drm_bus_maptype = _DRM_FRAME_BUFFER;
 +              man->io_offset = drm_get_resource_start(dev, 0);
 +              man->io_size = drm_get_resource_len(dev, 0);
 +              break;
 +      case DRM_BO_MEM_TT:
 +              if (dev_priv->flags & RADEON_IS_AGP) {
 +                      if (!(drm_core_has_AGP(dev) && dev->agp)) {
 +                              DRM_ERROR("AGP is not enabled for memory type %u\n",
 +                                        (unsigned)type);
 +                              return -EINVAL;
 +                      }
 +                      man->io_offset = dev->agp->agp_info.aper_base;
 +                      man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
 +                      man->io_addr = NULL;
 +                      man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 +                              _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
 +                      man->drm_bus_maptype = _DRM_AGP;
 +              } else {
 +                      man->io_offset = dev_priv->gart_vm_start;
 +                      man->io_size = dev_priv->gart_size;
 +                      man->io_addr = NULL;
 +                      man->flags = _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CMA;
 +                      man->drm_bus_maptype = _DRM_SCATTER_GATHER;
 +              }
 +              break;
 +      default:
 +              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
 +              return -EINVAL;
 +      }
 +      return 0;
 +}
 +
 +static void radeon_emit_copy_blit(struct drm_device * dev,
 +                                uint32_t src_offset,
 +                                uint32_t dst_offset,
 +                                uint32_t pages, int direction)
 +{
 +      uint32_t cur_pages;
 +      uint32_t stride = PAGE_SIZE;
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +      uint32_t format, height;
 +      RING_LOCALS;
 +
 +      if (!dev_priv)
 +              return;
 +
 +      /* 32-bit copy format */
 +      format = RADEON_COLOR_FORMAT_ARGB8888;
 +
 +      /* radeon limited to 16k stride */
 +      stride &= 0x3fff;
 +      while(pages > 0) {
 +              cur_pages = pages;
 +              if (cur_pages > 2048)
 +                      cur_pages = 2048;
 +              pages -= cur_pages;
 +
 +              /* needs verification */
 +              BEGIN_RING(7);          
 +              OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
 +              OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
 +                       RADEON_GMC_DST_PITCH_OFFSET_CNTL |
 +                       RADEON_GMC_BRUSH_NONE |
 +                       (format << 8) |
 +                       RADEON_GMC_SRC_DATATYPE_COLOR |
 +                       RADEON_ROP3_S |
 +                       RADEON_DP_SRC_SOURCE_MEMORY |
 +                       RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
 +              if (direction) {
 +                      OUT_RING((stride << 22) | (src_offset >> 10));
 +                      OUT_RING((stride << 22) | (dst_offset >> 10));
 +              } else {
 +                      OUT_RING((stride << 22) | (dst_offset >> 10));
 +                      OUT_RING((stride << 22) | (src_offset >> 10));
 +              }
 +              OUT_RING(0);
 +              OUT_RING(pages); /* x - y */
 +              OUT_RING((stride << 16) | cur_pages);
 +              ADVANCE_RING();
 +      }
 +
 +      BEGIN_RING(2);
 +      RADEON_WAIT_UNTIL_2D_IDLE();
 +      ADVANCE_RING();
 +
 +      return;
 +}
 +
 +static int radeon_move_blit(struct drm_buffer_object * bo,
 +                          int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
 +{
 +      struct drm_bo_mem_reg *old_mem = &bo->mem;
 +      int dir = 0;
 +
 +      if ((old_mem->mem_type == new_mem->mem_type) &&
 +          (new_mem->mm_node->start <
 +           old_mem->mm_node->start + old_mem->mm_node->size)) {
 +              dir = 1;
 +      }
 +
 +      radeon_emit_copy_blit(bo->dev,
 +                            old_mem->mm_node->start << PAGE_SHIFT,
 +                            new_mem->mm_node->start << PAGE_SHIFT,
 +                            new_mem->num_pages, dir);
 +
 +      
 +      return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
 +                                       DRM_FENCE_TYPE_EXE, 0,
 +                                       new_mem);
 +}
 +
 +static int radeon_move_flip(struct drm_buffer_object * bo,
 +                          int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
 +{
 +      struct drm_device *dev = bo->dev;
 +      struct drm_bo_mem_reg tmp_mem;
 +      int ret;
 +
 +      tmp_mem = *new_mem;
 +      tmp_mem.mm_node = NULL;
 +      //      tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
 +      //          DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
 +
 +      ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
 +      if (ret)
 +              return ret;
 +
 +      ret = drm_ttm_bind(bo->ttm, &tmp_mem);
 +      if (ret)
 +              goto out_cleanup;
 +
 +      ret = radeon_move_blit(bo, 1, no_wait, &tmp_mem);
 +      if (ret)
 +              goto out_cleanup;
 +
 +      ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
 +out_cleanup:
 +      if (tmp_mem.mm_node) {
 +              mutex_lock(&dev->struct_mutex);
 +              if (tmp_mem.mm_node != bo->pinned_node)
 -                      drm_memrange_put_block(tmp_mem.mm_node);
++                      drm_mm_put_block(tmp_mem.mm_node);
 +              tmp_mem.mm_node = NULL;
 +              mutex_unlock(&dev->struct_mutex);
 +      }
 +      return ret;
 +}
 +
 +int radeon_move(struct drm_buffer_object * bo,
 +              int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
 +{
 +      struct drm_bo_mem_reg *old_mem = &bo->mem;
 +
 +      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +#if 0
 +      DRM_DEBUG("\n");
 +      if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
 +              return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +      } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
 +              if (radeon_move_flip(bo, evict, no_wait, new_mem))
 +                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +      } else {
 +              if (radeon_move_blit(bo, evict, no_wait, new_mem))
 +                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +      }
 +      return 0;
 +#endif
 +}
 +
 +
 +/*
 + * i915_evict_flags:
 + *
 + * @bo: the buffer object to be evicted
 + *
 + * Return the bo flags for a buffer which is not mapped to the hardware.
 + * These will be placed in proposed_flags so that when the move is
 + * finished, they'll end up in bo->mem.flags
 + */
 +uint64_t radeon_evict_flags(struct drm_buffer_object *bo)
 +{
 +      switch (bo->mem.mem_type) {
 +      case DRM_BO_MEM_LOCAL:
 +      case DRM_BO_MEM_TT:
 +              return DRM_BO_FLAG_MEM_LOCAL;
 +      default:
 +              return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
 +      }
 +}
   */
  int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
        u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
+       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
        u32 last_acthd = I915_READ(acthd_reg);
        u32 acthd;
        int i;
@@@ -124,27 -153,16 +124,14 @@@ int i915_dma_cleanup(struct drm_device 
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);
  
 -      if (dev_priv->ring.virtual_start) {
 -              drm_core_ioremapfree(&dev_priv->ring.map, dev);
 -              dev_priv->ring.virtual_start = 0;
 -              dev_priv->ring.map.handle = 0;
 -              dev_priv->ring.map.size = 0;
 -      }
 -
 -      if (I915_NEED_GFX_HWS(dev))
 -              i915_free_hardware_status(dev);
  +      if (dev_priv->ring.virtual_start) {
  +              drm_core_ioremapfree(&dev_priv->ring.map, dev);
  +              dev_priv->ring.virtual_start = 0;
  +              dev_priv->ring.map.handle = 0;
  +              dev_priv->ring.map.size = 0;
  +              dev_priv->ring.Size = 0;
  +      }
  
-         if (dev_priv->status_page_dmah) {
-                 drm_pci_free(dev, dev_priv->status_page_dmah);
-                 dev_priv->status_page_dmah = NULL;
-                 /* Need to rewrite hardware status page */
-                 I915_WRITE(0x02080, 0x1ffff000);
-         }
-         if (dev_priv->hws_agpoffset) {
-                 dev_priv->hws_agpoffset = 0;
-                 drm_core_ioremapfree(&dev_priv->hws_map, dev);
-                 I915_WRITE(0x02080, 0x1ffff000);
-         }
        return 0;
  }
  
@@@ -266,32 -294,12 +253,13 @@@ static int i915_initialize(struct drm_d
         */
        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
  
-       /* Program Hardware Status Page */
-       if (!I915_NEED_GFX_HWS(dev)) {
-               dev_priv->status_page_dmah =
-                       drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-               if (!dev_priv->status_page_dmah) {
-                       i915_dma_cleanup(dev);
-                       DRM_ERROR("Can not allocate hardware status page\n");
-                       return -ENOMEM;
-               }
-               dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
-               dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-               memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
-               I915_WRITE(0x02080, dev_priv->dma_status_page);
-       }
-       DRM_DEBUG("Enabled hardware status page\n");
  #ifdef I915_HAVE_BUFFER
 -      mutex_init(&dev_priv->cmdbuf_mutex);
 -#endif
 -#if defined(I915_HAVE_BUFFER)
  +      if (!drm_core_check_feature(dev, DRIVER_MODESET))
  +              mutex_init(&dev_priv->cmdbuf_mutex);
 +#ifdef DRI2
        if (init->func == I915_INIT_DMA2) {
 -              ret = setup_dri2_sarea(dev, file_priv, init);
 +              int ret = setup_dri2_sarea(dev, file_priv, init);
                if (ret) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("could not set up dri2 sarea\n");
@@@ -324,12 -333,12 +292,12 @@@ static int i915_dma_resume(struct drm_d
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
-       DRM_DEBUG("hw status page @ %p\n", dev_priv->hws_vaddr);
+       DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
  
-       if (dev_priv->hws_agpoffset != 0)
-               I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
+       if (dev_priv->status_gfx_addr != 0)
 -              I915_WRITE(0x02080, dev_priv->status_gfx_addr);
++              I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        else
 -              I915_WRITE(0x02080, dev_priv->dma_status_page);
 +              I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");
  
        return 0;
@@@ -1027,13 -1014,9 +998,13 @@@ static int i915_set_status_page(struct 
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
 +
 +      if (drm_core_check_feature(dev, DRIVER_MODESET))
 +              return 0;
 +
        DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
  
-       dev_priv->hws_agpoffset = hws->addr & (0x1ffff<<12);
+       dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
  
        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
                                " G33 hw status page\n");
                return -ENOMEM;
        }
-       dev_priv->hws_vaddr = dev_priv->hws_map.handle;
 +
+       dev_priv->hw_status_page = dev_priv->hws_map.handle;
  
-       memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
-       I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
-       DRM_DEBUG("load hws at %p\n", dev_priv->hws_vaddr);
+       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+       I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 -      DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
 -                      dev_priv->status_gfx_addr);
+       DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
 -      return 0;
 -}
 -
 -int i915_driver_load(struct drm_device *dev, unsigned long flags)
 -{
 -      struct drm_i915_private *dev_priv;
 -      unsigned long base, size;
 -      int ret = 0, num_pipes = 2, mmio_bar = IS_I9XX(dev) ? 0 : 1;
 -
 -      /* i915 has 4 more counters */
 -      dev->counters += 4;
 -      dev->types[6] = _DRM_STAT_IRQ;
 -      dev->types[7] = _DRM_STAT_PRIMARY;
 -      dev->types[8] = _DRM_STAT_SECONDARY;
 -      dev->types[9] = _DRM_STAT_DMA;
 -
 -      dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
 -      if (dev_priv == NULL)
 -              return -ENOMEM;
 -
 -      memset(dev_priv, 0, sizeof(drm_i915_private_t));
 -
 -      dev->dev_private = (void *)dev_priv;
 -      dev_priv->dev = dev;
 -
 -      /* Add register map (needed for suspend/resume) */
 -      base = drm_get_resource_start(dev, mmio_bar);
 -      size = drm_get_resource_len(dev, mmio_bar);
 -
 -      ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
 -              _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
 -
 -      i915_gem_load(dev);
 -
 -#ifdef __linux__
 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 -      intel_init_chipset_flush_compat(dev);
 -#endif
 -      intel_opregion_init(dev);
 -#endif
 -
 -      /* Init HWS */
 -      if (!I915_NEED_GFX_HWS(dev)) {
 -              ret = i915_init_hardware_status(dev);
 -              if(ret)
 -                      return ret;
 -      }
 -
 -      I915_WRITE16(HWSTAM, 0xeffe);
 -      I915_WRITE16(IMR, 0x0);
 -      I915_WRITE16(IER, 0x0);
 -
 -      DRM_SPININIT(&dev_priv->swaps_lock, "swap");
 -      INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
 -      dev_priv->swaps_pending = 0;
 -
 -      DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
 -      dev_priv->user_irq_refcount = 0;
 -      dev_priv->irq_mask_reg = ~0;
 -
 -      ret = drm_vblank_init(dev, num_pipes);
 -      if (ret)
 -              return ret;
 -
 -      dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 -      dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 -
 -      i915_enable_interrupt(dev);
 -      DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
 -
 -      /*
 -       * Initialize the hardware status page IRQ location.
 -       */
 -
 -      I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
 -
 -      return ret;
 -}
 -
 -int i915_driver_unload(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 temp;
 -
 -      if (dev_priv) {
 -              dev_priv->vblank_pipe = 0;
 -
 -              dev_priv->irq_enabled = 0;
 -              I915_WRITE(HWSTAM, 0xffffffff);
 -              I915_WRITE(IMR, 0xffffffff);
 -              I915_WRITE(IER, 0x0);
 -
 -              temp = I915_READ(PIPEASTAT);
 -              I915_WRITE(PIPEASTAT, temp);
 -              temp = I915_READ(PIPEBSTAT);
 -              I915_WRITE(PIPEBSTAT, temp);
 -              temp = I915_READ(IIR);
 -              I915_WRITE(IIR, temp);
 -      }
 -
 -      i915_free_hardware_status(dev);
 -
 -      drm_rmmap(dev, dev_priv->mmio_map);
 -
 -#ifdef __linux__
 -      intel_opregion_free(dev);
 -#endif
 -
 -      drm_free(dev->dev_private, sizeof(drm_i915_private_t),
 -               DRM_MEM_DRIVER);
 -#ifdef __linux__
 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 -      intel_fini_chipset_flush_compat(dev);
 -#endif
 -#endif
 -      return 0;
 -}
 -
 -void i915_driver_lastclose(struct drm_device * dev)
 -{
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -
 -      /* agp off can use this to get called before dev_priv */
 -      if (!dev_priv)
 -              return;
 -
 -#ifdef I915_HAVE_BUFFER
 -      if (dev_priv->val_bufs) {
 -              vfree(dev_priv->val_bufs);
 -              dev_priv->val_bufs = NULL;
 -      }
 -#endif
 -      i915_gem_lastclose(dev);
 -
 -      if (drm_getsarea(dev) && dev_priv->sarea_priv)
 -              i915_do_cleanup_pageflip(dev);
 -      if (dev_priv->agp_heap)
 -              i915_mem_takedown(&(dev_priv->agp_heap));
 -#if defined(I915_HAVE_BUFFER)
 -      if (dev_priv->sarea_kmap.virtual) {
 -              drm_bo_kunmap(&dev_priv->sarea_kmap);
 -              dev_priv->sarea_kmap.virtual = NULL;
 -              dev->lock.hw_lock = NULL;
 -              dev->sigdata.lock = NULL;
 -      }
 -
 -      if (dev_priv->sarea_bo) {
 -              mutex_lock(&dev->struct_mutex);
 -              drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
 -              mutex_unlock(&dev->struct_mutex);
 -              dev_priv->sarea_bo = NULL;
 -      }
 -#endif
 -      i915_dma_cleanup(dev);
 -}
 -
 -int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 -{
 -      struct drm_i915_file_private *i915_file_priv;
 -
 -      DRM_DEBUG("\n");
 -      i915_file_priv = (struct drm_i915_file_private *)
 -          drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
 -
 -      if (!i915_file_priv)
 -              return -ENOMEM;
 -
 -      file_priv->driver_priv = i915_file_priv;
 -
 -      i915_file_priv->mm.last_gem_seqno = 0;
 -      i915_file_priv->mm.last_gem_throttle_seqno = 0;
  
        return 0;
  }
Simple merge
diff --cc shared-core/i915_drv.h
@@@ -79,7 -77,15 +79,15 @@@ enum pipe 
  struct drm_i915_validate_buffer;
  #endif
  
 -typedef struct _drm_i915_ring_buffer {
+ #define WATCH_COHERENCY       0
+ #define WATCH_BUF     0
+ #define WATCH_EXEC    0
+ #define WATCH_LRU     0
+ #define WATCH_RELOC   0
+ #define WATCH_INACTIVE        0
+ #define WATCH_PWRITE  0
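+ /*
+  * Compile-time debug switches: set any of the WATCH_* flags above to 1
+  * to enable the corresponding verbose checks and dumps from the GEM
+  * debug helpers (see i915_gem_debug.c).
+  */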
 +struct drm_i915_ring_buffer {
        int tail_mask;
        unsigned long Size;
        u8 *virtual_start;
@@@ -124,28 -128,22 +132,28 @@@ struct intel_opregion 
  };
  #endif
  
 -typedef struct drm_i915_private {
 -      struct drm_device *dev;
 -
 +struct drm_i915_master_private {
        drm_local_map_t *sarea;
 +      struct drm_i915_sarea *sarea_priv;
 +};
  +
  +struct drm_i915_private {
  +      struct drm_device *dev;
 +
        drm_local_map_t *mmio_map;
  
 -      drm_i915_sarea_t *sarea_priv;
 -      drm_i915_ring_buffer_t ring;
 +      unsigned long mmiobase;
 +      unsigned long mmiolen;
 +
 +      struct drm_i915_ring_buffer ring;
  
 -      drm_dma_handle_t *status_page_dmah;
 +      struct drm_dma_handle *status_page_dmah;
+       void *hw_status_page;
        dma_addr_t dma_status_page;
        uint32_t counter;
-       uint32_t hws_agpoffset;
 -      unsigned int status_gfx_addr;
++      uint32_t status_gfx_addr;
        drm_local_map_t hws_map;
-       void *hws_vaddr;
-       struct drm_memrange_node *hws;
+       struct drm_gem_object *hws_obj;
  
        unsigned int cpp;
  
        int fence_irq_on;
        uint32_t irq_mask_reg;
        int irq_enabled;
-       struct drm_memrange vram;
 +      struct workqueue_struct *wq;
 +
 +      bool cursor_needs_physical;
 +
++      struct drm_mm vram;
  
  #ifdef I915_HAVE_FENCE
        uint32_t flush_sequence;
  #endif
  
        DRM_SPINTYPE swaps_lock;
 -      drm_i915_vbl_swap_t vbl_swaps;
 +      struct drm_i915_vbl_swap vbl_swaps;
        unsigned int swaps_pending;
 -#if defined(I915_HAVE_BUFFER)
 +
 +      /* LVDS info */
 +      int backlight_duty_cycle;  /* restore backlight to this value */
 +      bool panel_wants_dither;
 +      struct drm_display_mode *panel_fixed_mode;
 +      struct drm_display_mode *vbt_mode; /* if any */
 +
 +#if defined(I915_HAVE_BUFFER) && defined(DRI2)
        /* DRI2 sarea */
 -      struct drm_buffer_object *sarea_bo;
 -      struct drm_bo_kmap_obj sarea_kmap;
 +      struct drm_gem_object *sarea_object;
  +      struct drm_bo_kmap_obj sarea_kmap;
  #endif
  
-       struct {
-               struct drm_memrange gtt_space;
-               /**
-                * List of objects currently involved in rendering from the
-                * ringbuffer.
-                *
-                * A reference is held on the buffer while on this list.
-                */
-               struct list_head active_list;
-               /**
-                * List of objects which are not in the ringbuffer but which
-                * still have a write_domain which needs to be flushed before
-                * unbinding.
-                *
-                * A reference is held on the buffer while on this list.
-                */
-               struct list_head flushing_list;
-               /**
-                * LRU list of objects which are not in the ringbuffer and
-                * are ready to unbind, but are still in the GTT.
-                *
-                * A reference is not held on the buffer while on this list,
-                * as merely being GTT-bound shouldn't prevent its being
-                * freed, and we'll pull it off the list in the free path.
-                */
-               struct list_head inactive_list;
-               /**
-                * List of breadcrumbs associated with GPU requests currently
-                * outstanding.
-                */
-               struct list_head request_list;
-               /**
-                * We leave the user IRQ off as much as possible,
-                * but this means that requests will finish and never
-                * be retired once the system goes idle. Set a timer to
-                * fire periodically while the ring is running. When it
-                * fires, go retire requests.
-                */
-               struct delayed_work retire_work;
-               
-               uint32_t next_gem_seqno;
-               /**
-                * Waiting sequence number, if any
-                */
-               uint32_t waiting_gem_seqno;
-       
-               /**
-                * Last seq seen at irq time
-                */
-               uint32_t irq_gem_seqno;
-               /**
-                * Flag if the X Server, and thus DRM, is not currently in
-                * control of the device.
-                *
-                * This is set between LeaveVT and EnterVT.  It needs to be
-                * replaced with a semaphore.  It also needs to be
-                * transitioned away from for kernel modesetting.
-                */
-               int suspended;
-               /**
-                * Flag if the hardware appears to be wedged.
-                *
-                * This is set when attempts to idle the device timeout.
-                * It prevents command submission from occuring and makes
-                * every pending request fail
-                */
-               int wedged;
-       } mm;
 +      /* Feature bits from the VBIOS */
  +      unsigned int int_tv_support:1;
  +      unsigned int lvds_dither:1;
  +      unsigned int lvds_vbt:1;
  +      unsigned int int_crt_support:1;
 +
 +      struct work_struct user_interrupt_task;
 +
  #ifdef __linux__
        struct intel_opregion opregion;
  #endif
@@@ -597,15 -586,26 +624,32 @@@ void i915_gem_object_unpin(struct drm_g
  void i915_gem_lastclose(struct drm_device *dev);
  uint32_t i915_get_gem_seqno(struct drm_device *dev);
  void i915_gem_retire_requests(struct drm_device *dev);
 +int i915_gem_init_ringbuffer(struct drm_device *dev);
 +void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 +int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 +                   unsigned long end);
  void i915_gem_retire_work_handler(struct work_struct *work);
+ void i915_gem_clflush_object(struct drm_gem_object *obj);
  #endif
  
 +extern unsigned int i915_fbpercrtc;
 +
+ /* i915_gem_tiling.c */
+ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+ /* i915_gem_debug.c */
+ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+                         const char *where, uint32_t mark);
+ #if WATCH_INACTIVE
+ void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+ #else
+ #define i915_verify_inactive(dev,file,line)
+ #endif
+ void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+ void i915_dump_lru(struct drm_device *dev, const char *where);
  #ifdef __linux__
  /* i915_opregion.c */
  extern int intel_opregion_init(struct drm_device *dev);
index 009d447,0000000..4f2d3a4
mode 100644,000000..100644
--- /dev/null
+++ b/shared-core/i915_init.c
@@@ -1,594 -1,0 +1,495 @@@
- static int i915_init_hwstatus(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_memrange_node *free_space;
-       int ret = 0;
-       /* Program Hardware Status Page */
-       if (!IS_G33(dev)) {
-               dev_priv->status_page_dmah = 
-                       drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-               if (!dev_priv->status_page_dmah) {
-                       DRM_ERROR("Can not allocate hardware status page\n");
-                       ret = -ENOMEM;
-                       goto out;
-               }
-               dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
-               dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-               I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-       } else {
-               free_space = drm_memrange_search_free(&dev_priv->vram,
-                                                     PAGE_SIZE,
-                                                     PAGE_SIZE, 0);
-               if (!free_space) {
-                       DRM_ERROR("No free vram available, aborting\n");
-                       ret = -ENOMEM;
-                       goto out;
-               }
-               dev_priv->hws = drm_memrange_get_block(free_space, PAGE_SIZE,
-                                                      PAGE_SIZE);
-               if (!dev_priv->hws) {
-                       DRM_ERROR("Unable to allocate or pin hw status page\n");
-                       ret = -EINVAL;
-                       goto out;
-               }
-               dev_priv->hws_agpoffset = dev_priv->hws->start;
-               dev_priv->hws_map.offset = dev->agp->base +
-                       dev_priv->hws->start;
-               dev_priv->hws_map.size = PAGE_SIZE;
-               dev_priv->hws_map.type= 0;
-               dev_priv->hws_map.flags= 0;
-               dev_priv->hws_map.mtrr = 0;
-               drm_core_ioremap(&dev_priv->hws_map, dev);
-               if (dev_priv->hws_map.handle == NULL) {
-                       dev_priv->hws_agpoffset = 0;
-                       DRM_ERROR("can not ioremap virtual addr for"
-                                       "G33 hw status page\n");
-                       ret = -ENOMEM;
-                       goto out_free;
-               }
-               dev_priv->hws_vaddr = dev_priv->hws_map.handle;
-               I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
-       }
-       memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
-       DRM_DEBUG("Enabled hardware status page\n");
-       return 0;
- out_free:
-       /* free hws */
- out:
-       return ret;
- }
- static void i915_cleanup_hwstatus(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       if (!IS_G33(dev)) {
-               if (dev_priv->status_page_dmah)
-                       drm_pci_free(dev, dev_priv->status_page_dmah);
-       } else {
-               if (dev_priv->hws_map.handle)
-                       drm_core_ioremapfree(&dev_priv->hws_map, dev);
-               if (dev_priv->hws)
-                       drm_memrange_put_block(dev_priv->hws);
-       }
-       I915_WRITE(HWS_PGA, 0x1ffff000);
- }
 +/*
 + * Copyright (c) 2007 Intel Corporation
 + *   Jesse Barnes <jesse.barnes@intel.com>
 + *
 + * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
 + *                   2004 Sylvain Meyer
 + *
 + * GPL/BSD dual license
 + */
 +#include "drmP.h"
 +#include "drm.h"
 +#include "drm_sarea.h"
 +#include "i915_drm.h"
 +#include "i915_drv.h"
 +#include "intel_bios.h"
 +#include "intel_drv.h"
 +
 +/**
 + * i915_probe_agp - get AGP bootup configuration
 + * @pdev: PCI device
 + * @aperture_size: returns AGP aperture configured size
 + * @preallocated_size: returns size of BIOS preallocated AGP space
 + *
 + * Since Intel integrated graphics are UMA, the BIOS has to set aside
 + * some RAM for the framebuffer at early boot.  This code figures out
 + * how much was set aside so we can use it for our own purposes.
 + */
 +int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
 +                 unsigned long *preallocated_size)
 +{
 +      struct pci_dev *bridge_dev;
 +      u16 tmp = 0;
 +      unsigned long overhead;
 +
 +      bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
 +      if (!bridge_dev) {
  +              DRM_ERROR("bridge device not found\n");
  +              return -ENODEV;
 +      }
 +
 +      /* Get the fb aperture size and "stolen" memory amount. */
 +      pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
 +      pci_dev_put(bridge_dev);
 +
 +      *aperture_size = 1024 * 1024;
 +      *preallocated_size = 1024 * 1024;
 +
 +      switch (pdev->device) {
 +      case PCI_DEVICE_ID_INTEL_82830_CGC:
 +      case PCI_DEVICE_ID_INTEL_82845G_IG:
 +      case PCI_DEVICE_ID_INTEL_82855GM_IG:
 +      case PCI_DEVICE_ID_INTEL_82865_IG:
 +              if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
 +                      *aperture_size *= 64;
 +              else
 +                      *aperture_size *= 128;
 +              break;
 +      default:
 +              /* 9xx supports large sizes, just look at the length */
 +              *aperture_size = pci_resource_len(pdev, 2);
 +              break;
 +      }
 +
 +      /*
 +       * Some of the preallocated space is taken by the GTT
 +       * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
 +       */
 +      overhead = (*aperture_size / 1024) + 4096;
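  +      /* e.g. a 128MB aperture implies 128KB of GTT plus the 4KB popup,
  +       * i.e. 132KB of overhead */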
 +      switch (tmp & INTEL_855_GMCH_GMS_MASK) {
 +      case INTEL_855_GMCH_GMS_STOLEN_1M:
 +              break; /* 1M already */
 +      case INTEL_855_GMCH_GMS_STOLEN_4M:
 +              *preallocated_size *= 4;
 +              break;
 +      case INTEL_855_GMCH_GMS_STOLEN_8M:
 +              *preallocated_size *= 8;
 +              break;
 +      case INTEL_855_GMCH_GMS_STOLEN_16M:
 +              *preallocated_size *= 16;
 +              break;
 +      case INTEL_855_GMCH_GMS_STOLEN_32M:
 +              *preallocated_size *= 32;
 +              break;
 +      case INTEL_915G_GMCH_GMS_STOLEN_48M:
 +              *preallocated_size *= 48;
 +              break;
 +      case INTEL_915G_GMCH_GMS_STOLEN_64M:
 +              *preallocated_size *= 64;
 +              break;
 +      case INTEL_855_GMCH_GMS_DISABLED:
 +              DRM_ERROR("video memory is disabled\n");
  +              return -ENODEV;
  +      default:
  +              DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
  +                      tmp & INTEL_855_GMCH_GMS_MASK);
  +              return -EINVAL;
 +      }
 +      *preallocated_size -= overhead;
 +
 +      return 0;
 +}
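  +
  +/*
  + * Worked example (hypothetical values): a 915G with a 256MB BAR 2 and
  + * the BIOS stealing 8MB yields aperture_size = 256MB and
  + * preallocated_size = 8MB minus 260KB (256KB of GTT + 4KB popup).
  + */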
 +
-       drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
 +static int i915_load_modeset_init(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      unsigned long agp_size, prealloc_size;
 +      int ret = 0;
 +
  +      ret = i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
  +      if (ret)
  +              return ret;
 +
  +      /* Basic drm_mm range manager for stolen space (aka vram) */
-       ret = i915_init_hwstatus(dev);
-       if (ret)
-               goto destroy_ringbuffer;
++      drm_mm_init(&dev_priv->vram, 0, prealloc_size);
 +      /* Let GEM Manage from end of prealloc space to end of aperture */
 +      i915_gem_do_init(dev, prealloc_size, agp_size);
 +
 +      ret = i915_gem_init_ringbuffer(dev);
 +      if (ret)
 +              goto out;
 +
-               goto destroy_hws;
 +      /* Allow hardware batchbuffers unless told otherwise.
 +       */
 +      dev_priv->allow_batchbuffer = 1;
 +      dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
 +      mutex_init(&dev_priv->cmdbuf_mutex);
 +
 +      dev_priv->wq = create_singlethread_workqueue("i915");
  +      if (!dev_priv->wq) {
  +              DRM_ERROR("Failed to create the i915 workqueue\n");
  +              ret = -ENOMEM;
- destroy_hws:
-       i915_cleanup_hwstatus(dev);
++              goto destroy_ringbuffer;
 +      }
 +
 +      ret = intel_init_bios(dev);
 +      if (ret) {
 +              DRM_ERROR("failed to find VBIOS tables\n");
 +              ret = -ENODEV;
 +              goto destroy_wq;
 +      }
 +
 +      intel_modeset_init(dev);
 +      drm_helper_initial_config(dev, false);
 +
 +      dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
 +      if (!dev->devname) {
 +              ret = -ENOMEM;
 +              goto modeset_cleanup;
 +      }
 +
 +      ret = drm_irq_install(dev);
 +      if (ret) {
 +              kfree(dev->devname);
 +              goto modeset_cleanup;
 +      }
 +      return 0;
 +
 +modeset_cleanup:
 +      intel_modeset_cleanup(dev);
 +destroy_wq:
 +      destroy_workqueue(dev_priv->wq);
-       INIT_LIST_HEAD(&dev_priv->mm.active_list);
-       INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-       INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-       INIT_LIST_HEAD(&dev_priv->mm.request_list);
-       INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
-                         i915_gem_retire_work_handler);
-       dev_priv->mm.next_gem_seqno = 1;
 +destroy_ringbuffer:
 +      i915_gem_cleanup_ringbuffer(dev);
 +out:
 +      return ret;
 +}
 +
 +/**
 + * i915_driver_load - setup chip and create an initial config
 + * @dev: DRM device
 + * @flags: startup flags
 + *
 + * The driver load routine has to do several things:
 + *   - drive output discovery via intel_modeset_init()
 + *   - initialize the memory manager
 + *   - allocate initial config memory
 + *   - setup the DRM framebuffer with the allocated memory
 + */
 +int i915_driver_load(struct drm_device *dev, unsigned long flags)
 +{
 +      struct drm_i915_private *dev_priv;
 +      int ret = 0, num_pipes = 2;
 +      u32 tmp;
 +
 +      dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
 +      if (dev_priv == NULL)
 +              return -ENOMEM;
 +
 +      memset(dev_priv, 0, sizeof(struct drm_i915_private));
 +      dev->dev_private = (void *)dev_priv;
 +      dev_priv->dev = dev;
 +
 +      /* i915 has 4 more counters */
 +      dev->counters += 4;
 +      dev->types[6] = _DRM_STAT_IRQ;
 +      dev->types[7] = _DRM_STAT_PRIMARY;
 +      dev->types[8] = _DRM_STAT_SECONDARY;
 +      dev->types[9] = _DRM_STAT_DMA;
 +
  +      /* Pre-965 chips need the cursor base programmed with a physical
  +       * address; 965G/G33 class hardware can take a GTT offset. */
  +      dev_priv->cursor_needs_physical =
  +              (IS_MOBILE(dev) || IS_I9XX(dev)) &&
  +              !IS_I965G(dev) && !IS_G33(dev);
 +
  +      if (IS_I9XX(dev))
  +              /* config reg 0x5C (BSM) holds the base of stolen memory */
  +              pci_read_config_dword(dev->pdev, 0x5C, &dev_priv->stolen_base);
 +
  +      /* 9xx chips expose MMIO in BAR 0 and the aperture in BAR 2;
  +       * older parts use BAR 1 for MMIO and BAR 0 for the aperture. */
  +      if (IS_I9XX(dev)) {
 +              dev_priv->mmiobase = drm_get_resource_start(dev, 0);
 +              dev_priv->mmiolen = drm_get_resource_len(dev, 0);
 +              dev->mode_config.fb_base =
 +                      drm_get_resource_start(dev, 2) & 0xff000000;
 +      } else if (drm_get_resource_start(dev, 1)) {
 +              dev_priv->mmiobase = drm_get_resource_start(dev, 1);
 +              dev_priv->mmiolen = drm_get_resource_len(dev, 1);
 +              dev->mode_config.fb_base =
 +                      drm_get_resource_start(dev, 0) & 0xff000000;
 +      } else {
 +              DRM_ERROR("Unable to find MMIO registers\n");
 +              ret = -ENODEV;
 +              goto free_priv;
 +      }
 +
 +      DRM_DEBUG("fb_base: 0x%08lx\n", dev->mode_config.fb_base);
 +
 +      ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
 +                       _DRM_REGISTERS, _DRM_KERNEL|_DRM_READ_ONLY|_DRM_DRIVER,
 +                       &dev_priv->mmio_map);
 +      if (ret != 0) {
 +              DRM_ERROR("Cannot add mapping for MMIO registers\n");
 +              goto free_priv;
 +      }
 +
-       i915_cleanup_hwstatus(dev);
++      i915_gem_load(dev);
 +
 +#ifdef __linux__
 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 +      intel_init_chipset_flush_compat(dev);
 +#endif
 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
 +      intel_opregion_init(dev);
 +#endif
 +#endif
 +
 +      tmp = I915_READ(PIPEASTAT);
 +      I915_WRITE(PIPEASTAT, tmp);
 +      tmp = I915_READ(PIPEBSTAT);
 +      I915_WRITE(PIPEBSTAT, tmp);
 +
 +      atomic_set(&dev_priv->irq_received, 0);
 +      I915_WRITE(HWSTAM, 0xeffe);
 +      I915_WRITE(IMR, 0x0);
 +      I915_WRITE(IER, 0x0);
 +
 +      DRM_SPININIT(&dev_priv->swaps_lock, "swap");
 +      INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
 +      dev_priv->swaps_pending = 0;
 +
 +      DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
 +      dev_priv->user_irq_refcount = 0;
 +      dev_priv->irq_mask_reg = ~0;
 +
 +      ret = drm_vblank_init(dev, num_pipes);
 +      if (ret)
 +              goto out_rmmap;
 +
 +      ret = drm_hotplug_init(dev);
 +      if (ret)
 +              goto out_rmmap;
 +
 +      dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 +      dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 +
 +      i915_enable_interrupt(dev);
 +      DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
 +
 +      /*
 +       * Initialize the hardware status page IRQ location.
 +       */
 +
 +      I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
 +
 +      if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 +              ret = i915_load_modeset_init(dev);
 +              if (ret < 0) {
 +                      DRM_ERROR("failed to init modeset\n");
 +                      goto out_rmmap;
 +              }
 +      }
 +
 +      return 0;
 +
 +out_rmmap:
 +      drm_rmmap(dev, dev_priv->mmio_map);
 +free_priv:
 +      drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
 +      return ret;
 +}
 +
 +int i915_driver_unload(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
  +      u32 temp;
 +
 +      dev_priv->vblank_pipe = 0;
 +
 +      dev_priv->irq_enabled = 0;
 +
 +      I915_WRITE(HWSTAM, 0xffffffff);
 +      I915_WRITE(IMR, 0xffffffff);
 +      I915_WRITE(IER, 0x0);
 +
 +      temp = I915_READ(PIPEASTAT);
 +      I915_WRITE(PIPEASTAT, temp);
 +      temp = I915_READ(PIPEBSTAT);
 +      I915_WRITE(PIPEBSTAT, temp);
 +      temp = I915_READ(IIR);
 +      I915_WRITE(IIR, temp);
 +
 +      I915_WRITE(PRB0_CTL, 0);
 +
 +      if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 +              drm_irq_uninstall(dev);
 +              intel_modeset_cleanup(dev);
 +              destroy_workqueue(dev_priv->wq);
 +      }
 +
 +#if 0
 +      if (dev_priv->ring.virtual_start) {
 +              drm_core_ioremapfree(&dev_priv->ring.map, dev);
 +      }
 +#endif
 +
 +#ifdef DRI2
 +      if (dev_priv->sarea_kmap.virtual) {
 +              drm_bo_kunmap(&dev_priv->sarea_kmap);
 +              dev_priv->sarea_kmap.virtual = NULL;
 +              dev->sigdata.lock = NULL;
 +      }
 +
 +      if (dev_priv->sarea_bo) {
 +              mutex_lock(&dev->struct_mutex);
 +              drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
 +              mutex_unlock(&dev->struct_mutex);
 +              dev_priv->sarea_bo = NULL;
 +      }
 +#endif
-               drm_memrange_takedown(&dev_priv->vram);
 +
 +      if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 +              mutex_lock(&dev->struct_mutex);
 +              i915_gem_cleanup_ringbuffer(dev);
 +              mutex_unlock(&dev->struct_mutex);
++              drm_mm_takedown(&dev_priv->vram);
 +              i915_gem_lastclose(dev);
 +      }
 +
  +      drm_rmmap(dev, dev_priv->mmio_map);
 +
 +#ifdef __linux__
 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
 +      intel_opregion_free(dev);
 +#endif
 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 +      intel_fini_chipset_flush_compat(dev);
 +#endif
 +#endif
 +
 +      drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
 +
 +      dev->dev_private = NULL;
 +      return 0;
 +}
 +
 +int i915_master_create(struct drm_device *dev, struct drm_master *master)
 +{
 +      struct drm_i915_master_private *master_priv;
 +      unsigned long sareapage;
 +      int ret;
 +
 +      master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
 +      if (!master_priv)
 +              return -ENOMEM;
 +
 +      /* prebuild the SAREA */
 +      sareapage = max(SAREA_MAX, PAGE_SIZE);
 +      ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
 +                       &master_priv->sarea);
 +      if (ret) {
 +              DRM_ERROR("SAREA setup failed\n");
 +              return ret;
 +      }
  +      /* the driver-private sarea follows the generic drm_sarea header
  +       * in the same mapping */
  +      master_priv->sarea_priv = master_priv->sarea->handle +
  +              sizeof(struct drm_sarea);
  +      master_priv->sarea_priv->pf_current_page = 0;
 +
 +      master->driver_priv = master_priv;
 +      return 0;
 +}
 +
 +void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 +{
 +      struct drm_i915_master_private *master_priv = master->driver_priv;
 +
 +      if (!master_priv)
 +              return;
 +
 +      if (master_priv->sarea)
 +              drm_rmmap(dev, master_priv->sarea);
  +
 +      drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
 +
 +      master->driver_priv = NULL;
 +}
 +
 +int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 +{
 +      struct drm_i915_file_private *i915_file_priv;
 +
 +      DRM_DEBUG("\n");
 +      i915_file_priv = (struct drm_i915_file_private *)
 +          drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
 +
 +      if (!i915_file_priv)
 +              return -ENOMEM;
 +
 +      file_priv->driver_priv = i915_file_priv;
 +
 +      i915_file_priv->mm.last_gem_seqno = 0;
 +      i915_file_priv->mm.last_gem_throttle_seqno = 0;
 +
 +      return 0;
 +}
 +
 +void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 +{
 +      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
 +
 +      drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
 +}
 +
 +void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 +{
  +      struct drm_i915_private *dev_priv = dev->dev_private;
  +
  +      if (!drm_core_check_feature(dev, DRIVER_MODESET))
 +              i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 +}
 +
 +void i915_driver_lastclose(struct drm_device * dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      if (drm_core_check_feature(dev, DRIVER_MODESET))
 +              return;
 +
 +#ifdef I915_HAVE_BUFFER
 +      if (dev_priv->val_bufs) {
 +              vfree(dev_priv->val_bufs);
 +              dev_priv->val_bufs = NULL;
 +      }
 +#endif
 +
 +      i915_gem_lastclose(dev);
 +
 +      if (dev_priv->agp_heap)
 +              i915_mem_takedown(&(dev_priv->agp_heap));
 +
 +#if defined(DRI2)
 +      if (dev_priv->sarea_kmap.virtual) {
 +              drm_bo_kunmap(&dev_priv->sarea_kmap);
 +              dev_priv->sarea_kmap.virtual = NULL;
 +              dev->control->master->lock.hw_lock = NULL;
 +              dev->sigdata.lock = NULL;
 +      }
 +
 +      if (dev_priv->sarea_bo) {
 +              mutex_lock(&dev->struct_mutex);
 +              drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
 +              mutex_unlock(&dev->struct_mutex);
 +              dev_priv->sarea_bo = NULL;
 +      }
 +#endif
  +
 +      i915_dma_cleanup(dev);
 +}
 +
 +int i915_driver_firstopen(struct drm_device *dev)
 +{
 +      if (drm_core_check_feature(dev, DRIVER_MODESET))
 +              return 0;
 +#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
 +      drm_bo_driver_init(dev);
 +#endif
 +      return 0;
 +}
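  +
  +/*
  + * Illustrative only (the real table lives in i915_drv.c): these hooks
  + * are plugged into the drm_driver template, roughly:
  + *
  + *     .load           = i915_driver_load,
  + *     .unload         = i915_driver_unload,
  + *     .firstopen      = i915_driver_firstopen,
  + *     .open           = i915_driver_open,
  + *     .preclose       = i915_driver_preclose,
  + *     .postclose      = i915_driver_postclose,
  + *     .lastclose      = i915_driver_lastclose,
  + *     .master_create  = i915_master_create,
  + *     .master_destroy = i915_master_destroy,
  + */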
@@@ -819,81 -722,17 +819,76 @@@ void i915_disable_vblank(struct drm_dev
  
  void i915_enable_interrupt (struct drm_device *dev)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
  +      struct drm_connector *o;
  +
  +      dev_priv->irq_mask_reg = ~0;
  
 -      dev_priv->irq_mask_reg = ~0;
 -      I915_WRITE(IMR, dev_priv->irq_mask_reg);
 -      I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
 -      (void) I915_READ (IER);
 +      if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
 +              if (dev->mode_config.num_connector)
 +                      dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
 +      } else {
 +              if (dev->mode_config.num_connector)
 +                      dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
 +
 +              /* Enable global interrupts for hotplug - not a pipeA event */
 +              I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) |
 +                         PIPE_HOTPLUG_INTERRUPT_ENABLE |
 +                         PIPE_HOTPLUG_TV_INTERRUPT_ENABLE |
 +                         PIPE_HOTPLUG_TV_INTERRUPT_STATUS |
 +                         PIPE_HOTPLUG_INTERRUPT_STATUS);
 +      }
 +
 +      if (!(dev_priv->irq_mask_reg & I915_DISPLAY_PORT_INTERRUPT) ||
 +          !(dev_priv->irq_mask_reg & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
 +              u32 temp = 0;
 +
 +              if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
 +                      temp = I915_READ(PORT_HOTPLUG_EN);
 +
 +                      /* Activate the CRT */
 +                      temp |= CRT_HOTPLUG_INT_EN;
 +              }
 +
 +              if (IS_I9XX(dev)) {
 +                      /* SDVOB */
 +                      o = intel_sdvo_find(dev, 1);
 +                      if (o && intel_sdvo_supports_hotplug(o)) {
 +                              intel_sdvo_set_hotplug(o, 1);
 +                              temp |= SDVOB_HOTPLUG_INT_EN;
 +                      }
 +
 +                      /* SDVOC */
 +                      o = intel_sdvo_find(dev, 0);
 +                      if (o && intel_sdvo_supports_hotplug(o)) {
 +                              intel_sdvo_set_hotplug(o, 1);
 +                              temp |= SDVOC_HOTPLUG_INT_EN;
 +                      }
 +
 +                      I915_WRITE(SDVOB, I915_READ(SDVOB) | SDVO_INTERRUPT_ENABLE);
 +                      I915_WRITE(SDVOC, I915_READ(SDVOC) | SDVO_INTERRUPT_ENABLE);
 +
 +                      /* TV */
 +                      I915_WRITE(TV_DAC, I915_READ(TV_DAC) | TVDAC_STATE_CHG_EN);
 +              } else {
  +                      /* FIXME: DVO hotplug detection is not wired up yet */
 +              }
 +
 +              if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
 +                      I915_WRITE(PORT_HOTPLUG_EN, temp);
 +
  +                      DRM_DEBUG("HEN %08x\n", I915_READ(PORT_HOTPLUG_EN));
  +                      DRM_DEBUG("HST %08x\n", I915_READ(PORT_HOTPLUG_STAT));
 +
 +                      I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 +              }
 +      }
  
  #ifdef __linux__
 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
        opregion_enable_asle(dev);
  #endif
 -
 +#endif
-       I915_WRITE(IMR, dev_priv->irq_mask_reg);
-       I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
-       (void) I915_READ (IER);
        dev_priv->irq_enabled = 1;
  }