Merge remote-tracking branch 'airlied/drm-fixes' into drm-intel-next-queued
authorDaniel Vetter <daniel.vetter@ffwll.ch>
Fri, 10 Feb 2012 15:52:55 +0000 (16:52 +0100)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Fri, 10 Feb 2012 16:14:49 +0000 (17:14 +0100)
Back-merge from drm-fixes into drm-intel-next to sort out two things:

- interlaced support: -fixes contains a bugfix to correctly clear
  interlaced configuration bits in case the bios sets up an interlaced
  mode and we want to set up the progressive mode (current kernels
  don't support interlaced). The actual feature work to support
  interlaced depends upon (and conflicts with) this bugfix.

- forcewake voodoo to workaround missed IRQ issues: -fixes only enabled
  this for ivybridge, but some recent bug reports indicate that we
  need this on Sandybridge, too. But in a slightly different flavour
  and with other fixes and reworks on top. Additionally there are some
  forcewake cleanup patches heading to -next that would conflict with
  current -fixes.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
1  2 
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sprite.c

index 5d24581452ebcdadd6878c35b9f25221443fd477,deaa657292b45b910a81cd9a018baf81c1fce6dc..ae73288a9699cbb7c02915680e656fced951842c
@@@ -83,7 -83,6 +83,7 @@@ static int i915_capabilities(struct seq
        B(supports_tv);
        B(has_bsd_ring);
        B(has_blt_ring);
 +      B(has_llc);
  #undef B
  
        return 0;
@@@ -122,11 -121,11 +122,11 @@@ static const char *cache_level_str(int 
  static void
  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
  {
-       seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
+       seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
-                  obj->base.size,
+                  obj->base.size / 1024,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_rendering_seqno,
@@@ -564,6 -563,45 +564,6 @@@ static int i915_hws_info(struct seq_fil
        return 0;
  }
  
 -static void i915_dump_object(struct seq_file *m,
 -                           struct io_mapping *mapping,
 -                           struct drm_i915_gem_object *obj)
 -{
 -      int page, page_count, i;
 -
 -      page_count = obj->base.size / PAGE_SIZE;
 -      for (page = 0; page < page_count; page++) {
 -              u32 *mem = io_mapping_map_wc(mapping,
 -                                           obj->gtt_offset + page * PAGE_SIZE);
 -              for (i = 0; i < PAGE_SIZE; i += 4)
 -                      seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
 -              io_mapping_unmap(mem);
 -      }
 -}
 -
 -static int i915_batchbuffer_info(struct seq_file *m, void *data)
 -{
 -      struct drm_info_node *node = (struct drm_info_node *) m->private;
 -      struct drm_device *dev = node->minor->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct drm_i915_gem_object *obj;
 -      int ret;
 -
 -      ret = mutex_lock_interruptible(&dev->struct_mutex);
 -      if (ret)
 -              return ret;
 -
 -      list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 -              if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
 -                  seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
 -                  i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
 -              }
 -      }
 -
 -      mutex_unlock(&dev->struct_mutex);
 -      return 0;
 -}
 -
  static int i915_ringbuffer_data(struct seq_file *m, void *data)
  {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@@ -615,7 -653,7 +615,7 @@@ static int i915_ringbuffer_info(struct 
        seq_printf(m, "  Size :    %08x\n", ring->size);
        seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
        seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
                seq_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
                seq_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
        }
  static const char *ring_str(int ring)
  {
        switch (ring) {
 -      case RING_RENDER: return " render";
 -      case RING_BSD: return " bsd";
 -      case RING_BLT: return " blt";
 +      case RCS: return "render";
 +      case VCS: return "bsd";
 +      case BCS: return "blt";
        default: return "";
        }
  }
@@@ -675,7 -713,7 +675,7 @@@ static void print_error_buffers(struct 
        seq_printf(m, "%s [%d]:\n", name, count);
  
        while (count--) {
 -              seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
 +              seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
                           err->gtt_offset,
                           err->size,
                           err->read_domains,
                           tiling_flag(err->tiling),
                           dirty_flag(err->dirty),
                           purgeable_flag(err->purgeable),
 +                         err->ring != -1 ? " " : "",
                           ring_str(err->ring),
                           cache_level_str(err->cache_level));
  
        }
  }
  
 +static void i915_ring_error_state(struct seq_file *m,
 +                                struct drm_device *dev,
 +                                struct drm_i915_error_state *error,
 +                                unsigned ring)
 +{
 +      seq_printf(m, "%s command stream:\n", ring_str(ring));
 +      seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
 +      seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
 +      seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
 +      seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
 +      seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
 +      seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
 +      if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
 +              seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
 +              seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
 +      }
 +      if (INTEL_INFO(dev)->gen >= 4)
 +              seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
 +      seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
 +      if (INTEL_INFO(dev)->gen >= 6) {
 +              seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
 +              seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
 +              seq_printf(m, "  SYNC_0: 0x%08x\n",
 +                         error->semaphore_mboxes[ring][0]);
 +              seq_printf(m, "  SYNC_1: 0x%08x\n",
 +                         error->semaphore_mboxes[ring][1]);
 +      }
 +      seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
 +      seq_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
 +      seq_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
 +}
 +
  static int i915_error_state(struct seq_file *m, void *unused)
  {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 +
 +      for (i = 0; i < dev_priv->num_fence_regs; i++)
 +              seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 +
        if (INTEL_INFO(dev)->gen >= 6) {
                seq_printf(m, "ERROR: 0x%08x\n", error->error);
 -              seq_printf(m, "Blitter command stream:\n");
 -              seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
 -              seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
 -              seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
 -              seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
 -              seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
 -              seq_printf(m, "Video (BSD) command stream:\n");
 -              seq_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
 -              seq_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
 -              seq_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
 -              seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
 -              seq_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
 -      }
 -      seq_printf(m, "Render command stream:\n");
 -      seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
 -      seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
 -      seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
 -      seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
 -      if (INTEL_INFO(dev)->gen >= 4) {
 -              seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
 -              seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 +              seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }
 -      seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
 -      seq_printf(m, "  seqno: 0x%08x\n", error->seqno);
  
 -      for (i = 0; i < dev_priv->num_fence_regs; i++)
 -              seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 +      i915_ring_error_state(m, dev, error, RCS);
 +      if (HAS_BLT(dev))
 +              i915_ring_error_state(m, dev, error, BCS);
 +      if (HAS_BSD(dev))
 +              i915_ring_error_state(m, dev, error, VCS);
  
        if (error->active_bo)
                print_error_buffers(m, "Active",
@@@ -1055,6 -1075,7 +1055,7 @@@ static int gen6_drpc_info(struct seq_fi
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rpmodectl1, gt_core_status, rcctl1;
+       unsigned forcewake_count;
        int count=0, ret;
  
  
        if (ret)
                return ret;
  
-       if (atomic_read(&dev_priv->forcewake_count)) {
-               seq_printf(m, "RC information inaccurate because userspace "
-                             "holds a reference \n");
+       spin_lock_irq(&dev_priv->gt_lock);
+       forcewake_count = dev_priv->forcewake_count;
+       spin_unlock_irq(&dev_priv->gt_lock);
+       if (forcewake_count) {
+               seq_printf(m, "RC information inaccurate because somebody "
+                             "holds a forcewake reference \n");
        } else {
                /* NB: we cannot use forcewake, else we read the wrong values */
                while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
        seq_printf(m, "SW control enabled: %s\n",
                   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
                          GEN6_RP_MEDIA_SW_MODE));
-       seq_printf(m, "RC6 Enabled: %s\n",
+       seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
@@@ -1378,115 -1403,20 +1383,119 @@@ static int i915_gen6_forcewake_count_in
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned forcewake_count;
+       spin_lock_irq(&dev_priv->gt_lock);
+       forcewake_count = dev_priv->forcewake_count;
+       spin_unlock_irq(&dev_priv->gt_lock);
  
-       seq_printf(m, "forcewake count = %d\n",
-                  atomic_read(&dev_priv->forcewake_count));
+       seq_printf(m, "forcewake count = %u\n", forcewake_count);
  
        return 0;
  }
  
 +static const char *swizzle_string(unsigned swizzle)
 +{
 +      switch(swizzle) {
 +      case I915_BIT_6_SWIZZLE_NONE:
 +              return "none";
 +      case I915_BIT_6_SWIZZLE_9:
 +              return "bit9";
 +      case I915_BIT_6_SWIZZLE_9_10:
 +              return "bit9/bit10";
 +      case I915_BIT_6_SWIZZLE_9_11:
 +              return "bit9/bit11";
 +      case I915_BIT_6_SWIZZLE_9_10_11:
 +              return "bit9/bit10/bit11";
 +      case I915_BIT_6_SWIZZLE_9_17:
 +              return "bit9/bit17";
 +      case I915_BIT_6_SWIZZLE_9_10_17:
 +              return "bit9/bit10/bit17";
 +      case I915_BIT_6_SWIZZLE_UNKNOWN:
 +              return "unkown";
 +      }
 +
 +      return "bug";
 +}
 +
 +static int i915_swizzle_info(struct seq_file *m, void *data)
 +{
 +      struct drm_info_node *node = (struct drm_info_node *) m->private;
 +      struct drm_device *dev = node->minor->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      mutex_lock(&dev->struct_mutex);
 +      seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
 +                 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
 +      seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
 +                 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
 +
 +      if (IS_GEN3(dev) || IS_GEN4(dev)) {
 +              seq_printf(m, "DDC = 0x%08x\n",
 +                         I915_READ(DCC));
 +              seq_printf(m, "C0DRB3 = 0x%04x\n",
 +                         I915_READ16(C0DRB3));
 +              seq_printf(m, "C1DRB3 = 0x%04x\n",
 +                         I915_READ16(C1DRB3));
 +      } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
 +              seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
 +                         I915_READ(MAD_DIMM_C0));
 +              seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
 +                         I915_READ(MAD_DIMM_C1));
 +              seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
 +                         I915_READ(MAD_DIMM_C2));
 +              seq_printf(m, "TILECTL = 0x%08x\n",
 +                         I915_READ(TILECTL));
 +              seq_printf(m, "ARB_MODE = 0x%08x\n",
 +                         I915_READ(ARB_MODE));
 +              seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
 +                         I915_READ(DISP_ARB_CTL));
 +      }
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return 0;
 +}
 +
 +static int i915_ppgtt_info(struct seq_file *m, void *data)
 +{
 +      struct drm_info_node *node = (struct drm_info_node *) m->private;
 +      struct drm_device *dev = node->minor->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_ring_buffer *ring;
 +      int i, ret;
 +
 +
 +      ret = mutex_lock_interruptible(&dev->struct_mutex);
 +      if (ret)
 +              return ret;
 +      if (INTEL_INFO(dev)->gen == 6)
 +              seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 +
 +      for (i = 0; i < I915_NUM_RINGS; i++) {
 +              ring = &dev_priv->ring[i];
 +
 +              seq_printf(m, "%s\n", ring->name);
 +              if (INTEL_INFO(dev)->gen == 7)
 +                      seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
 +              seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
 +              seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
 +              seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
 +      }
 +      if (dev_priv->mm.aliasing_ppgtt) {
 +              struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 +
 +              seq_printf(m, "aliasing PPGTT:\n");
 +              seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
 +      }
 +      seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return 0;
 +}
 +
  static int
 -i915_wedged_open(struct inode *inode,
 -               struct file *filp)
 +i915_debugfs_common_open(struct inode *inode,
 +                       struct file *filp)
  {
        filp->private_data = inode->i_private;
        return 0;
@@@ -1542,12 -1472,20 +1551,12 @@@ i915_wedged_write(struct file *filp
  
  static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
 -      .open = i915_wedged_open,
 +      .open = i915_debugfs_common_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
        .llseek = default_llseek,
  };
  
 -static int
 -i915_max_freq_open(struct inode *inode,
 -                 struct file *filp)
 -{
 -      filp->private_data = inode->i_private;
 -      return 0;
 -}
 -
  static ssize_t
  i915_max_freq_read(struct file *filp,
                   char __user *ubuf,
@@@ -1604,12 -1542,20 +1613,12 @@@ i915_max_freq_write(struct file *filp
  
  static const struct file_operations i915_max_freq_fops = {
        .owner = THIS_MODULE,
 -      .open = i915_max_freq_open,
 +      .open = i915_debugfs_common_open,
        .read = i915_max_freq_read,
        .write = i915_max_freq_write,
        .llseek = default_llseek,
  };
  
 -static int
 -i915_cache_sharing_open(struct inode *inode,
 -                 struct file *filp)
 -{
 -      filp->private_data = inode->i_private;
 -      return 0;
 -}
 -
  static ssize_t
  i915_cache_sharing_read(struct file *filp,
                   char __user *ubuf,
@@@ -1675,7 -1621,7 +1684,7 @@@ i915_cache_sharing_write(struct file *f
  
  static const struct file_operations i915_cache_sharing_fops = {
        .owner = THIS_MODULE,
 -      .open = i915_cache_sharing_open,
 +      .open = i915_debugfs_common_open,
        .read = i915_cache_sharing_read,
        .write = i915_cache_sharing_write,
        .llseek = default_llseek,
@@@ -1707,13 -1653,28 +1716,13 @@@ drm_add_fake_info_node(struct drm_mino
        return 0;
  }
  
 -static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 -{
 -      struct drm_device *dev = minor->dev;
 -      struct dentry *ent;
 -
 -      ent = debugfs_create_file("i915_wedged",
 -                                S_IRUGO | S_IWUSR,
 -                                root, dev,
 -                                &i915_wedged_fops);
 -      if (IS_ERR(ent))
 -              return PTR_ERR(ent);
 -
 -      return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
 -}
 -
  static int i915_forcewake_open(struct inode *inode, struct file *file)
  {
        struct drm_device *dev = inode->i_private;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
  
-       if (!IS_GEN6(dev))
+       if (INTEL_INFO(dev)->gen < 6)
                return 0;
  
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@@ -1730,7 -1691,7 +1739,7 @@@ int i915_forcewake_release(struct inod
        struct drm_device *dev = inode->i_private;
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       if (!IS_GEN6(dev))
+       if (INTEL_INFO(dev)->gen < 6)
                return 0;
  
        /*
@@@ -1768,22 -1729,34 +1777,22 @@@ static int i915_forcewake_create(struc
        return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
  }
  
 -static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
 +static int i915_debugfs_create(struct dentry *root,
 +                             struct drm_minor *minor,
 +                             const char *name,
 +                             const struct file_operations *fops)
  {
        struct drm_device *dev = minor->dev;
        struct dentry *ent;
  
 -      ent = debugfs_create_file("i915_max_freq",
 +      ent = debugfs_create_file(name,
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
 -                                &i915_max_freq_fops);
 +                                fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);
  
 -      return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
 -}
 -
 -static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
 -{
 -      struct drm_device *dev = minor->dev;
 -      struct dentry *ent;
 -
 -      ent = debugfs_create_file("i915_cache_sharing",
 -                                S_IRUGO | S_IWUSR,
 -                                root, dev,
 -                                &i915_cache_sharing_fops);
 -      if (IS_ERR(ent))
 -              return PTR_ERR(ent);
 -
 -      return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
 +      return drm_add_fake_info_node(minor, ent, fops);
  }
  
  static struct drm_info_list i915_debugfs_list[] = {
        {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
        {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
        {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
 -      {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
        {"i915_context_status", i915_context_status, 0},
        {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
 +      {"i915_swizzle_info", i915_swizzle_info, 0},
 +      {"i915_ppgtt_info", i915_ppgtt_info, 0},
  };
  #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
  
@@@ -1833,25 -1805,17 +1842,25 @@@ int i915_debugfs_init(struct drm_minor 
  {
        int ret;
  
 -      ret = i915_wedged_create(minor->debugfs_root, minor);
 +      ret = i915_debugfs_create(minor->debugfs_root, minor,
 +                                "i915_wedged",
 +                                &i915_wedged_fops);
        if (ret)
                return ret;
  
        ret = i915_forcewake_create(minor->debugfs_root, minor);
        if (ret)
                return ret;
 -      ret = i915_max_freq_create(minor->debugfs_root, minor);
 +
 +      ret = i915_debugfs_create(minor->debugfs_root, minor,
 +                                "i915_max_freq",
 +                                &i915_max_freq_fops);
        if (ret)
                return ret;
 -      ret = i915_cache_sharing_create(minor->debugfs_root, minor);
 +
 +      ret = i915_debugfs_create(minor->debugfs_root, minor,
 +                                "i915_cache_sharing",
 +                                &i915_cache_sharing_fops);
        if (ret)
                return ret;
  
index 40bfafa13b7233b28a29c04a12909d7e090e0d5f,ddfe3d902b2a3a5d908b0c7664a4348f94ce3988..38dfcf91f400332af1ea9641699d375d9384c33f
@@@ -784,9 -784,6 +784,9 @@@ static int i915_getparam(struct drm_dev
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
 +      case I915_PARAM_HAS_LLC:
 +              value = HAS_LLC(dev);
 +              break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@@ -1196,39 -1193,22 +1196,39 @@@ static int i915_load_gem_init(struct dr
        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
  
 -      /* Let GEM Manage all of the aperture.
 -       *
 -       * However, leave one page at the end still bound to the scratch page.
 -       * There are a number of places where the hardware apparently
 -       * prefetches past the end of the object, and we've seen multiple
 -       * hangs with the GPU head pointer stuck in a batchbuffer bound
 -       * at the last page of the aperture.  One page should be enough to
 -       * keep any prefetching inside of the aperture.
 -       */
 -      i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
 +      if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
 +              /* PPGTT pdes are stolen from global gtt ptes, so shrink the
 +               * aperture accordingly when using aliasing ppgtt. */
 +              gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
 +              /* For paranoia keep the guard page in between. */
 +              gtt_size -= PAGE_SIZE;
 +
 +              i915_gem_do_init(dev, 0, mappable_size, gtt_size);
 +
 +              ret = i915_gem_init_aliasing_ppgtt(dev);
 +              if (ret)
 +                      return ret;
 +      } else {
 +              /* Let GEM Manage all of the aperture.
 +               *
 +               * However, leave one page at the end still bound to the scratch
 +               * page.  There are a number of places where the hardware
 +               * apparently prefetches past the end of the object, and we've
 +               * seen multiple hangs with the GPU head pointer stuck in a
 +               * batchbuffer bound at the last page of the aperture.  One page
 +               * should be enough to keep any prefetching inside of the
 +               * aperture.
 +               */
 +              i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
 +      }
  
        mutex_lock(&dev->struct_mutex);
 -      ret = i915_gem_init_ringbuffer(dev);
 +      ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
 -      if (ret)
 +      if (ret) {
 +              i915_gem_cleanup_aliasing_ppgtt(dev);
                return ret;
 +      }
  
        /* Try to set up FBC with a reasonable compressed buffer size */
        if (I915_HAS_FBC(dev) && i915_powersave) {
@@@ -1315,7 -1295,6 +1315,7 @@@ cleanup_gem
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        mutex_unlock(&dev->struct_mutex);
 +      i915_gem_cleanup_aliasing_ppgtt(dev);
  cleanup_vga_switcheroo:
        vga_switcheroo_unregister_client(dev->pdev);
  cleanup_vga_client:
@@@ -2066,6 -2045,7 +2066,7 @@@ int i915_driver_load(struct drm_device 
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);
  
+       spin_lock_init(&dev_priv->gt_lock);
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->error_lock);
        spin_lock_init(&dev_priv->rps_lock);
@@@ -2149,7 -2129,7 +2150,7 @@@ int i915_driver_unload(struct drm_devic
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
  
        mutex_lock(&dev->struct_mutex);
 -      ret = i915_gpu_idle(dev);
 +      ret = i915_gpu_idle(dev, true);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
        mutex_unlock(&dev->struct_mutex);
                i915_gem_free_all_phys_object(dev);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
 +              i915_gem_cleanup_aliasing_ppgtt(dev);
                if (I915_HAS_FBC(dev) && i915_powersave)
                        i915_cleanup_compression(dev);
                drm_mm_takedown(&dev_priv->mm.stolen);
@@@ -2268,12 -2247,18 +2269,12 @@@ void i915_driver_lastclose(struct drm_d
  
        i915_gem_lastclose(dev);
  
 -      if (dev_priv->agp_heap)
 -              i915_mem_takedown(&(dev_priv->agp_heap));
 -
        i915_dma_cleanup(dev);
  }
  
  void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
        i915_gem_release(dev, file_priv);
 -      if (!drm_core_check_feature(dev, DRIVER_MODESET))
 -              i915_mem_release(dev, file_priv, dev_priv->agp_heap);
  }
  
  void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@@ -2292,11 -2277,11 +2293,11 @@@ struct drm_ioctl_desc i915_ioctls[] = 
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 -      DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
 -      DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
 -      DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 +      DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
 +      DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
 +      DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
 -      DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 +      DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
index d92c92dea4ec05df455fac3470f5e43ae05147ab,308f819135626c6b9c8d3805c953bd7a9ddf55dc..189041984aba1b3db287778f881c905703a3358f
@@@ -103,11 -103,6 +103,11 @@@ MODULE_PARM_DESC(enable_hangcheck
                "WARNING: Disabling this can cause system wide hangs. "
                "(default: true)");
  
 +bool i915_enable_ppgtt __read_mostly = 1;
 +module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
 +MODULE_PARM_DESC(i915_enable_ppgtt,
 +              "Enable PPGTT (default: true)");
 +
  static struct drm_driver driver;
  extern int intel_agp_enabled;
  
@@@ -203,7 -198,7 +203,7 @@@ static const struct intel_device_info i
  
  static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5,
 -      .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
 +      .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
  };
  
@@@ -219,7 -214,6 +219,7 @@@ static const struct intel_device_info i
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
 +      .has_llc = 1,
  };
  
  static const struct intel_device_info intel_sandybridge_m_info = {
        .has_fbc = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
 +      .has_llc = 1,
  };
  
  static const struct intel_device_info intel_ivybridge_d_info = {
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
 +      .has_llc = 1,
  };
  
  static const struct intel_device_info intel_ivybridge_m_info = {
        .has_fbc = 0,   /* FBC is not enabled on Ivybridge mobile yet */
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
 +      .has_llc = 1,
  };
  
  static const struct pci_device_id pciidlist[] = {             /* aka */
@@@ -377,11 -368,12 +377,12 @@@ void __gen6_gt_force_wake_mt_get(struc
   */
  void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
  {
-       WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       unsigned long irqflags;
  
-       /* Forcewake is atomic in case we get in here without the lock */
-       if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
+       spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+       if (dev_priv->forcewake_count++ == 0)
                dev_priv->display.force_wake_get(dev_priv);
+       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
  }
  
  void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
@@@ -401,10 -393,12 +402,12 @@@ void __gen6_gt_force_wake_mt_put(struc
   */
  void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
  {
-       WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       unsigned long irqflags;
  
-       if (atomic_dec_and_test(&dev_priv->forcewake_count))
+       spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+       if (--dev_priv->forcewake_count == 0)
                dev_priv->display.force_wake_put(dev_priv);
+       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
  }
  
  void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@@ -500,7 -494,7 +503,7 @@@ static int i915_drm_thaw(struct drm_dev
                mutex_lock(&dev->struct_mutex);
                dev_priv->mm.suspended = 0;
  
 -              error = i915_gem_init_ringbuffer(dev);
 +              error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
  
                if (HAS_PCH_SPLIT(dev))
@@@ -606,13 -600,40 +609,40 @@@ static int ironlake_do_reset(struct drm
  static int gen6_do_reset(struct drm_device *dev, u8 flags)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int     ret;
+       unsigned long irqflags;
  
-       I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
-       return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+       /* Hold gt_lock across reset to prevent any register access
+        * with forcewake not set correctly
+        */
+       spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+       /* Reset the chip */
+       /* GEN6_GDRST is not in the gt power well, no need to check
+        * for fifo space for the write or forcewake the chip for
+        * the read
+        */
+       I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
+       /* Spin waiting for the device to ack the reset request */
+       ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+       /* If reset with a user forcewake, try to restore, otherwise turn it off */
+       if (dev_priv->forcewake_count)
+               dev_priv->display.force_wake_get(dev_priv);
+       else
+               dev_priv->display.force_wake_put(dev_priv);
+       /* Restore fifo count */
+       dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+       return ret;
  }
  
  /**
 - * i965_reset - reset chip after a hang
 + * i915_reset - reset chip after a hang
   * @dev: drm device to reset
   * @flags: reset domains
   *
@@@ -652,9 -673,6 +682,6 @@@ int i915_reset(struct drm_device *dev, 
        case 7:
        case 6:
                ret = gen6_do_reset(dev, flags);
-               /* If reset with a user forcewake, try to restore */
-               if (atomic_read(&dev_priv->forcewake_count))
-                       __gen6_gt_force_wake_get(dev_priv);
                break;
        case 5:
                ret = ironlake_do_reset(dev, flags);
                        !dev_priv->mm.suspended) {
                dev_priv->mm.suspended = 0;
  
 +              i915_gem_init_swizzling(dev);
 +
                dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
                if (HAS_BSD(dev))
                    dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
                if (HAS_BLT(dev))
                    dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
  
 +              i915_gem_init_ppgtt(dev);
 +
                mutex_unlock(&dev->struct_mutex);
                drm_irq_uninstall(dev);
                drm_mode_config_reset(dev);
@@@ -940,9 -954,14 +967,14 @@@ MODULE_LICENSE("GPL and additional righ
  u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
        u##x val = 0; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-               gen6_gt_force_wake_get(dev_priv); \
+               unsigned long irqflags; \
+               spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
+               if (dev_priv->forcewake_count == 0) \
+                       dev_priv->display.force_wake_get(dev_priv); \
                val = read##y(dev_priv->regs + reg); \
-               gen6_gt_force_wake_put(dev_priv); \
+               if (dev_priv->forcewake_count == 0) \
+                       dev_priv->display.force_wake_put(dev_priv); \
+               spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
        } else { \
                val = read##y(dev_priv->regs + reg); \
        } \
index 45b609e6b131aa33c588ec571819159d56e8d599,9689ca38b2b333f26c75e95421e773b3331bd81c..922aed33035d01143940227df2a5e77fdc49a5be
@@@ -135,7 -135,6 +135,7 @@@ struct drm_i915_fence_reg 
        struct list_head lru_list;
        struct drm_i915_gem_object *obj;
        uint32_t setup_seqno;
 +      int pin_count;
  };
  
  struct sdvo_device_mapping {
@@@ -153,25 -152,26 +153,25 @@@ struct drm_i915_error_state 
        u32 eir;
        u32 pgtbl_er;
        u32 pipestat[I915_MAX_PIPES];
 -      u32 ipeir;
 -      u32 ipehr;
 -      u32 instdone;
 -      u32 acthd;
 +      u32 tail[I915_NUM_RINGS];
 +      u32 head[I915_NUM_RINGS];
 +      u32 ipeir[I915_NUM_RINGS];
 +      u32 ipehr[I915_NUM_RINGS];
 +      u32 instdone[I915_NUM_RINGS];
 +      u32 acthd[I915_NUM_RINGS];
 +      u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
 +      /* our own tracking of ring head and tail */
 +      u32 cpu_ring_head[I915_NUM_RINGS];
 +      u32 cpu_ring_tail[I915_NUM_RINGS];
        u32 error; /* gen6+ */
 -      u32 bcs_acthd; /* gen6+ blt engine */
 -      u32 bcs_ipehr;
 -      u32 bcs_ipeir;
 -      u32 bcs_instdone;
 -      u32 bcs_seqno;
 -      u32 vcs_acthd; /* gen6+ bsd engine */
 -      u32 vcs_ipehr;
 -      u32 vcs_ipeir;
 -      u32 vcs_instdone;
 -      u32 vcs_seqno;
 -      u32 instpm;
 -      u32 instps;
 +      u32 instpm[I915_NUM_RINGS];
 +      u32 instps[I915_NUM_RINGS];
        u32 instdone1;
 -      u32 seqno;
 +      u32 seqno[I915_NUM_RINGS];
        u64 bbaddr;
 +      u32 fault_reg[I915_NUM_RINGS];
 +      u32 done_reg;
 +      u32 faddr[I915_NUM_RINGS];
        u64 fence[I915_MAX_NUM_FENCES];
        struct timeval time;
        struct drm_i915_error_object {
@@@ -255,17 -255,6 +255,17 @@@ struct intel_device_info 
        u8 supports_tv:1;
        u8 has_bsd_ring:1;
        u8 has_blt_ring:1;
 +      u8 has_llc:1;
 +};
 +
 +#define I915_PPGTT_PD_ENTRIES 512
 +#define I915_PPGTT_PT_ENTRIES 1024
 +struct i915_hw_ppgtt {
 +      unsigned num_pd_entries;
 +      struct page **pt_pages;
 +      uint32_t pd_offset;
 +      dma_addr_t *pt_dma_addr;
 +      dma_addr_t scratch_page_dma_addr;
  };
  
  enum no_fbc_reason {
@@@ -299,7 -288,13 +299,13 @@@ typedef struct drm_i915_private 
        int relative_constants_mode;
  
        void __iomem *regs;
-       u32 gt_fifo_count;
+       /** gt_fifo_count and the subsequent register write are synchronized
+        * with dev->struct_mutex. */
+       unsigned gt_fifo_count;
+       /** forcewake_count is protected by gt_lock */
+       unsigned forcewake_count;
+       /** gt_lock is also taken in irq contexts. */
+       struct spinlock gt_lock;
  
        struct intel_gmbus {
                struct i2c_adapter adapter;
  
        int tex_lru_log_granularity;
        int allow_batchbuffer;
 -      struct mem_block *agp_heap;
        unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
        int vblank_pipe;
        int num_pipe;
                struct io_mapping *gtt_mapping;
                int gtt_mtrr;
  
 +              /** PPGTT used for aliasing the PPGTT with the GTT */
 +              struct i915_hw_ppgtt *aliasing_ppgtt;
 +
                struct shrinker inactive_shrinker;
  
                /**
  
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
-       atomic_t forcewake_count;
  } drm_i915_private_t;
  
  enum i915_cache_level {
@@@ -850,8 -841,6 +854,8 @@@ struct drm_i915_gem_object 
  
        unsigned int cache_level:2;
  
 +      unsigned int has_aliasing_ppgtt_mapping:1;
 +
        struct page **pages;
  
        /**
@@@ -985,11 -974,8 +989,11 @@@ struct drm_i915_file_private 
  
  #define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
  #define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
 +#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
  #define I915_NEED_GFX_HWS(dev)        (INTEL_INFO(dev)->need_gfx_hws)
  
 +#define HAS_ALIASING_PPGTT(dev)       (INTEL_INFO(dev)->gen >=6)
 +
  #define HAS_OVERLAY(dev)              (INTEL_INFO(dev)->has_overlay)
  #define OVERLAY_NEEDS_PHYSICAL(dev)   (INTEL_INFO(dev)->overlay_needs_physical)
  
@@@ -1032,7 -1018,6 +1036,7 @@@ extern int i915_vbt_sdvo_panel_type __r
  extern int i915_enable_rc6 __read_mostly;
  extern int i915_enable_fbc __read_mostly;
  extern bool i915_enable_hangcheck __read_mostly;
 +extern bool i915_enable_ppgtt __read_mostly;
  
  extern int i915_suspend(struct drm_device *dev, pm_message_t state);
  extern int i915_resume(struct drm_device *dev);
@@@ -1094,6 -1079,18 +1098,6 @@@ extern void i915_destroy_error_state(st
  #endif
  
  
 -/* i915_mem.c */
 -extern int i915_mem_alloc(struct drm_device *dev, void *data,
 -                        struct drm_file *file_priv);
 -extern int i915_mem_free(struct drm_device *dev, void *data,
 -                       struct drm_file *file_priv);
 -extern int i915_mem_init_heap(struct drm_device *dev, void *data,
 -                            struct drm_file *file_priv);
 -extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
 -                               struct drm_file *file_priv);
 -extern void i915_mem_takedown(struct mem_block **heap);
 -extern void i915_mem_release(struct drm_device * dev,
 -                           struct drm_file *file_priv, struct mem_block *heap);
  /* i915_gem.c */
  int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
@@@ -1184,24 -1181,6 +1188,24 @@@ int __must_check i915_gem_object_get_fe
                                           struct intel_ring_buffer *pipelined);
  int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
  
 +static inline void
 +i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 +{
 +      if (obj->fence_reg != I915_FENCE_REG_NONE) {
 +              struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 +              dev_priv->fence_regs[obj->fence_reg].pin_count++;
 +      }
 +}
 +
 +static inline void
 +i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 +{
 +      if (obj->fence_reg != I915_FENCE_REG_NONE) {
 +              struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 +              dev_priv->fence_regs[obj->fence_reg].pin_count--;
 +      }
 +}
 +
  void i915_gem_retire_requests(struct drm_device *dev);
  void i915_gem_reset(struct drm_device *dev);
  void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@@ -1209,22 -1188,19 +1213,22 @@@ int __must_check i915_gem_object_set_do
                                            uint32_t read_domains,
                                            uint32_t write_domain);
  int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 -int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 +int __must_check i915_gem_init_hw(struct drm_device *dev);
 +void i915_gem_init_swizzling(struct drm_device *dev);
 +void i915_gem_init_ppgtt(struct drm_device *dev);
  void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
  void i915_gem_do_init(struct drm_device *dev,
                      unsigned long start,
                      unsigned long mappable_end,
                      unsigned long end);
 -int __must_check i915_gpu_idle(struct drm_device *dev);
 +int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
  int __must_check i915_gem_idle(struct drm_device *dev);
  int __must_check i915_add_request(struct intel_ring_buffer *ring,
                                  struct drm_file *file,
                                  struct drm_i915_gem_request *request);
  int __must_check i915_wait_request(struct intel_ring_buffer *ring,
 -                                 uint32_t seqno);
 +                                 uint32_t seqno,
 +                                 bool do_retire);
  int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
  int __must_check
  i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@@ -1251,14 -1227,6 +1255,14 @@@ int i915_gem_object_set_cache_level(str
                                    enum i915_cache_level cache_level);
  
  /* i915_gem_gtt.c */
 +int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 +void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
 +void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 +                          struct drm_i915_gem_object *obj,
 +                          enum i915_cache_level cache_level);
 +void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 +                            struct drm_i915_gem_object *obj);
 +
  void i915_gem_restore_gtt_mappings(struct drm_device *dev);
  int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
  void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
index 9835b2efd93e4e6d2d74c71105a33dbfee8bb3ca,65e1f0043f9df5564d63eb8e95d413f7b5f01513..81687af00893ca5c20edfe2256dd5b98da7fdf45
@@@ -203,9 -203,9 +203,9 @@@ i915_gem_object_set_to_gpu_domain(struc
        cd->invalidate_domains |= invalidate_domains;
        cd->flush_domains |= flush_domains;
        if (flush_domains & I915_GEM_GPU_DOMAINS)
 -              cd->flush_rings |= obj->ring->id;
 +              cd->flush_rings |= intel_ring_flag(obj->ring);
        if (invalidate_domains & I915_GEM_GPU_DOMAINS)
 -              cd->flush_rings |= ring->id;
 +              cd->flush_rings |= intel_ring_flag(ring);
  }
  
  struct eb_objects {
@@@ -287,14 -287,14 +287,14 @@@ i915_gem_execbuffer_relocate_entry(stru
         * exec_object list, so it should have a GTT space bound by now.
         */
        if (unlikely(target_offset == 0)) {
 -              DRM_ERROR("No GTT space found for object %d\n",
 +              DRM_DEBUG("No GTT space found for object %d\n",
                          reloc->target_handle);
                return ret;
        }
  
        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
 -              DRM_ERROR("reloc with multiple write domains: "
 +              DRM_DEBUG("reloc with multiple write domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          reloc->write_domain);
                return ret;
        }
 -      if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
 -              DRM_ERROR("reloc with read/write CPU domains: "
 +      if (unlikely((reloc->write_domain | reloc->read_domains)
 +                   & ~I915_GEM_GPU_DOMAINS)) {
 +              DRM_DEBUG("reloc with read/write non-GPU domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
        }
        if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
                     reloc->write_domain != target_obj->pending_write_domain)) {
 -              DRM_ERROR("Write domain conflict: "
 +              DRM_DEBUG("Write domain conflict: "
                          "obj %p target %d offset %d "
                          "new %08x old %08x\n",
                          obj, reloc->target_handle,
  
        /* Check that the relocation address is valid... */
        if (unlikely(reloc->offset > obj->base.size - 4)) {
 -              DRM_ERROR("Relocation beyond object bounds: "
 +              DRM_DEBUG("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                return ret;
        }
        if (unlikely(reloc->offset & 3)) {
 -              DRM_ERROR("Relocation not 4-byte aligned: "
 +              DRM_DEBUG("Relocation not 4-byte aligned: "
                          "obj %p target %d offset %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset);
@@@ -462,60 -461,11 +462,60 @@@ i915_gem_execbuffer_relocate(struct drm
        return ret;
  }
  
 +#define  __EXEC_OBJECT_HAS_FENCE (1<<31)
 +
 +static int
 +pin_and_fence_object(struct drm_i915_gem_object *obj,
 +                   struct intel_ring_buffer *ring)
 +{
 +      struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 +      bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 +      bool need_fence, need_mappable;
 +      int ret;
 +
 +      need_fence =
 +              has_fenced_gpu_access &&
 +              entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 +              obj->tiling_mode != I915_TILING_NONE;
 +      need_mappable =
 +              entry->relocation_count ? true : need_fence;
 +
 +      ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
 +      if (ret)
 +              return ret;
 +
 +      if (has_fenced_gpu_access) {
 +              if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 +                      if (obj->tiling_mode) {
 +                              ret = i915_gem_object_get_fence(obj, ring);
 +                              if (ret)
 +                                      goto err_unpin;
 +
 +                              entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 +                              i915_gem_object_pin_fence(obj);
 +                      } else {
 +                              ret = i915_gem_object_put_fence(obj);
 +                              if (ret)
 +                                      goto err_unpin;
 +                      }
 +              }
 +              obj->pending_fenced_gpu_access = need_fence;
 +      }
 +
 +      entry->offset = obj->gtt_offset;
 +      return 0;
 +
 +err_unpin:
 +      i915_gem_object_unpin(obj);
 +      return ret;
 +}
 +
  static int
  i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct drm_file *file,
                            struct list_head *objects)
  {
 +      drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret, retry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
                list_for_each_entry(obj, objects, exec_list) {
                        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
                        bool need_fence, need_mappable;
 +
                        if (!obj->gtt_space)
                                continue;
  
                            (need_mappable && !obj->map_and_fenceable))
                                ret = i915_gem_object_unbind(obj);
                        else
 -                              ret = i915_gem_object_pin(obj,
 -                                                        entry->alignment,
 -                                                        need_mappable);
 +                              ret = pin_and_fence_object(obj, ring);
                        if (ret)
                                goto err;
 -
 -                      entry++;
                }
  
                /* Bind fresh objects */
                list_for_each_entry(obj, objects, exec_list) {
 -                      struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 -                      bool need_fence;
 +                      if (obj->gtt_space)
 +                              continue;
  
 -                      need_fence =
 -                              has_fenced_gpu_access &&
 -                              entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 -                              obj->tiling_mode != I915_TILING_NONE;
 +                      ret = pin_and_fence_object(obj, ring);
 +                      if (ret) {
 +                              int ret_ignore;
 +
 +                              /* This can potentially raise a harmless
 +                               * -EINVAL if we failed to bind in the above
 +                               * call. It cannot raise -EINTR since we know
 +                               * that the bo is freshly bound and so will
 +                               * not need to be flushed or waited upon.
 +                               */
 +                              ret_ignore = i915_gem_object_unbind(obj);
 +                              (void)ret_ignore;
 +                              WARN_ON(obj->gtt_space);
 +                              break;
 +                      }
 +              }
  
 -                      if (!obj->gtt_space) {
 -                              bool need_mappable =
 -                                      entry->relocation_count ? true : need_fence;
 +              /* Decrement pin count for bound objects */
 +              list_for_each_entry(obj, objects, exec_list) {
 +                      struct drm_i915_gem_exec_object2 *entry;
  
 -                              ret = i915_gem_object_pin(obj,
 -                                                        entry->alignment,
 -                                                        need_mappable);
 -                              if (ret)
 -                                      break;
 -                      }
 +                      if (!obj->gtt_space)
 +                              continue;
  
 -                      if (has_fenced_gpu_access) {
 -                              if (need_fence) {
 -                                      ret = i915_gem_object_get_fence(obj, ring);
 -                                      if (ret)
 -                                              break;
 -                              } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 -                                         obj->tiling_mode == I915_TILING_NONE) {
 -                                      /* XXX pipelined! */
 -                                      ret = i915_gem_object_put_fence(obj);
 -                                      if (ret)
 -                                              break;
 -                              }
 -                              obj->pending_fenced_gpu_access = need_fence;
 +                      entry = obj->exec_entry;
 +                      if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
 +                              i915_gem_object_unpin_fence(obj);
 +                              entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
                        }
  
 -                      entry->offset = obj->gtt_offset;
 -              }
 +                      i915_gem_object_unpin(obj);
  
 -              /* Decrement pin count for bound objects */
 -              list_for_each_entry(obj, objects, exec_list) {
 -                      if (obj->gtt_space)
 -                              i915_gem_object_unpin(obj);
 +                      /* ... and ensure ppgtt mapping exist if needed. */
 +                      if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
 +                              i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
 +                                                     obj, obj->cache_level);
 +
 +                              obj->has_aliasing_ppgtt_mapping = 1;
 +                      }
                }
  
                if (ret != -ENOSPC || retry > 1)
        } while (1);
  
  err:
 -      obj = list_entry(obj->exec_list.prev,
 -                       struct drm_i915_gem_object,
 -                       exec_list);
 -      while (objects != &obj->exec_list) {
 -              if (obj->gtt_space)
 -                      i915_gem_object_unpin(obj);
 +      list_for_each_entry_continue_reverse(obj, objects, exec_list) {
 +              struct drm_i915_gem_exec_object2 *entry;
 +
 +              if (!obj->gtt_space)
 +                      continue;
 +
 +              entry = obj->exec_entry;
 +              if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
 +                      i915_gem_object_unpin_fence(obj);
 +                      entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
 +              }
  
 -              obj = list_entry(obj->exec_list.prev,
 -                               struct drm_i915_gem_object,
 -                               exec_list);
 +              i915_gem_object_unpin(obj);
        }
  
        return ret;
@@@ -733,7 -682,7 +733,7 @@@ i915_gem_execbuffer_relocate_slow(struc
                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (&obj->base == NULL) {
 -                      DRM_ERROR("Invalid object handle %d at index %d\n",
 +                      DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        ret = -ENOENT;
                        goto err;
@@@ -807,9 -756,9 +807,9 @@@ intel_enable_semaphores(struct drm_devi
        if (i915_semaphores >= 0)
                return i915_semaphores;
  
-       /* Enable semaphores on SNB when IO remapping is off */
+       /* Disable semaphores on SNB */
        if (INTEL_INFO(dev)->gen == 6)
-               return !intel_iommu_enabled;
+               return 0;
  
        return 1;
  }
@@@ -1064,7 -1013,7 +1064,7 @@@ i915_gem_do_execbuffer(struct drm_devic
        int ret, mode, i;
  
        if (!i915_gem_check_execbuffer(args)) {
 -              DRM_ERROR("execbuf with invalid offset/length\n");
 +              DRM_DEBUG("execbuf with invalid offset/length\n");
                return -EINVAL;
        }
  
                break;
        case I915_EXEC_BSD:
                if (!HAS_BSD(dev)) {
 -                      DRM_ERROR("execbuf with invalid ring (BSD)\n");
 +                      DRM_DEBUG("execbuf with invalid ring (BSD)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->ring[VCS];
                break;
        case I915_EXEC_BLT:
                if (!HAS_BLT(dev)) {
 -                      DRM_ERROR("execbuf with invalid ring (BLT)\n");
 +                      DRM_DEBUG("execbuf with invalid ring (BLT)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->ring[BCS];
                break;
        default:
 -              DRM_ERROR("execbuf with unknown ring: %d\n",
 +              DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }
                }
                break;
        default:
 -              DRM_ERROR("execbuf with unknown constants: %d\n", mode);
 +              DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
                return -EINVAL;
        }
  
        if (args->buffer_count < 1) {
 -              DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 +              DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
  
        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->ring[RCS]) {
 -                      DRM_ERROR("clip rectangles are only valid with the render ring\n");
 +                      DRM_DEBUG("clip rectangles are only valid with the render ring\n");
                        return -EINVAL;
                }
  
                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (&obj->base == NULL) {
 -                      DRM_ERROR("Invalid object handle %d at index %d\n",
 +                      DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        /* prevent error path from reading uninitialized data */
                        ret = -ENOENT;
                }
  
                if (!list_empty(&obj->exec_list)) {
 -                      DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
 +                      DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                                   obj, exec[i].handle, i);
                        ret = -EINVAL;
                        goto err;
  
        /* Set the pending read domains for the batch buffer to COMMAND */
        if (batch_obj->base.pending_write_domain) {
 -              DRM_ERROR("Attempting to use self-modifying batch buffer\n");
 +              DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
                         * so every billion or so execbuffers, we need to stall
                         * the GPU in order to reset the counters.
                         */
 -                      ret = i915_gpu_idle(dev);
 +                      ret = i915_gpu_idle(dev, true);
                        if (ret)
                                goto err;
  
@@@ -1325,7 -1274,7 +1325,7 @@@ i915_gem_execbuffer(struct drm_device *
        int ret, i;
  
        if (args->buffer_count < 1) {
 -              DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 +              DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
  
        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec_list == NULL || exec2_list == NULL) {
 -              DRM_ERROR("Failed to allocate exec list for %d buffers\n",
 +              DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
 -              DRM_ERROR("copy %d exec entries failed %d\n",
 +              DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                                   sizeof(*exec_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
 -                      DRM_ERROR("failed to copy %d exec entries "
 +                      DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
@@@ -1405,7 -1354,7 +1405,7 @@@ i915_gem_execbuffer2(struct drm_device 
        int ret;
  
        if (args->buffer_count < 1) {
 -              DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
 +              DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
  
                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
                                           args->buffer_count);
        if (exec2_list == NULL) {
 -              DRM_ERROR("Failed to allocate exec list for %d buffers\n",
 +              DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                return -ENOMEM;
        }
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
 -              DRM_ERROR("copy %d exec entries failed %d\n",
 +              DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec2_list);
                return -EFAULT;
                                   sizeof(*exec2_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
 -                      DRM_ERROR("failed to copy %d exec entries "
 +                      DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
index cde1ce94563ca326ef5b2d8b367bd83a1fb5470d,5bd4361ea84dd2e5e4a0e39a6af249ccd7786573..063b4577d4c6f30513282cbbc461090f019a3eca
@@@ -720,6 -720,7 +720,6 @@@ i915_error_object_create(struct drm_i91
        reloc_offset = src->gtt_offset;
        for (page = 0; page < page_count; page++) {
                unsigned long flags;
 -              void __iomem *s;
                void *d;
  
                d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
                        goto unwind;
  
                local_irq_save(flags);
 -              s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 -                                           reloc_offset);
 -              memcpy_fromio(d, s, PAGE_SIZE);
 -              io_mapping_unmap_atomic(s);
 +              if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
 +                      void __iomem *s;
 +
 +                      /* Simply ignore tiling or any overlapping fence.
 +                       * It's part of the error state, and this hopefully
 +                       * captures what the GPU read.
 +                       */
 +
 +                      s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 +                                                   reloc_offset);
 +                      memcpy_fromio(d, s, PAGE_SIZE);
 +                      io_mapping_unmap_atomic(s);
 +              } else {
 +                      void *s;
 +
 +                      drm_clflush_pages(&src->pages[page], 1);
 +
 +                      s = kmap_atomic(src->pages[page]);
 +                      memcpy(d, s, PAGE_SIZE);
 +                      kunmap_atomic(s);
 +
 +                      drm_clflush_pages(&src->pages[page], 1);
 +              }
                local_irq_restore(flags);
  
                dst->pages[page] = d;
@@@ -822,7 -804,7 +822,7 @@@ static u32 capture_bo_list(struct drm_i
                err->tiling = obj->tiling_mode;
                err->dirty = obj->dirty;
                err->purgeable = obj->madv != I915_MADV_WILLNEED;
 -              err->ring = obj->ring ? obj->ring->id : 0;
 +              err->ring = obj->ring ? obj->ring->id : -1;
                err->cache_level = obj->cache_level;
  
                if (++i == count)
@@@ -894,46 -876,6 +894,46 @@@ i915_error_first_batchbuffer(struct drm
        return NULL;
  }
  
 +static void i915_record_ring_state(struct drm_device *dev,
 +                                 struct drm_i915_error_state *error,
 +                                 struct intel_ring_buffer *ring)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      if (INTEL_INFO(dev)->gen >= 6) {
 +              error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
 +              error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
 +              error->semaphore_mboxes[ring->id][0]
 +                      = I915_READ(RING_SYNC_0(ring->mmio_base));
 +              error->semaphore_mboxes[ring->id][1]
 +                      = I915_READ(RING_SYNC_1(ring->mmio_base));
 +      }
 +
 +      if (INTEL_INFO(dev)->gen >= 4) {
 +              error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
 +              error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
 +              error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
 +              error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
 +              if (ring->id == RCS) {
 +                      error->instdone1 = I915_READ(INSTDONE1);
 +                      error->bbaddr = I915_READ64(BB_ADDR);
 +              }
 +      } else {
 +              error->ipeir[ring->id] = I915_READ(IPEIR);
 +              error->ipehr[ring->id] = I915_READ(IPEHR);
 +              error->instdone[ring->id] = I915_READ(INSTDONE);
 +      }
 +
 +      error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
 +      error->seqno[ring->id] = ring->get_seqno(ring);
 +      error->acthd[ring->id] = intel_ring_get_active_head(ring);
 +      error->head[ring->id] = I915_READ_HEAD(ring);
 +      error->tail[ring->id] = I915_READ_TAIL(ring);
 +
 +      error->cpu_ring_head[ring->id] = ring->head;
 +      error->cpu_ring_tail[ring->id] = ring->tail;
 +}
 +
  /**
   * i915_capture_error_state - capture an error record for later analysis
   * @dev: drm device
@@@ -958,7 -900,7 +958,7 @@@ static void i915_capture_error_state(st
                return;
  
        /* Account for pipe specific data like PIPE*STAT */
 -      error = kmalloc(sizeof(*error), GFP_ATOMIC);
 +      error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (!error) {
                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
                return;
        DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
                 dev->primary->index);
  
 -      error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        for_each_pipe(pipe)
                error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 -      error->instpm = I915_READ(INSTPM);
 -      error->error = 0;
 +
        if (INTEL_INFO(dev)->gen >= 6) {
                error->error = I915_READ(ERROR_GEN6);
 -
 -              error->bcs_acthd = I915_READ(BCS_ACTHD);
 -              error->bcs_ipehr = I915_READ(BCS_IPEHR);
 -              error->bcs_ipeir = I915_READ(BCS_IPEIR);
 -              error->bcs_instdone = I915_READ(BCS_INSTDONE);
 -              error->bcs_seqno = 0;
 -              if (dev_priv->ring[BCS].get_seqno)
 -                      error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
 -
 -              error->vcs_acthd = I915_READ(VCS_ACTHD);
 -              error->vcs_ipehr = I915_READ(VCS_IPEHR);
 -              error->vcs_ipeir = I915_READ(VCS_IPEIR);
 -              error->vcs_instdone = I915_READ(VCS_INSTDONE);
 -              error->vcs_seqno = 0;
 -              if (dev_priv->ring[VCS].get_seqno)
 -                      error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
 -      }
 -      if (INTEL_INFO(dev)->gen >= 4) {
 -              error->ipeir = I915_READ(IPEIR_I965);
 -              error->ipehr = I915_READ(IPEHR_I965);
 -              error->instdone = I915_READ(INSTDONE_I965);
 -              error->instps = I915_READ(INSTPS);
 -              error->instdone1 = I915_READ(INSTDONE1);
 -              error->acthd = I915_READ(ACTHD_I965);
 -              error->bbaddr = I915_READ64(BB_ADDR);
 -      } else {
 -              error->ipeir = I915_READ(IPEIR);
 -              error->ipehr = I915_READ(IPEHR);
 -              error->instdone = I915_READ(INSTDONE);
 -              error->acthd = I915_READ(ACTHD);
 -              error->bbaddr = 0;
 +              error->done_reg = I915_READ(DONE_REG);
        }
 +
 +      i915_record_ring_state(dev, error, &dev_priv->ring[RCS]);
 +      if (HAS_BLT(dev))
 +              i915_record_ring_state(dev, error, &dev_priv->ring[BCS]);
 +      if (HAS_BSD(dev))
 +              i915_record_ring_state(dev, error, &dev_priv->ring[VCS]);
 +
        i915_gem_record_fences(dev, error);
  
        /* Record the active batch and ring buffers */
@@@ -1050,12 -1017,11 +1050,12 @@@ void i915_destroy_error_state(struct dr
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
 +      unsigned long flags;
  
 -      spin_lock(&dev_priv->error_lock);
 +      spin_lock_irqsave(&dev_priv->error_lock, flags);
        error = dev_priv->first_error;
        dev_priv->first_error = NULL;
 -      spin_unlock(&dev_priv->error_lock);
 +      spin_unlock_irqrestore(&dev_priv->error_lock, flags);
  
        if (error)
                i915_error_state_free(dev, error);
@@@ -1732,7 -1698,6 +1732,7 @@@ void i915_hangcheck_elapsed(unsigned lo
            dev_priv->last_instdone1 == instdone1) {
                if (dev_priv->hangcheck_count++ > 1) {
                        DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
 +                      i915_handle_error(dev, true);
  
                        if (!IS_GEN2(dev)) {
                                /* Is the chip hanging on a WAIT_FOR_EVENT?
                                 * and break the hang. This should work on
                                 * all but the second generation chipsets.
                                 */
 -
                                if (kick_ring(&dev_priv->ring[RCS]))
                                        goto repeat;
  
                                        goto repeat;
                        }
  
 -                      i915_handle_error(dev, true);
                        return;
                }
        } else {
@@@ -1784,7 -1751,8 +1784,8 @@@ static void ironlake_irq_preinstall(str
                INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
  
        I915_WRITE(HWSTAM, 0xeffe);
-       if (IS_GEN6(dev) || IS_GEN7(dev)) {
+       if (IS_GEN6(dev)) {
                /* Workaround stalls observed on Sandy Bridge GPUs by
                 * making the blitter command streamer generate a
                 * write to the Hardware Status Page for
index 5ab967ce86ccee4fd2f6cd89d2aac7091ba6595f,00fbff5ddd814b3ae73a4f7abf936274f3b06111..efe56a2c4f4b88be123078a3be8501e1e1e74d85
@@@ -75,7 -75,7 +75,7 @@@ struct intel_limit 
        intel_range_t   dot, vco, n, m, m1, m2, p, p1;
        intel_p2_t          p2;
        bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
 -                      int, int, intel_clock_t *);
 +                      int, int, intel_clock_t *, intel_clock_t *);
  };
  
  /* FDI */
  
  static bool
  intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 -                  int target, int refclk, intel_clock_t *best_clock);
 +                  int target, int refclk, intel_clock_t *match_clock,
 +                  intel_clock_t *best_clock);
  static bool
  intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 -                      int target, int refclk, intel_clock_t *best_clock);
 +                      int target, int refclk, intel_clock_t *match_clock,
 +                      intel_clock_t *best_clock);
  
  static bool
  intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
 -                    int target, int refclk, intel_clock_t *best_clock);
 +                    int target, int refclk, intel_clock_t *match_clock,
 +                    intel_clock_t *best_clock);
  static bool
  intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
 -                         int target, int refclk, intel_clock_t *best_clock);
 +                         int target, int refclk, intel_clock_t *match_clock,
 +                         intel_clock_t *best_clock);
  
  static inline u32 /* units of 100MHz */
  intel_fdi_link_freq(struct drm_device *dev)
@@@ -519,8 -515,7 +519,8 @@@ static bool intel_PLL_is_valid(struct d
  
  static bool
  intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 -                  int target, int refclk, intel_clock_t *best_clock)
 +                  int target, int refclk, intel_clock_t *match_clock,
 +                  intel_clock_t *best_clock)
  
  {
        struct drm_device *dev = crtc->dev;
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
 +                                      if (match_clock &&
 +                                          clock.p != match_clock->p)
 +                                              continue;
  
                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
  
  static bool
  intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 -                      int target, int refclk, intel_clock_t *best_clock)
 +                      int target, int refclk, intel_clock_t *match_clock,
 +                      intel_clock_t *best_clock)
  {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
 +                                      if (match_clock &&
 +                                          clock.p != match_clock->p)
 +                                              continue;
  
                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
  
  static bool
  intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 -                         int target, int refclk, intel_clock_t *best_clock)
 +                         int target, int refclk, intel_clock_t *match_clock,
 +                         intel_clock_t *best_clock)
  {
        struct drm_device *dev = crtc->dev;
        intel_clock_t clock;
  /* DisplayPort has only two frequencies, 162MHz and 270MHz */
  static bool
  intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 -                    int target, int refclk, intel_clock_t *best_clock)
 +                    int target, int refclk, intel_clock_t *match_clock,
 +                    intel_clock_t *best_clock)
  {
        intel_clock_t clock;
        if (target < 200000) {
@@@ -936,10 -922,6 +936,10 @@@ void assert_pipe(struct drm_i915_privat
        u32 val;
        bool cur_state;
  
 +      /* if we need the pipe A quirk it must be always on */
 +      if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
 +              state = true;
 +
        reg = PIPECONF(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & PIPECONF_ENABLE);
             pipe_name(pipe), state_string(state), state_string(cur_state));
  }
  
 -static void assert_plane_enabled(struct drm_i915_private *dev_priv,
 -                               enum plane plane)
 +static void assert_plane(struct drm_i915_private *dev_priv,
 +                       enum plane plane, bool state)
  {
        int reg;
        u32 val;
 +      bool cur_state;
  
        reg = DSPCNTR(plane);
        val = I915_READ(reg);
 -      WARN(!(val & DISPLAY_PLANE_ENABLE),
 -           "plane %c assertion failure, should be active but is disabled\n",
 -           plane_name(plane));
 +      cur_state = !!(val & DISPLAY_PLANE_ENABLE);
 +      WARN(cur_state != state,
 +           "plane %c assertion failure (expected %s, current %s)\n",
 +           plane_name(plane), state_string(state), state_string(cur_state));
  }
  
 +#define assert_plane_enabled(d, p) assert_plane(d, p, true)
 +#define assert_plane_disabled(d, p) assert_plane(d, p, false)
 +
  static void assert_planes_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe)
  {
        int cur_pipe;
  
        /* Planes are fixed to pipes on ILK+ */
 -      if (HAS_PCH_SPLIT(dev_priv->dev))
 +      if (HAS_PCH_SPLIT(dev_priv->dev)) {
 +              reg = DSPCNTR(pipe);
 +              val = I915_READ(reg);
 +              WARN((val & DISPLAY_PLANE_ENABLE),
 +                   "plane %c assertion failure, should be disabled but not\n",
 +                   plane_name(pipe));
                return;
 +      }
  
        /* Need to check both planes against the pipe */
        for (i = 0; i < 2; i++) {
@@@ -1100,7 -1071,7 +1100,7 @@@ static void assert_pch_hdmi_disabled(st
  {
        u32 val = I915_READ(reg);
        WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
 -           "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
 +           "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
  }
  
@@@ -1901,7 -1872,7 +1901,7 @@@ static void intel_update_fbc(struct drm
        if (enable_fbc < 0) {
                DRM_DEBUG_KMS("fbc set to per-chip default\n");
                enable_fbc = 1;
-               if (INTEL_INFO(dev)->gen <= 5)
+               if (INTEL_INFO(dev)->gen <= 6)
                        enable_fbc = 0;
        }
        if (!enable_fbc) {
@@@ -2041,8 -2012,6 +2041,8 @@@ intel_pin_and_fence_fb_obj(struct drm_d
                ret = i915_gem_object_get_fence(obj, pipelined);
                if (ret)
                        goto err_unpin;
 +
 +              i915_gem_object_pin_fence(obj);
        }
  
        dev_priv->mm.interruptible = true;
@@@ -2055,12 -2024,6 +2055,12 @@@ err_interruptible
        return ret;
  }
  
 +void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 +{
 +      i915_gem_object_unpin_fence(obj);
 +      i915_gem_object_unpin(obj);
 +}
 +
  static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                             int x, int y)
  {
@@@ -2292,7 -2255,7 +2292,7 @@@ intel_pipe_set_base(struct drm_crtc *cr
        ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
                                         LEAVE_ATOMIC_MODE_SET);
        if (ret) {
 -              i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
 +              intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
                mutex_unlock(&dev->struct_mutex);
                DRM_ERROR("failed to update base address\n");
                return ret;
  
        if (old_fb) {
                intel_wait_for_vblank(dev, intel_crtc->pipe);
 -              i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
 +              intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
        }
  
        mutex_unlock(&dev->struct_mutex);
@@@ -3358,12 -3321,10 +3358,12 @@@ static void intel_crtc_disable(struct d
        struct drm_device *dev = crtc->dev;
  
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 +      assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
 +      assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
  
        if (crtc->fb) {
                mutex_lock(&dev->struct_mutex);
 -              i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
 +              intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
                mutex_unlock(&dev->struct_mutex);
        }
  }
@@@ -4560,7 -4521,6 +4560,7 @@@ void sandybridge_update_wm(struct drm_d
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
 +      u32 val;
        int fbc_wm, plane_wm, cursor_wm;
        unsigned int enabled;
  
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
 -              I915_WRITE(WM0_PIPEA_ILK,
 -                         (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
 +              val = I915_READ(WM0_PIPEA_ILK);
 +              val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
 +              I915_WRITE(WM0_PIPEA_ILK, val |
 +                         ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
 -              I915_WRITE(WM0_PIPEB_ILK,
 -                         (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
 +              val = I915_READ(WM0_PIPEB_ILK);
 +              val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
 +              I915_WRITE(WM0_PIPEB_ILK, val |
 +                         ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
 -              I915_WRITE(WM0_PIPEC_IVB,
 -                         (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
 +              val = I915_READ(WM0_PIPEC_IVB);
 +              val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
 +              I915_WRITE(WM0_PIPEC_IVB, val |
 +                         ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
                DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
@@@ -4746,7 -4700,6 +4746,7 @@@ static void sandybridge_update_sprite_w
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
 +      u32 val;
        int sprite_wm, reg;
        int ret;
  
                return;
        }
  
 -      I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
 +      val = I915_READ(reg);
 +      val &= ~WM0_PIPE_SPRITE_MASK;
 +      I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
        DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
  
  
@@@ -5017,82 -4968,6 +5017,82 @@@ static bool intel_choose_pipe_bpp_dithe
        return display_bpc != bpc;
  }
  
 +static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
 +{
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      int refclk;
 +
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 +          intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 +              refclk = dev_priv->lvds_ssc_freq * 1000;
 +              DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 +                            refclk / 1000);
 +      } else if (!IS_GEN2(dev)) {
 +              refclk = 96000;
 +      } else {
 +              refclk = 48000;
 +      }
 +
 +      return refclk;
 +}
 +
 +static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
 +                                    intel_clock_t *clock)
 +{
 +      /* SDVO TV has fixed PLL values depend on its clock range,
 +         this mirrors vbios setting. */
 +      if (adjusted_mode->clock >= 100000
 +          && adjusted_mode->clock < 140500) {
 +              clock->p1 = 2;
 +              clock->p2 = 10;
 +              clock->n = 3;
 +              clock->m1 = 16;
 +              clock->m2 = 8;
 +      } else if (adjusted_mode->clock >= 140500
 +                 && adjusted_mode->clock <= 200000) {
 +              clock->p1 = 1;
 +              clock->p2 = 10;
 +              clock->n = 6;
 +              clock->m1 = 12;
 +              clock->m2 = 8;
 +      }
 +}
 +
 +static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
 +                                   intel_clock_t *clock,
 +                                   intel_clock_t *reduced_clock)
 +{
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      u32 fp, fp2 = 0;
 +
 +      if (IS_PINEVIEW(dev)) {
 +              fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
 +              if (reduced_clock)
 +                      fp2 = (1 << reduced_clock->n) << 16 |
 +                              reduced_clock->m1 << 8 | reduced_clock->m2;
 +      } else {
 +              fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
 +              if (reduced_clock)
 +                      fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
 +                              reduced_clock->m2;
 +      }
 +
 +      I915_WRITE(FP0(pipe), fp);
 +
 +      intel_crtc->lowfreq_avail = false;
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 +          reduced_clock && i915_powersave) {
 +              I915_WRITE(FP1(pipe), fp2);
 +              intel_crtc->lowfreq_avail = true;
 +      } else {
 +              I915_WRITE(FP1(pipe), fp);
 +      }
 +}
 +
  static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                              struct drm_display_mode *mode,
                              struct drm_display_mode *adjusted_mode,
        int plane = intel_crtc->plane;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
 -      u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
 +      u32 dpll, dspcntr, pipeconf;
        bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
        struct drm_mode_config *mode_config = &dev->mode_config;
                num_connectors++;
        }
  
 -      if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 -              refclk = dev_priv->lvds_ssc_freq * 1000;
 -              DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 -                            refclk / 1000);
 -      } else if (!IS_GEN2(dev)) {
 -              refclk = 96000;
 -      } else {
 -              refclk = 48000;
 -      }
 +      refclk = i9xx_get_refclk(crtc, num_connectors);
  
        /*
         * Returns a set of divisors for the desired target clock with the given
         * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
         */
        limit = intel_limit(crtc, refclk);
 -      ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
 +      ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
 +                           &clock);
        if (!ok) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        intel_crtc_update_cursor(crtc, true);
  
        if (is_lvds && dev_priv->lvds_downclock_avail) {
 +              /*
 +               * Ensure we match the reduced clock's P to the target clock.
 +               * If the clocks don't match, we can't switch the display clock
 +               * by using the FP0/FP1. In such case we will disable the LVDS
 +               * downclock feature.
 +              */
                has_reduced_clock = limit->find_pll(limit, crtc,
                                                    dev_priv->lvds_downclock,
                                                    refclk,
 +                                                  &clock,
                                                    &reduced_clock);
 -              if (has_reduced_clock && (clock.p != reduced_clock.p)) {
 -                      /*
 -                       * If the different P is found, it means that we can't
 -                       * switch the display clock by using the FP0/FP1.
 -                       * In such case we will disable the LVDS downclock
 -                       * feature.
 -                       */
 -                      DRM_DEBUG_KMS("Different P is found for "
 -                                    "LVDS clock/downclock\n");
 -                      has_reduced_clock = 0;
 -              }
 -      }
 -      /* SDVO TV has fixed PLL values depend on its clock range,
 -         this mirrors vbios setting. */
 -      if (is_sdvo && is_tv) {
 -              if (adjusted_mode->clock >= 100000
 -                  && adjusted_mode->clock < 140500) {
 -                      clock.p1 = 2;
 -                      clock.p2 = 10;
 -                      clock.n = 3;
 -                      clock.m1 = 16;
 -                      clock.m2 = 8;
 -              } else if (adjusted_mode->clock >= 140500
 -                         && adjusted_mode->clock <= 200000) {
 -                      clock.p1 = 1;
 -                      clock.p2 = 10;
 -                      clock.n = 6;
 -                      clock.m1 = 12;
 -                      clock.m2 = 8;
 -              }
        }
  
 -      if (IS_PINEVIEW(dev)) {
 -              fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
 -              if (has_reduced_clock)
 -                      fp2 = (1 << reduced_clock.n) << 16 |
 -                              reduced_clock.m1 << 8 | reduced_clock.m2;
 -      } else {
 -              fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
 -              if (has_reduced_clock)
 -                      fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
 -                              reduced_clock.m2;
 -      }
 +      if (is_sdvo && is_tv)
 +              i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
 +
 +      i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
 +                               &reduced_clock : NULL);
  
        dpll = DPLL_VGA_MODE_DIS;
  
        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;
  
 -      /* Ironlake's plane is forced to pipe, bit 24 is to
 -         enable color space conversion */
        if (pipe == 0)
                dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
        else
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
        drm_mode_debug_printmodeline(mode);
  
 -      I915_WRITE(FP0(pipe), fp);
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
  
        POSTING_READ(DPLL(pipe));
                I915_WRITE(DPLL(pipe), dpll);
        }
  
 -      intel_crtc->lowfreq_avail = false;
 -      if (is_lvds && has_reduced_clock && i915_powersave) {
 -              I915_WRITE(FP1(pipe), fp2);
 -              intel_crtc->lowfreq_avail = true;
 -              if (HAS_PIPE_CXSR(dev)) {
 +      if (HAS_PIPE_CXSR(dev)) {
 +              if (intel_crtc->lowfreq_avail) {
                        DRM_DEBUG_KMS("enabling CxSR downclocking\n");
                        pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 -              }
 -      } else {
 -              I915_WRITE(FP1(pipe), fp);
 -              if (HAS_PIPE_CXSR(dev)) {
 +              } else {
                        DRM_DEBUG_KMS("disabling CxSR downclocking\n");
                        pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
                }
        }
  
+       pipeconf &= ~PIPECONF_INTERLACE_MASK;
        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                /* the chip adds 2 halflines automatically */
                adjusted_mode->crtc_vsync_end -= 1;
                adjusted_mode->crtc_vsync_start -= 1;
        } else
-               pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
+               pipeconf |= PIPECONF_PROGRESSIVE;
  
        I915_WRITE(HTOTAL(pipe),
                   (adjusted_mode->crtc_hdisplay - 1) |
@@@ -5663,8 -5584,7 +5664,8 @@@ static int ironlake_crtc_mode_set(struc
         * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
         */
        limit = intel_limit(crtc, refclk);
 -      ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
 +      ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
 +                           &clock);
        if (!ok) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        intel_crtc_update_cursor(crtc, true);
  
        if (is_lvds && dev_priv->lvds_downclock_avail) {
 +              /*
 +               * Ensure we match the reduced clock's P to the target clock.
 +               * If the clocks don't match, we can't switch the display clock
 +               * by using the FP0/FP1. In such case we will disable the LVDS
 +               * downclock feature.
 +              */
                has_reduced_clock = limit->find_pll(limit, crtc,
                                                    dev_priv->lvds_downclock,
                                                    refclk,
 +                                                  &clock,
                                                    &reduced_clock);
 -              if (has_reduced_clock && (clock.p != reduced_clock.p)) {
 -                      /*
 -                       * If the different P is found, it means that we can't
 -                       * switch the display clock by using the FP0/FP1.
 -                       * In such case we will disable the LVDS downclock
 -                       * feature.
 -                       */
 -                      DRM_DEBUG_KMS("Different P is found for "
 -                                    "LVDS clock/downclock\n");
 -                      has_reduced_clock = 0;
 -              }
        }
        /* SDVO TV has fixed PLL values depend on its clock range,
           this mirrors vbios setting. */
        if (is_lvds) {
                temp = I915_READ(PCH_LVDS);
                temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-               if (HAS_PCH_CPT(dev))
+               if (HAS_PCH_CPT(dev)) {
+                       temp &= ~PORT_TRANS_SEL_MASK;
                        temp |= PORT_TRANS_SEL_CPT(pipe);
-               else if (pipe == 1)
-                       temp |= LVDS_PIPEB_SELECT;
-               else
-                       temp &= ~LVDS_PIPEB_SELECT;
+               } else {
+                       if (pipe == 1)
+                               temp |= LVDS_PIPEB_SELECT;
+                       else
+                               temp &= ~LVDS_PIPEB_SELECT;
+               }
  
                /* set the corresponsding LVDS_BORDER bit */
                temp |= dev_priv->lvds_border_bits;
                }
        }
  
+       pipeconf &= ~PIPECONF_INTERLACE_MASK;
        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                /* the chip adds 2 halflines automatically */
                adjusted_mode->crtc_vsync_end -= 1;
                adjusted_mode->crtc_vsync_start -= 1;
        } else
-               pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+               pipeconf |= PIPECONF_PROGRESSIVE;
  
        I915_WRITE(HTOTAL(pipe),
                   (adjusted_mode->crtc_hdisplay - 1) |
  
        intel_wait_for_vblank(dev, pipe);
  
 -      if (IS_GEN5(dev)) {
 -              /* enable address swizzle for tiling buffer */
 -              temp = I915_READ(DISP_ARB_CTL);
 -              I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
 -      }
 -
        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
  
@@@ -6143,18 -6077,15 +6148,18 @@@ static void ironlake_write_eld(struct d
        uint32_t i;
        int len;
        int hdmiw_hdmiedid;
 +      int aud_config;
        int aud_cntl_st;
        int aud_cntrl_st2;
  
        if (HAS_PCH_IBX(connector->dev)) {
                hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
 +              aud_config = IBX_AUD_CONFIG_A;
                aud_cntl_st = IBX_AUD_CNTL_ST_A;
                aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
        } else {
                hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
 +              aud_config = CPT_AUD_CONFIG_A;
                aud_cntl_st = CPT_AUD_CNTL_ST_A;
                aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
        }
        i = to_intel_crtc(crtc)->pipe;
        hdmiw_hdmiedid += i * 0x100;
        aud_cntl_st += i * 0x100;
 +      aud_config += i * 0x100;
  
        DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
  
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
                DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
                eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
 -      }
 +              I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
 +      } else
 +              I915_WRITE(aud_config, 0);
  
        if (intel_eld_uptodate(connector,
                               aud_cntrl_st2, eldv,
@@@ -7160,7 -7088,7 +7165,7 @@@ static void intel_unpin_work_fn(struct 
                container_of(__work, struct intel_unpin_work, work);
  
        mutex_lock(&work->dev->struct_mutex);
 -      i915_gem_object_unpin(work->old_fb_obj);
 +      intel_unpin_fb_obj(work->old_fb_obj);
        drm_gem_object_unreference(&work->pending_flip_obj->base);
        drm_gem_object_unreference(&work->old_fb_obj->base);
  
@@@ -7310,7 -7238,7 +7315,7 @@@ static int intel_gen2_queue_flip(struc
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
 -      OUT_RING(MI_NOOP);
 +      OUT_RING(0); /* aux display base address, unused */
        ADVANCE_LP_RING();
  out:
        return ret;
@@@ -7902,8 -7830,7 +7907,8 @@@ int intel_framebuffer_init(struct drm_d
        case DRM_FORMAT_VYUY:
                break;
        default:
 -              DRM_ERROR("unsupported pixel format\n");
 +              DRM_DEBUG_KMS("unsupported pixel format %u\n",
 +                              mode_cmd->pixel_format);
                return -EINVAL;
        }
  
@@@ -8210,13 -8137,11 +8215,11 @@@ static bool intel_enable_rc6(struct drm
                return 0;
  
        /*
-        * Enable rc6 on Sandybridge if DMA remapping is disabled
+        * Disable rc6 on Sandybridge
         */
        if (INTEL_INFO(dev)->gen == 6) {
-               DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
-                                intel_iommu_enabled ? "true" : "false",
-                                !intel_iommu_enabled ? "en" : "dis");
-               return !intel_iommu_enabled;
+               DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+               return 0;
        }
        DRM_DEBUG_DRIVER("RC6 enabled\n");
        return 1;
@@@ -9105,12 -9030,9 +9108,9 @@@ void intel_modeset_init(struct drm_devi
  
        for (i = 0; i < dev_priv->num_pipe; i++) {
                intel_crtc_init(dev, i);
-               if (HAS_PCH_SPLIT(dev)) {
-                       ret = intel_plane_init(dev, i);
-                       if (ret)
-                               DRM_ERROR("plane %d init failed: %d\n",
-                                         i, ret);
-               }
+               ret = intel_plane_init(dev, i);
+               if (ret)
+                       DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
        }
  
        /* Just disable it once at startup */
index 8f1148c0410837e2993063adc63b2652ad2c471b,94f860cce3f748f4c89a080cc32b561cfd0fae40..39eccf908a69973738d3570455123e56803611b7
@@@ -208,17 -208,8 +208,8 @@@ intel_dp_link_clock(uint8_t link_bw
   */
  
  static int
- intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
+ intel_dp_link_required(int pixel_clock, int bpp)
  {
-       struct drm_crtc *crtc = intel_dp->base.base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int bpp = 24;
-       if (check_bpp)
-               bpp = check_bpp;
-       else if (intel_crtc)
-               bpp = intel_crtc->bpp;
        return (pixel_clock * bpp + 9) / 10;
  }
  
@@@ -245,12 -236,11 +236,11 @@@ intel_dp_mode_valid(struct drm_connecto
                        return MODE_PANEL;
        }
  
-       mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+       mode_rate = intel_dp_link_required(mode->clock, 24);
        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
  
        if (mode_rate > max_rate) {
-                       mode_rate = intel_dp_link_required(intel_dp,
-                                                          mode->clock, 18);
+                       mode_rate = intel_dp_link_required(mode->clock, 18);
                        if (mode_rate > max_rate)
                                return MODE_CLOCK_HIGH;
                        else
@@@ -362,7 -352,7 +352,7 @@@ intel_dp_aux_ch(struct intel_dp *intel_
        int recv_bytes;
        uint32_t status;
        uint32_t aux_clock_divider;
 -      int try, precharge;
 +      int try, precharge = 5;
  
        intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
                else
                        aux_clock_divider = 225; /* eDP input clock at 450Mhz */
        } else if (HAS_PCH_SPLIT(dev))
 -              aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
 +              aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
        else
                aux_clock_divider = intel_hrawclk(dev) / 2;
  
 -      if (IS_GEN6(dev))
 -              precharge = 3;
 -      else
 -              precharge = 5;
 -
        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ(ch_ctl);
                           DP_AUX_CH_CTL_DONE |
                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                           DP_AUX_CH_CTL_RECEIVE_ERROR);
 +
 +              if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
 +                            DP_AUX_CH_CTL_RECEIVE_ERROR))
 +                      continue;
                if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }
@@@ -682,7 -673,7 +672,7 @@@ intel_dp_mode_fixup(struct drm_encoder 
        int lane_count, clock;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
-       int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
+       int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
  
        if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
                for (clock = 0; clock <= max_clock; clock++) {
                        int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
  
-                       if (intel_dp_link_required(intel_dp, mode->clock, bpp)
+                       if (intel_dp_link_required(mode->clock, bpp)
                                        <= link_avail) {
                                intel_dp->link_bw = bws[clock];
                                intel_dp->lane_count = lane_count;
index 6e80f83683558327387335eef35a36d70581ce16,1ab842c6032e949a37855a3995aa161d9f276977..4956f1bff5226e697f0ed63a94d80d57f20c6977
@@@ -399,6 -399,8 +399,6 @@@ static int init_render_ring(struct inte
  
        if (INTEL_INFO(dev)->gen > 3) {
                int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
 -              if (IS_GEN6(dev) || IS_GEN7(dev))
 -                      mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
                I915_WRITE(MI_MODE, mode);
                if (IS_GEN7(dev))
                        I915_WRITE(GFX_MODE_GEN7,
@@@ -633,6 -635,19 +633,19 @@@ render_ring_add_request(struct intel_ri
        return 0;
  }
  
+ static u32
+ gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+ {
+       struct drm_device *dev = ring->dev;
+       /* Workaround to force correct ordering between irq and seqno writes on
+        * ivb (and maybe also on snb) by reading from a CS register (like
+        * ACTHD) before reading the status page. */
+       if (IS_GEN7(dev))
+               intel_ring_get_active_head(ring);
+       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ }
  static u32
  ring_get_seqno(struct intel_ring_buffer *ring)
  {
@@@ -729,13 -744,13 +742,13 @@@ void intel_ring_setup_status_page(struc
         */
        if (IS_GEN7(dev)) {
                switch (ring->id) {
 -              case RING_RENDER:
 +              case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
 -              case RING_BLT:
 +              case BCS:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
 -              case RING_BSD:
 +              case VCS:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
                }
@@@ -789,17 -804,6 +802,6 @@@ ring_add_request(struct intel_ring_buff
        return 0;
  }
  
- static bool
- gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
- {
-       /* The BLT ring on IVB appears to have broken synchronization
-        * between the seqno write and the interrupt, so that the
-        * interrupt appears first.  Returning false here makes
-        * i915_wait_request() do a polling loop, instead.
-        */
-       return false;
- }
  static bool
  gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
  {
        if (!dev->irq_enabled)
               return false;
  
+       /* It looks like we need to prevent the gt from suspending while waiting
+        * for an notifiy irq, otherwise irqs seem to get lost on at least the
+        * blt/bsd rings on ivb. */
+       if (IS_GEN7(dev))
+               gen6_gt_force_wake_get(dev_priv);
        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                ring->irq_mask &= ~rflag;
@@@ -833,6 -843,9 +841,9 @@@ gen6_ring_put_irq(struct intel_ring_buf
                ironlake_disable_irq(dev_priv, gflag);
        }
        spin_unlock(&ring->irq_lock);
+       if (IS_GEN7(dev))
+               gen6_gt_force_wake_put(dev_priv);
  }
  
  static bool
@@@ -1199,7 -1212,7 +1210,7 @@@ void intel_ring_advance(struct intel_ri
  
  static const struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
 -      .id                     = RING_RENDER,
 +      .id                     = RCS,
        .mmio_base              = RENDER_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_render_ring,
  
  static const struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
 -      .id                     = RING_BSD,
 +      .id                     = VCS,
        .mmio_base              = BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_ring_common,
@@@ -1332,14 -1345,14 +1343,14 @@@ gen6_bsd_ring_put_irq(struct intel_ring
  /* ring buffer for Video Codec for Gen6+ */
  static const struct intel_ring_buffer gen6_bsd_ring = {
        .name                   = "gen6 bsd ring",
 -      .id                     = RING_BSD,
 +      .id                     = VCS,
        .mmio_base              = GEN6_BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_ring_common,
        .write_tail             = gen6_bsd_ring_write_tail,
        .flush                  = gen6_ring_flush,
        .add_request            = gen6_add_request,
-       .get_seqno              = ring_get_seqno,
+       .get_seqno              = gen6_ring_get_seqno,
        .irq_get                = gen6_bsd_ring_get_irq,
        .irq_put                = gen6_bsd_ring_put_irq,
        .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
@@@ -1368,13 -1381,79 +1379,13 @@@ blt_ring_put_irq(struct intel_ring_buff
                          GEN6_BLITTER_USER_INTERRUPT);
  }
  
 -
 -/* Workaround for some stepping of SNB,
 - * each time when BLT engine ring tail moved,
 - * the first command in the ring to be parsed
 - * should be MI_BATCH_BUFFER_START
 - */
 -#define NEED_BLT_WORKAROUND(dev) \
 -      (IS_GEN6(dev) && (dev->pdev->revision < 8))
 -
 -static inline struct drm_i915_gem_object *
 -to_blt_workaround(struct intel_ring_buffer *ring)
 -{
 -      return ring->private;
 -}
 -
 -static int blt_ring_init(struct intel_ring_buffer *ring)
 -{
 -      if (NEED_BLT_WORKAROUND(ring->dev)) {
 -              struct drm_i915_gem_object *obj;
 -              u32 *ptr;
 -              int ret;
 -
 -              obj = i915_gem_alloc_object(ring->dev, 4096);
 -              if (obj == NULL)
 -                      return -ENOMEM;
 -
 -              ret = i915_gem_object_pin(obj, 4096, true);
 -              if (ret) {
 -                      drm_gem_object_unreference(&obj->base);
 -                      return ret;
 -              }
 -
 -              ptr = kmap(obj->pages[0]);
 -              *ptr++ = MI_BATCH_BUFFER_END;
 -              *ptr++ = MI_NOOP;
 -              kunmap(obj->pages[0]);
 -
 -              ret = i915_gem_object_set_to_gtt_domain(obj, false);
 -              if (ret) {
 -                      i915_gem_object_unpin(obj);
 -                      drm_gem_object_unreference(&obj->base);
 -                      return ret;
 -              }
 -
 -              ring->private = obj;
 -      }
 -
 -      return init_ring_common(ring);
 -}
 -
 -static int blt_ring_begin(struct intel_ring_buffer *ring,
 -                        int num_dwords)
 -{
 -      if (ring->private) {
 -              int ret = intel_ring_begin(ring, num_dwords+2);
 -              if (ret)
 -                      return ret;
 -
 -              intel_ring_emit(ring, MI_BATCH_BUFFER_START);
 -              intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
 -
 -              return 0;
 -      } else
 -              return intel_ring_begin(ring, 4);
 -}
 -
  static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
  {
        uint32_t cmd;
        int ret;
  
 -      ret = blt_ring_begin(ring, 4);
 +      ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;
  
        return 0;
  }
  
 -static void blt_ring_cleanup(struct intel_ring_buffer *ring)
 -{
 -      if (!ring->private)
 -              return;
 -
 -      i915_gem_object_unpin(ring->private);
 -      drm_gem_object_unreference(ring->private);
 -      ring->private = NULL;
 -}
 -
  static const struct intel_ring_buffer gen6_blt_ring = {
        .name                   = "blt ring",
 -      .id                     = RING_BLT,
 +      .id                     = BCS,
        .mmio_base              = BLT_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
 -      .init                   = blt_ring_init,
 +      .init                   = init_ring_common,
        .write_tail             = ring_write_tail,
        .flush                  = blt_ring_flush,
        .add_request            = gen6_add_request,
-       .get_seqno              = ring_get_seqno,
+       .get_seqno              = gen6_ring_get_seqno,
        .irq_get                = blt_ring_get_irq,
        .irq_put                = blt_ring_put_irq,
        .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
 -      .cleanup                = blt_ring_cleanup,
        .sync_to                = gen6_blt_ring_sync_to,
        .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
                                   MI_SEMAPHORE_SYNC_BV,
@@@ -1420,6 -1510,7 +1431,7 @@@ int intel_init_render_ring_buffer(struc
                ring->flush = gen6_render_ring_flush;
                ring->irq_get = gen6_render_ring_get_irq;
                ring->irq_put = gen6_render_ring_put_irq;
+               ring->get_seqno = gen6_ring_get_seqno;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->get_seqno = pc_render_get_seqno;
@@@ -1498,8 -1589,5 +1510,5 @@@ int intel_init_blt_ring_buffer(struct d
  
        *ring = gen6_blt_ring;
  
-       if (IS_GEN7(dev))
-               ring->irq_get = gen7_blt_ring_get_irq;
        return intel_init_ring_buffer(dev, ring);
  }
index ad3bd929aec759950346f8457feb61d4458aad90,2288abf88cce4e3420bbedc379747480aa8843e8..98444ab68bc367243f6cc0c593e39bbbe1fdad76
@@@ -466,10 -466,8 +466,8 @@@ intel_update_plane(struct drm_plane *pl
        mutex_lock(&dev->struct_mutex);
  
        ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-       if (ret) {
-               DRM_ERROR("failed to pin object\n");
+       if (ret)
                goto out_unlock;
-       }
  
        intel_plane->obj = obj;
  
                        intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
                        mutex_lock(&dev->struct_mutex);
                }
 -              i915_gem_object_unpin(old_obj);
 +              intel_unpin_fb_obj(old_obj);
        }
  
  out_unlock:
@@@ -530,7 -528,7 +528,7 @@@ intel_disable_plane(struct drm_plane *p
                goto out;
  
        mutex_lock(&dev->struct_mutex);
 -      i915_gem_object_unpin(intel_plane->obj);
 +      intel_unpin_fb_obj(intel_plane->obj);
        intel_plane->obj = NULL;
        mutex_unlock(&dev->struct_mutex);
  out:
@@@ -632,10 -630,8 +630,8 @@@ intel_plane_init(struct drm_device *dev
        unsigned long possible_crtcs;
        int ret;
  
-       if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
-               DRM_ERROR("new plane code only for SNB+\n");
+       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
-       }
  
        intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
        if (!intel_plane)