Merge tag 'v3.7-rc2' into drm-intel-next-queued
author    Daniel Vetter <daniel.vetter@ffwll.ch>
          Mon, 22 Oct 2012 12:34:51 +0000 (14:34 +0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
          Mon, 22 Oct 2012 12:34:51 +0000 (14:34 +0200)
Linux 3.7-rc2

Backmerge to solve two ugly conflicts:
- uapi: the DRM headers moved to include/uapi/, and we've already added
  new ioctl definitions for -next. Do I need to say more?
- WC support for GTT PTEs. We had to revert this for snb+ in 3.7 and
  also fix a few other things in that code. Now we know how to make it
  work on snb+, but to avoid losing those other fixes, do the backmerge
  first before re-enabling WC GTT PTEs on snb+.

And a few other minor things, among them git getting confused in
intel_dp.c and seemingly causing a conflict out of nothing ...

Conflicts:
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_modes.c
include/drm/i915_drm.h

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
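
One of the ioctl interfaces touched by the hunks below is the GEM caching
ioctl, whose misspelled CACHEING names get fixed up to CACHING. For reference,
a minimal userspace sketch of the renamed interface, assuming the kernel uapi
headers are installed, an already-open DRM fd and an existing GEM handle
(illustrative only, not part of this merge):

	/* Sketch: set the caching mode of a GEM buffer object. */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int set_gem_caching(int drm_fd, uint32_t handle, uint32_t mode)
	{
		struct drm_i915_gem_caching arg = {
			.handle  = handle,
			.caching = mode, /* I915_CACHING_NONE or I915_CACHING_CACHED */
		};

		/* DRM_IOCTL_I915_GEM_SET_CACHING replaces the old *_CACHEING name. */
		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
	}

Real callers would normally go through libdrm's drmIoctl() instead of a raw
ioctl() so the call gets restarted on EINTR.
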
18 files changed:
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
include/uapi/drm/i915_drm.h

  #include <linux/debugfs.h>
  #include <linux/slab.h>
  #include <linux/export.h>
- #include "drmP.h"
- #include "drm.h"
+ #include <drm/drmP.h>
  #include "intel_drv.h"
  #include "intel_ringbuffer.h"
- #include "i915_drm.h"
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  
  #define DRM_I915_RING_DEBUG 1
@@@ -1069,7 -1068,7 +1068,7 @@@ static int gen6_drpc_info(struct seq_fi
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 rpmodectl1, gt_core_status, rcctl1;
 +      u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        unsigned forcewake_count;
        int count=0, ret;
  
  
        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
 +      sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
        mutex_unlock(&dev->struct_mutex);
  
        seq_printf(m, "Video Turbo Mode: %s\n",
        seq_printf(m, "RC6++ residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6pp));
  
 +      seq_printf(m, "RC6   voltage: %dmV\n",
 +                 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
 +      seq_printf(m, "RC6+  voltage: %dmV\n",
 +                 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
 +      seq_printf(m, "RC6++ voltage: %dmV\n",
 +                 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
        return 0;
  }
  
@@@ -1290,10 -1282,15 +1289,10 @@@ static int i915_ring_freq_table(struct 
        for (gpu_freq = dev_priv->rps.min_delay;
             gpu_freq <= dev_priv->rps.max_delay;
             gpu_freq++) {
 -              I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
 -              I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
 -                         GEN6_PCODE_READ_MIN_FREQ_TABLE);
 -              if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
 -                            GEN6_PCODE_READY) == 0, 10)) {
 -                      DRM_ERROR("pcode read of freq table timed out\n");
 -                      continue;
 -              }
 -              ia_freq = I915_READ(GEN6_PCODE_DATA);
 +              ia_freq = gpu_freq;
 +              sandybridge_pcode_read(dev_priv,
 +                                     GEN6_PCODE_READ_MIN_FREQ_TABLE,
 +                                     &ia_freq);
                seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
        }
  
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
- #include "drmP.h"
- #include "drm.h"
- #include "drm_crtc_helper.h"
- #include "drm_fb_helper.h"
+ #include <drm/drmP.h>
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_fb_helper.h>
  #include "intel_drv.h"
- #include "i915_drm.h"
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include "i915_trace.h"
  #include <linux/pci.h>
@@@ -1015,9 -1014,6 +1014,9 @@@ static int i915_getparam(struct drm_dev
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
 +      case I915_PARAM_HAS_SECURE_BATCHES:
 +              value = capable(CAP_SYS_ADMIN);
 +              break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@@ -1871,8 -1867,8 +1870,8 @@@ struct drm_ioctl_desc i915_ioctls[] = 
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
   */
  
  #include <linux/device.h>
- #include "drmP.h"
- #include "drm.h"
- #include "i915_drm.h"
+ #include <drm/drmP.h>
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include "i915_trace.h"
  #include "intel_drv.h"
  
  #include <linux/console.h>
  #include <linux/module.h>
- #include "drm_crtc_helper.h"
+ #include <drm/drm_crtc_helper.h>
  
  static int i915_modeset __read_mostly = -1;
  module_param_named(modeset, i915_modeset, int, 0400);
@@@ -525,8 -524,6 +524,8 @@@ static int i915_drm_thaw(struct drm_dev
        struct drm_i915_private *dev_priv = dev->dev_private;
        int error = 0;
  
 +      intel_gt_reset(dev);
 +
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
  
                intel_modeset_init_hw(dev);
                intel_modeset_setup_hw_state(dev);
 -              drm_mode_config_reset(dev);
                drm_irq_install(dev);
        }
  
@@@ -93,12 -93,6 +93,12 @@@ struct intel_pch_pll 
  };
  #define I915_NUM_PLLS 2
  
 +struct intel_ddi_plls {
 +      int spll_refcount;
 +      int wrpll1_refcount;
 +      int wrpll2_refcount;
 +};
 +
  /* Interface history:
   *
   * 1.1: Original.
@@@ -458,7 -452,6 +458,7 @@@ typedef struct drm_i915_private 
  
        /* For hangcheck timer */
  #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 +#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
        struct timer_list hangcheck_timer;
        int hangcheck_count;
        uint32_t last_acthd[I915_NUM_RINGS];
        wait_queue_head_t pending_flip_queue;
  
        struct intel_pch_pll pch_plls[I915_NUM_PLLS];
 +      struct intel_ddi_plls ddi_plls;
  
        /* Reclocking support */
        bool render_reclock_avail;
@@@ -1264,7 -1256,6 +1264,7 @@@ void i915_handle_error(struct drm_devic
  
  extern void intel_irq_init(struct drm_device *dev);
  extern void intel_gt_init(struct drm_device *dev);
 +extern void intel_gt_reset(struct drm_device *dev);
  
  void i915_error_state_free(struct kref *error_ref);
  
@@@ -1310,10 -1301,10 +1310,10 @@@ int i915_gem_unpin_ioctl(struct drm_dev
                         struct drm_file *file_priv);
  int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
- int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *file);
- int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *file);
+ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file);
+ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file);
  int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
  int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@@ -1350,9 -1341,14 +1350,14 @@@ int __must_check i915_gem_object_get_pa
  static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
  {
        struct scatterlist *sg = obj->pages->sgl;
-       while (n >= SG_MAX_SINGLE_ALLOC) {
+       int nents = obj->pages->nents;
+       while (nents > SG_MAX_SINGLE_ALLOC) {
+               if (n < SG_MAX_SINGLE_ALLOC - 1)
+                       break;
                sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
                n -= SG_MAX_SINGLE_ALLOC - 1;
+               nents -= SG_MAX_SINGLE_ALLOC - 1;
        }
        return sg_page(sg+n);
  }
@@@ -1436,7 -1432,7 +1441,7 @@@ int __must_check i915_gpu_idle(struct d
  int __must_check i915_gem_idle(struct drm_device *dev);
  int i915_add_request(struct intel_ring_buffer *ring,
                     struct drm_file *file,
-                    struct drm_i915_gem_request *request);
+                    u32 *seqno);
  int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
                                 uint32_t seqno);
  int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@@ -1631,9 -1627,6 +1636,9 @@@ void gen6_gt_force_wake_get(struct drm_
  void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
  int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
  
 +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
 +
  #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
  
@@@ -25,9 -25,8 +25,8 @@@
   *
   */
  
- #include "drmP.h"
- #include "drm.h"
- #include "i915_drm.h"
+ #include <drm/drmP.h>
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include "i915_trace.h"
  #include "intel_drv.h"
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
+       case -EBUSY:
+               /*
+                * EBUSY is ok: this just means that another thread
+                * already did the job.
+                */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
+               WARN_ON_ONCE(ret);
                return VM_FAULT_SIGBUS;
        }
  }
@@@ -1950,11 -1955,12 +1955,12 @@@ i915_gem_next_request_seqno(struct inte
  int
  i915_add_request(struct intel_ring_buffer *ring,
                 struct drm_file *file,
-                struct drm_i915_gem_request *request)
+                u32 *out_seqno)
  {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       uint32_t seqno;
+       struct drm_i915_gem_request *request;
        u32 request_ring_position;
+       u32 seqno;
        int was_empty;
        int ret;
  
        if (ret)
                return ret;
  
-       if (request == NULL) {
-               request = kmalloc(sizeof(*request), GFP_KERNEL);
-               if (request == NULL)
-                       return -ENOMEM;
-       }
+       request = kmalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL)
+               return -ENOMEM;
  
        seqno = i915_gem_next_request_seqno(ring);
  
        if (!dev_priv->mm.suspended) {
                if (i915_enable_hangcheck) {
                        mod_timer(&dev_priv->hangcheck_timer,
 -                                jiffies +
 -                                msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 +                                round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
                }
                if (was_empty) {
                        queue_delayed_work(dev_priv->wq,
 -                                         &dev_priv->mm.retire_work, HZ);
 +                                         &dev_priv->mm.retire_work,
 +                                         round_jiffies_up_relative(HZ));
                        intel_mark_busy(dev_priv->dev);
                }
        }
  
+       if (out_seqno)
+               *out_seqno = seqno;
        return 0;
  }
  
@@@ -2209,8 -2215,7 +2215,8 @@@ i915_gem_retire_work_handler(struct wor
  
        /* Come back later if the device is busy... */
        if (!mutex_trylock(&dev->struct_mutex)) {
 -              queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 +              queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
 +                                 round_jiffies_up_relative(HZ));
                return;
        }
  
        }
  
        if (!dev_priv->mm.suspended && !idle)
 -              queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 +              queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
 +                                 round_jiffies_up_relative(HZ));
        if (idle)
                intel_mark_idle(dev);
  
@@@ -3187,10 -3191,10 +3193,10 @@@ int i915_gem_object_set_cache_level(str
        return 0;
  }
  
- int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *file)
+ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file)
  {
-       struct drm_i915_gem_cacheing *args = data;
+       struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
  
                goto unlock;
        }
  
-       args->cacheing = obj->cache_level != I915_CACHE_NONE;
+       args->caching = obj->cache_level != I915_CACHE_NONE;
  
        drm_gem_object_unreference(&obj->base);
  unlock:
        return ret;
  }
  
- int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *file)
+ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file)
  {
-       struct drm_i915_gem_cacheing *args = data;
+       struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
        enum i915_cache_level level;
        int ret;
  
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-       switch (args->cacheing) {
-       case I915_CACHEING_NONE:
+       switch (args->caching) {
+       case I915_CACHING_NONE:
                level = I915_CACHE_NONE;
                break;
-       case I915_CACHEING_CACHED:
+       case I915_CACHING_CACHED:
                level = I915_CACHE_LLC;
                break;
        default:
                return -EINVAL;
        }
  
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
@@@ -3956,6 -3960,9 +3962,9 @@@ i915_gem_init_hw(struct drm_device *dev
        if (!intel_enable_gtt())
                return -EIO;
  
+       if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+               I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
        i915_gem_l3_remap(dev);
  
        i915_gem_init_swizzling(dev);
@@@ -4095,7 -4102,6 +4104,6 @@@ i915_gem_entervt_ioctl(struct drm_devic
        }
  
        BUG_ON(!list_empty(&dev_priv->mm.active_list));
-       BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        mutex_unlock(&dev->struct_mutex);
  
        ret = drm_irq_install(dev);
@@@ -26,9 -26,8 +26,8 @@@
   *
   */
  
- #include "drmP.h"
- #include "drm.h"
- #include "i915_drm.h"
+ #include <drm/drmP.h>
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include "i915_trace.h"
  #include "intel_drv.h"
@@@ -801,7 -800,6 +800,7 @@@ i915_gem_do_execbuffer(struct drm_devic
        u32 exec_start, exec_len;
        u32 seqno;
        u32 mask;
 +      u32 flags;
        int ret, mode, i;
  
        if (!i915_gem_check_execbuffer(args)) {
        if (ret)
                return ret;
  
 +      flags = 0;
 +      if (args->flags & I915_EXEC_SECURE) {
 +              if (!file->is_master || !capable(CAP_SYS_ADMIN))
 +                  return -EPERM;
 +
 +              flags |= I915_DISPATCH_SECURE;
 +      }
 +
        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
        case I915_EXEC_RENDER:
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  
 +      /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 +       * batch" bit. Hence we need to pin secure batches into the global gtt.
 +       * hsw should have this fixed, but let's be paranoid and do it
 +       * unconditionally for now. */
 +      if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
 +              i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 +
        ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
        if (ret)
                goto err;
                        goto err;
        }
  
 -      trace_i915_gem_ring_dispatch(ring, seqno);
 +      trace_i915_gem_ring_dispatch(ring, seqno, flags);
  
        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
        exec_len = args->batch_len;
                                goto err;
  
                        ret = ring->dispatch_execbuffer(ring,
 -                                                      exec_start, exec_len);
 +                                                      exec_start, exec_len,
 +                                                      flags);
                        if (ret)
                                goto err;
                }
        } else {
 -              ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
 +              ret = ring->dispatch_execbuffer(ring,
 +                                              exec_start, exec_len,
 +                                              flags);
                if (ret)
                        goto err;
        }
@@@ -30,9 -30,8 +30,8 @@@
  
  #include <linux/sysrq.h>
  #include <linux/slab.h>
- #include "drmP.h"
- #include "drm.h"
- #include "i915_drm.h"
+ #include <drm/drmP.h>
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include "i915_trace.h"
  #include "intel_drv.h"
@@@ -353,7 -352,8 +352,7 @@@ static void notify_ring(struct drm_devi
        if (i915_enable_hangcheck) {
                dev_priv->hangcheck_count = 0;
                mod_timer(&dev_priv->hangcheck_timer,
 -                        jiffies +
 -                        msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 +                        round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
        }
  }
  
@@@ -520,7 -520,7 +519,7 @@@ static void gen6_queue_rps_work(struct 
        queue_work(dev_priv->wq, &dev_priv->rps.work);
  }
  
 -static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
 +static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@@ -606,9 -606,6 +605,9 @@@ static void ibx_irq_handler(struct drm_
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
 +      if (pch_iir & SDE_HOTPLUG_MASK)
 +              queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 +
        if (pch_iir & SDE_AUDIO_POWER_MASK)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@@ -649,9 -646,6 +648,9 @@@ static void cpt_irq_handler(struct drm_
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
 +      if (pch_iir & SDE_HOTPLUG_MASK_CPT)
 +              queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 +
        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                                         I915_READ(FDI_RX_IIR(pipe)));
  }
  
 -static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 +static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
                        intel_opregion_gse_intr(dev);
  
                for (i = 0; i < 3; i++) {
+                       if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+                               drm_handle_vblank(dev, i);
                        if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
                                intel_prepare_page_flip(dev, i);
                                intel_finish_page_flip_plane(dev, i);
                        }
-                       if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
-                               drm_handle_vblank(dev, i);
                }
  
                /* check event from PCH */
                if (de_iir & DE_PCH_EVENT_IVB) {
                        u32 pch_iir = I915_READ(SDEIIR);
  
 -                      if (pch_iir & SDE_HOTPLUG_MASK_CPT)
 -                              queue_work(dev_priv->wq, &dev_priv->hotplug_work);
                        cpt_irq_handler(dev, pch_iir);
  
                        /* clear PCH hotplug event before clear CPU irq */
@@@ -749,12 -745,13 +748,12 @@@ static void ilk_gt_irq_handler(struct d
                notify_ring(dev, &dev_priv->ring[VCS]);
  }
  
 -static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 +static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
 -      u32 hotplug_mask;
  
        atomic_inc(&dev_priv->irq_received);
  
            (!IS_GEN6(dev) || pm_iir == 0))
                goto done;
  
 -      if (HAS_PCH_CPT(dev))
 -              hotplug_mask = SDE_HOTPLUG_MASK_CPT;
 -      else
 -              hotplug_mask = SDE_HOTPLUG_MASK;
 -
        ret = IRQ_HANDLED;
  
        if (IS_GEN5(dev))
        if (de_iir & DE_GSE)
                intel_opregion_gse_intr(dev);
  
+       if (de_iir & DE_PIPEA_VBLANK)
+               drm_handle_vblank(dev, 0);
+       if (de_iir & DE_PIPEB_VBLANK)
+               drm_handle_vblank(dev, 1);
        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
                intel_finish_page_flip_plane(dev, 1);
        }
  
-       if (de_iir & DE_PIPEA_VBLANK)
-               drm_handle_vblank(dev, 0);
-       if (de_iir & DE_PIPEB_VBLANK)
-               drm_handle_vblank(dev, 1);
        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
 -              if (pch_iir & hotplug_mask)
 -                      queue_work(dev_priv->wq, &dev_priv->hotplug_work);
                if (HAS_PCH_CPT(dev))
                        cpt_irq_handler(dev, pch_iir);
                else
@@@ -1747,7 -1751,7 +1746,7 @@@ void i915_hangcheck_elapsed(unsigned lo
  repeat:
        /* Reset timer case chip hangs without another request being added */
        mod_timer(&dev_priv->hangcheck_timer,
 -                jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 +                round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
  }
  
  /* drm_dma.h hooks
@@@ -1952,7 -1956,6 +1951,7 @@@ static int valleyview_irq_postinstall(s
        u32 enable_mask;
        u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
 +      u32 render_irqs;
        u16 msid;
  
        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
  
 -      dev_priv->gt_irq_mask = ~0;
 -
 -      I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 -      I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
 -                 GT_GEN6_BLT_CS_ERROR_INTERRUPT |
 -                 GT_GEN6_BLT_USER_INTERRUPT |
 -                 GT_GEN6_BSD_USER_INTERRUPT |
 -                 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
 -                 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
 -                 GT_PIPE_NOTIFY |
 -                 GT_RENDER_CS_ERROR_INTERRUPT |
 -                 GT_SYNC_STATUS |
 -                 GT_USER_INTERRUPT);
 +
 +      render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
 +              GEN6_BLITTER_USER_INTERRUPT;
 +      I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);
  
        /* ack & enable invalid PTE error interrupts */
  #endif
  
        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
 -#if 0 /* FIXME: check register definitions; some have moved */
        /* Note HDMI and DP share bits */
        if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMIB_HOTPLUG_INT_EN;
                hotplug_en |= HDMIC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMID_HOTPLUG_INT_EN;
 -      if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
 +      if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
                hotplug_en |= SDVOC_HOTPLUG_INT_EN;
 -      if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
 +      if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
                hotplug_en |= SDVOB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
                hotplug_en |= CRT_HOTPLUG_INT_EN;
                hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
        }
 -#endif
  
        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
  
@@@ -2115,7 -2129,7 +2114,7 @@@ static int i8xx_irq_postinstall(struct 
        return 0;
  }
  
 -static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
 +static irqreturn_t i8xx_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@@ -2293,7 -2307,7 +2292,7 @@@ static int i915_irq_postinstall(struct 
        return 0;
  }
  
 -static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
 +static irqreturn_t i915_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@@ -2531,7 -2545,7 +2530,7 @@@ static int i965_irq_postinstall(struct 
        return 0;
  }
  
 -static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
 +static irqreturn_t i965_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  #define   MI_INVALIDATE_TLB   (1<<18)
  #define   MI_INVALIDATE_BSD   (1<<7)
  #define MI_BATCH_BUFFER               MI_INSTR(0x30, 1)
 -#define   MI_BATCH_NON_SECURE (1)
 -#define   MI_BATCH_NON_SECURE_I965 (1<<8)
 +#define   MI_BATCH_NON_SECURE         (1)
 +/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
 +#define   MI_BATCH_NON_SECURE_I965    (1<<8)
 +#define   MI_BATCH_PPGTT_HSW          (1<<8)
 +#define   MI_BATCH_NON_SECURE_HSW     (1<<13)
  #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
  #define   MI_BATCH_GTT                    (2<<6) /* aliased with (1<<7) on gen4 */
  #define MI_SEMAPHORE_MBOX     MI_INSTR(0x16, 1) /* gen6+ */
  #define   DPIO_PLL_MODESEL_SHIFT      24 /* 3 bits */
  #define   DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
  #define   DPIO_PLL_REFCLK_SEL_SHIFT   16 /* 2 bits */
 +#define   DPIO_PLL_REFCLK_SEL_MASK    3
  #define   DPIO_DRIVER_CTL_SHIFT               12 /* always set to 0x8 */
  #define   DPIO_CLK_BIAS_CTL_SHIFT     8 /* always set to 0x5 */
  #define _DPIO_REFSFR_B                        0x8034
  
  #define DPIO_FASTCLK_DISABLE          0x8100
  
 +#define DPIO_DATA_CHANNEL1            0x8220
 +#define DPIO_DATA_CHANNEL2            0x8420
 +
  /*
   * Fence registers
   */
   */
  # define _3D_CHICKEN2_WM_READ_PIPELINED                       (1 << 14)
  #define _3D_CHICKEN3  0x02090
- #define  _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL         (1 << 5)
 +#define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL           (1 << 10)
+ #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL                (1 << 5)
  
  #define MI_MODE               0x0209c
  # define VS_TIMER_DISPATCH                            (1 << 6)
  # define MI_FLUSH_ENABLE                              (1 << 12)
  
+ #define GEN6_GT_MODE  0x20d0
+ #define   GEN6_GT_MODE_HI     (1 << 9)
  #define GFX_MODE      0x02520
  #define GFX_MODE_GEN7 0x0229c
  #define RING_MODE_GEN7(ring)  ((ring)->mmio_base+0x29c)
  
  /* Video Data Island Packet control */
  #define VIDEO_DIP_DATA                0x61178
+ /* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+  * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+  * of the infoframe structure specified by CEA-861. */
+ #define   VIDEO_DIP_DATA_SIZE 32
  #define VIDEO_DIP_CTL         0x61170
  /* Pre HSW: */
  #define   VIDEO_DIP_ENABLE            (1 << 31)
  #define   PIPECONF_GAMMA              (1<<24)
  #define   PIPECONF_FORCE_BORDER       (1<<25)
  #define   PIPECONF_INTERLACE_MASK     (7 << 21)
 +#define   PIPECONF_INTERLACE_MASK_HSW (3 << 21)
  /* Note that pre-gen3 does not support interlaced display directly. Panel
   * fitting must be disabled on pre-ilk for interlaced. */
  #define   PIPECONF_PROGRESSIVE                        (0 << 21)
  #define DISPLAY_PORT_PLL_BIOS_1         0x46010
  #define DISPLAY_PORT_PLL_BIOS_2         0x46014
  
 -#define PCH_DSPCLK_GATE_D     0x42020
 -# define DPFCUNIT_CLOCK_GATE_DISABLE          (1 << 9)
 -# define DPFCRUNIT_CLOCK_GATE_DISABLE         (1 << 8)
 -# define DPFDUNIT_CLOCK_GATE_DISABLE          (1 << 7)
 -# define DPARBUNIT_CLOCK_GATE_DISABLE         (1 << 5)
 -
  #define PCH_3DCGDIS0          0x46020
  # define MARIUNIT_CLOCK_GATE_DISABLE          (1 << 18)
  # define SVSMUNIT_CLOCK_GATE_DISABLE          (1 << 1)
  #define  ILK_HDCP_DISABLE             (1<<25)
  #define  ILK_eDP_A_DISABLE            (1<<24)
  #define  ILK_DESKTOP                  (1<<23)
 -#define ILK_DSPCLK_GATE               0x42020
 -#define  IVB_VRHUNIT_CLK_GATE (1<<28)
 -#define  ILK_DPARB_CLK_GATE   (1<<5)
 -#define  ILK_DPFD_CLK_GATE    (1<<7)
  
 -/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
 -#define   ILK_CLK_FBC         (1<<7)
 -#define   ILK_DPFC_DIS1               (1<<8)
 -#define   ILK_DPFC_DIS2               (1<<9)
 +#define ILK_DSPCLK_GATE_D                     0x42020
 +#define   ILK_VRHUNIT_CLOCK_GATE_DISABLE      (1 << 28)
 +#define   ILK_DPFCUNIT_CLOCK_GATE_DISABLE     (1 << 9)
 +#define   ILK_DPFCRUNIT_CLOCK_GATE_DISABLE    (1 << 8)
 +#define   ILK_DPFDUNIT_CLOCK_GATE_ENABLE      (1 << 7)
 +#define   ILK_DPARBUNIT_CLOCK_GATE_ENABLE     (1 << 5)
  
  #define IVB_CHICKEN3  0x4200c
  # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE    (1 << 5)
  #define GEN7_L3_CHICKEN_MODE_REGISTER         0xB030
  #define  GEN7_WA_L3_CHICKEN_MODE                              0x20000000
  
 +#define GEN7_L3SQCREG4                                0xb034
 +#define  L3SQ_URB_READ_CAM_MATCH_DISABLE      (1<<27)
 +
  /* WaCatErrorRejectionIssue */
  #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG                0x9030
  #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB     (1<<11)
  
 +#define HSW_FUSE_STRAP                0x42014
 +#define  HSW_CDCLK_LIMIT      (1 << 24)
 +
  /* PCH */
  
  /* south display engine interrupt: IBX */
  #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
  #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
  
 -#define VLV_VIDEO_DIP_CTL_A           0x60220
 +#define VLV_VIDEO_DIP_CTL_A           0x60200
  #define VLV_VIDEO_DIP_DATA_A          0x60208
  #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A  0x60210
  
  #define  FORCEWAKE_ACK_HSW                    0x130044
  #define  FORCEWAKE_ACK                                0x130090
  #define  FORCEWAKE_MT                         0xa188 /* multi-threaded */
 +#define   FORCEWAKE_KERNEL                    0x1
 +#define   FORCEWAKE_USER                      0x2
  #define  FORCEWAKE_MT_ACK                     0x130040
  #define  ECOBUS                                       0xa180
  #define    FORCEWAKE_MT_ENABLE                        (1<<5)
  #define   GEN6_READ_OC_PARAMS                 0xc
  #define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE     0x8
  #define   GEN6_PCODE_READ_MIN_FREQ_TABLE      0x9
 +#define         GEN6_PCODE_WRITE_RC6VIDS              0x4
 +#define         GEN6_PCODE_READ_RC6VIDS               0x5
 +#define   GEN6_ENCODE_RC6_VID(mv)             (((mv) / 5) - 245) < 0 ?: 0
 +#define   GEN6_DECODE_RC6_VID(vids)           (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
  #define GEN6_PCODE_DATA                               0x138128
  #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT      8
  
  /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
  #define  PIPE_DDI_PORT_MASK           (7<<28)
  #define  PIPE_DDI_SELECT_PORT(x)      ((x)<<28)
 +#define  PIPE_DDI_PORT_NONE           (0<<28)
  #define  PIPE_DDI_MODE_SELECT_MASK    (7<<24)
  #define  PIPE_DDI_MODE_SELECT_HDMI    (0<<24)
  #define  PIPE_DDI_MODE_SELECT_DVI     (1<<24)
  #define  DP_TP_CTL_LINK_TRAIN_MASK            (7<<8)
  #define  DP_TP_CTL_LINK_TRAIN_PAT1            (0<<8)
  #define  DP_TP_CTL_LINK_TRAIN_PAT2            (1<<8)
 +#define  DP_TP_CTL_LINK_TRAIN_PAT3            (4<<8)
 +#define  DP_TP_CTL_LINK_TRAIN_IDLE            (2<<8)
  #define  DP_TP_CTL_LINK_TRAIN_NORMAL          (3<<8)
 +#define  DP_TP_CTL_SCRAMBLE_DISABLE           (1<<7)
  
  /* DisplayPort Transport Status */
  #define DP_TP_STATUS_A                        0x64044
  #define DP_TP_STATUS_B                        0x64144
  #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
 +#define  DP_TP_STATUS_IDLE_DONE               (1<<25)
  #define  DP_TP_STATUS_AUTOTRAIN_DONE  (1<<12)
  
  /* DDI Buffer Control */
  /* SPLL */
  #define SPLL_CTL                      0x46020
  #define  SPLL_PLL_ENABLE              (1<<31)
 -#define  SPLL_PLL_SCC                 (1<<28)
 -#define  SPLL_PLL_NON_SCC             (2<<28)
 +#define  SPLL_PLL_SSC                 (1<<28)
 +#define  SPLL_PLL_NON_SSC             (2<<28)
  #define  SPLL_PLL_FREQ_810MHz         (0<<26)
  #define  SPLL_PLL_FREQ_1350MHz                (1<<26)
  
  #define WRPLL_CTL2                    0x46060
  #define  WRPLL_PLL_ENABLE             (1<<31)
  #define  WRPLL_PLL_SELECT_SSC         (0x01<<28)
 -#define  WRPLL_PLL_SELECT_NON_SCC     (0x02<<28)
 +#define  WRPLL_PLL_SELECT_NON_SSC     (0x02<<28)
  #define  WRPLL_PLL_SELECT_LCPLL_2700  (0x03<<28)
  /* WRPLL divider programming */
  #define  WRPLL_DIVIDER_REFERENCE(x)   ((x)<<0)
  #define  PORT_CLK_SEL_SPLL            (3<<29)
  #define  PORT_CLK_SEL_WRPLL1          (4<<29)
  #define  PORT_CLK_SEL_WRPLL2          (5<<29)
 +#define  PORT_CLK_SEL_NONE            (7<<29)
  
  /* Pipe clock selection */
  #define PIPE_CLK_SEL_A                        0x46140
  #define  PIPE_CLK_SEL_DISABLED                (0x0<<29)
  #define  PIPE_CLK_SEL_PORT(x)         ((x+1)<<29)
  
 +#define _PIPEA_MSA_MISC                       0x60410
 +#define _PIPEB_MSA_MISC                       0x61410
 +#define PIPE_MSA_MISC(pipe) _PIPE(pipe, _PIPEA_MSA_MISC, _PIPEB_MSA_MISC)
 +#define  PIPE_MSA_SYNC_CLK            (1<<0)
 +#define  PIPE_MSA_6_BPC                       (0<<5)
 +#define  PIPE_MSA_8_BPC                       (1<<5)
 +#define  PIPE_MSA_10_BPC              (2<<5)
 +#define  PIPE_MSA_12_BPC              (3<<5)
 +#define  PIPE_MSA_16_BPC              (4<<5)
 +
  /* LCPLL Control */
  #define LCPLL_CTL                     0x130040
  #define  LCPLL_PLL_DISABLE            (1<<31)
  #define  LCPLL_PLL_LOCK                       (1<<30)
 +#define  LCPLL_CLK_FREQ_MASK          (3<<26)
 +#define  LCPLL_CLK_FREQ_450           (0<<26)
  #define  LCPLL_CD_CLOCK_DISABLE               (1<<25)
  #define  LCPLL_CD2X_CLOCK_DISABLE     (1<<23)
 +#define  LCPLL_CD_SOURCE_FCLK         (1<<21)
  
  /* Pipe WM_LINETIME - watermark line time */
  #define PIPE_WM_LINETIME_A            0x45270
@@@ -24,9 -24,8 +24,8 @@@
   * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
   */
  
- #include "drmP.h"
- #include "drm.h"
- #include "i915_drm.h"
+ #include <drm/drmP.h>
+ #include <drm/i915_drm.h>
  #include "intel_drv.h"
  #include "i915_reg.h"
  
@@@ -395,12 -394,6 +394,12 @@@ static void i915_save_modeset_reg(struc
                break;
        }
  
 +      /* CRT state */
 +      if (HAS_PCH_SPLIT(dev))
 +              dev_priv->saveADPA = I915_READ(PCH_ADPA);
 +      else
 +              dev_priv->saveADPA = I915_READ(ADPA);
 +
        return;
  }
  
@@@ -607,12 -600,6 +606,12 @@@ static void i915_restore_modeset_reg(st
        if (IS_GEN2(dev))
                I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
  
 +      /* CRT state */
 +      if (HAS_PCH_SPLIT(dev))
 +              I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
 +      else
 +              I915_WRITE(ADPA, dev_priv->saveADPA);
 +
        return;
  }
  
@@@ -627,6 -614,13 +626,6 @@@ static void i915_save_display(struct dr
        /* Don't save them in KMS mode */
        i915_save_modeset_reg(dev);
  
 -      /* CRT state */
 -      if (HAS_PCH_SPLIT(dev)) {
 -              dev_priv->saveADPA = I915_READ(PCH_ADPA);
 -      } else {
 -              dev_priv->saveADPA = I915_READ(ADPA);
 -      }
 -
        /* LVDS state */
        if (HAS_PCH_SPLIT(dev)) {
                dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
                dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
        }
  
 -      /* Display Port state */
 -      if (SUPPORTS_INTEGRATED_DP(dev)) {
 -              dev_priv->saveDP_B = I915_READ(DP_B);
 -              dev_priv->saveDP_C = I915_READ(DP_C);
 -              dev_priv->saveDP_D = I915_READ(DP_D);
 -              dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
 -              dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
 -              dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
 -              dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
 -              dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
 -              dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
 -              dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
 -              dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
 +              /* Display Port state */
 +              if (SUPPORTS_INTEGRATED_DP(dev)) {
 +                      dev_priv->saveDP_B = I915_READ(DP_B);
 +                      dev_priv->saveDP_C = I915_READ(DP_C);
 +                      dev_priv->saveDP_D = I915_READ(DP_D);
 +                      dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
 +                      dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
 +                      dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
 +                      dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
 +                      dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
 +                      dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
 +                      dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
 +                      dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
 +              }
 +              /* FIXME: save TV & SDVO state */
        }
 -      /* FIXME: save TV & SDVO state */
  
        /* Only save FBC state on the platform that supports FBC */
        if (I915_HAS_FBC(dev)) {
@@@ -710,24 -702,28 +709,24 @@@ static void i915_restore_display(struc
        /* Display arbitration */
        I915_WRITE(DSPARB, dev_priv->saveDSPARB);
  
 -      /* Display port ratios (must be done before clock is set) */
 -      if (SUPPORTS_INTEGRATED_DP(dev)) {
 -              I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
 -              I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
 -              I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
 -              I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
 -              I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
 -              I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
 -              I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
 -              I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
 +              /* Display port ratios (must be done before clock is set) */
 +              if (SUPPORTS_INTEGRATED_DP(dev)) {
 +                      I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
 +                      I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
 +                      I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
 +                      I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
 +                      I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
 +                      I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
 +                      I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
 +                      I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
 +              }
        }
  
        /* This is only meaningful in non-KMS mode */
        /* Don't restore them in KMS mode */
        i915_restore_modeset_reg(dev);
  
 -      /* CRT state */
 -      if (HAS_PCH_SPLIT(dev))
 -              I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
 -      else
 -              I915_WRITE(ADPA, dev_priv->saveADPA);
 -
        /* LVDS state */
        if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
                I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
                I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
        }
  
 -      /* Display Port state */
 -      if (SUPPORTS_INTEGRATED_DP(dev)) {
 -              I915_WRITE(DP_B, dev_priv->saveDP_B);
 -              I915_WRITE(DP_C, dev_priv->saveDP_C);
 -              I915_WRITE(DP_D, dev_priv->saveDP_D);
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
 +              /* Display Port state */
 +              if (SUPPORTS_INTEGRATED_DP(dev)) {
 +                      I915_WRITE(DP_B, dev_priv->saveDP_B);
 +                      I915_WRITE(DP_C, dev_priv->saveDP_C);
 +                      I915_WRITE(DP_D, dev_priv->saveDP_D);
 +              }
 +              /* FIXME: restore TV & SDVO state */
        }
 -      /* FIXME: restore TV & SDVO state */
  
        /* only restore FBC info on the platform that supports FBC*/
        intel_disable_fbc(dev);
@@@ -813,27 -807,24 +812,27 @@@ int i915_save_state(struct drm_device *
        mutex_lock(&dev->struct_mutex);
  
        /* Hardware status page */
 -      dev_priv->saveHWS = I915_READ(HWS_PGA);
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET))
 +              dev_priv->saveHWS = I915_READ(HWS_PGA);
  
        i915_save_display(dev);
  
 -      /* Interrupt state */
 -      if (HAS_PCH_SPLIT(dev)) {
 -              dev_priv->saveDEIER = I915_READ(DEIER);
 -              dev_priv->saveDEIMR = I915_READ(DEIMR);
 -              dev_priv->saveGTIER = I915_READ(GTIER);
 -              dev_priv->saveGTIMR = I915_READ(GTIMR);
 -              dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
 -              dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
 -              dev_priv->saveMCHBAR_RENDER_STANDBY =
 -                      I915_READ(RSTDBYCTL);
 -              dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
 -      } else {
 -              dev_priv->saveIER = I915_READ(IER);
 -              dev_priv->saveIMR = I915_READ(IMR);
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
 +              /* Interrupt state */
 +              if (HAS_PCH_SPLIT(dev)) {
 +                      dev_priv->saveDEIER = I915_READ(DEIER);
 +                      dev_priv->saveDEIMR = I915_READ(DEIMR);
 +                      dev_priv->saveGTIER = I915_READ(GTIER);
 +                      dev_priv->saveGTIMR = I915_READ(GTIMR);
 +                      dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
 +                      dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
 +                      dev_priv->saveMCHBAR_RENDER_STANDBY =
 +                              I915_READ(RSTDBYCTL);
 +                      dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
 +              } else {
 +                      dev_priv->saveIER = I915_READ(IER);
 +                      dev_priv->saveIMR = I915_READ(IMR);
 +              }
        }
  
        intel_disable_gt_powersave(dev);
@@@ -867,25 -858,22 +866,25 @@@ int i915_restore_state(struct drm_devic
        mutex_lock(&dev->struct_mutex);
  
        /* Hardware status page */
 -      I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET))
 +              I915_WRITE(HWS_PGA, dev_priv->saveHWS);
  
        i915_restore_display(dev);
  
 -      /* Interrupt state */
 -      if (HAS_PCH_SPLIT(dev)) {
 -              I915_WRITE(DEIER, dev_priv->saveDEIER);
 -              I915_WRITE(DEIMR, dev_priv->saveDEIMR);
 -              I915_WRITE(GTIER, dev_priv->saveGTIER);
 -              I915_WRITE(GTIMR, dev_priv->saveGTIMR);
 -              I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
 -              I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
 -              I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
 -      } else {
 -              I915_WRITE(IER, dev_priv->saveIER);
 -              I915_WRITE(IMR, dev_priv->saveIMR);
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
 +              /* Interrupt state */
 +              if (HAS_PCH_SPLIT(dev)) {
 +                      I915_WRITE(DEIER, dev_priv->saveDEIER);
 +                      I915_WRITE(DEIMR, dev_priv->saveDEIMR);
 +                      I915_WRITE(GTIER, dev_priv->saveGTIER);
 +                      I915_WRITE(GTIMR, dev_priv->saveGTIMR);
 +                      I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
 +                      I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
 +                      I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
 +              } else {
 +                      I915_WRITE(IER, dev_priv->saveIER);
 +                      I915_WRITE(IMR, dev_priv->saveIMR);
 +              }
        }
  
        /* Cache mode state */
  #include <linux/dmi.h>
  #include <linux/i2c.h>
  #include <linux/slab.h>
- #include "drmP.h"
- #include "drm.h"
- #include "drm_crtc.h"
- #include "drm_crtc_helper.h"
- #include "drm_edid.h"
+ #include <drm/drmP.h>
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_edid.h>
  #include "intel_drv.h"
- #include "i915_drm.h"
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  
  /* Here's the desired hotplug mode */
@@@ -235,11 -234,7 +234,11 @@@ static void intel_crt_mode_set(struct d
                           dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
        }
  
 -      adpa = ADPA_HOTPLUG_BITS;
 +      if (HAS_PCH_SPLIT(dev))
 +              adpa = ADPA_HOTPLUG_BITS;
 +      else
 +              adpa = 0;
 +
        if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                adpa |= ADPA_HSYNC_ACTIVE_HIGH;
        if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@@ -662,22 -657,10 +661,22 @@@ static int intel_crt_set_property(struc
  static void intel_crt_reset(struct drm_connector *connector)
  {
        struct drm_device *dev = connector->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crt *crt = intel_attached_crt(connector);
  
 -      if (HAS_PCH_SPLIT(dev))
 +      if (HAS_PCH_SPLIT(dev)) {
 +              u32 adpa;
 +
 +              adpa = I915_READ(PCH_ADPA);
 +              adpa &= ~ADPA_CRT_HOTPLUG_MASK;
 +              adpa |= ADPA_HOTPLUG_BITS;
 +              I915_WRITE(PCH_ADPA, adpa);
 +              POSTING_READ(PCH_ADPA);
 +
 +              DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
                crt->force_hotplug_required = 1;
 +      }
 +
  }
  
  /*
@@@ -796,6 -779,18 +795,6 @@@ void intel_crt_init(struct drm_device *
         * Configure the automatic hotplug detection stuff
         */
        crt->force_hotplug_required = 0;
 -      if (HAS_PCH_SPLIT(dev)) {
 -              u32 adpa;
 -
 -              adpa = I915_READ(PCH_ADPA);
 -              adpa &= ~ADPA_CRT_HOTPLUG_MASK;
 -              adpa |= ADPA_HOTPLUG_BITS;
 -              I915_WRITE(PCH_ADPA, adpa);
 -              POSTING_READ(PCH_ADPA);
 -
 -              DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
 -              crt->force_hotplug_required = 1;
 -      }
  
        dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
  }
  #include <linux/slab.h>
  #include <linux/vgaarb.h>
  #include <drm/drm_edid.h>
- #include "drmP.h"
+ #include <drm/drmP.h>
  #include "intel_drv.h"
- #include "i915_drm.h"
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include "i915_trace.h"
- #include "drm_dp_helper.h"
- #include "drm_crtc_helper.h"
+ #include <drm/drm_dp_helper.h>
+ #include <drm/drm_crtc_helper.h>
  #include <linux/dma_remapping.h>
  
  #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
@@@ -380,7 -380,7 +380,7 @@@ static const intel_limit_t intel_limits
  
  static const intel_limit_t intel_limits_vlv_hdmi = {
        .dot = { .min = 20000, .max = 165000 },
 -      .vco = { .min = 5994000, .max = 4000000 },
 +      .vco = { .min = 4000000, .max = 5994000},
        .n = { .min = 1, .max = 7 },
        .m = { .min = 60, .max = 300 }, /* guess */
        .m1 = { .min = 2, .max = 3 },
  };
  
  static const intel_limit_t intel_limits_vlv_dp = {
 -      .dot = { .min = 162000, .max = 270000 },
 -      .vco = { .min = 5994000, .max = 4000000 },
 +      .dot = { .min = 25000, .max = 270000 },
 +      .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
 -      .m = { .min = 60, .max = 300 }, /* guess */
 +      .m = { .min = 22, .max = 450 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p = { .min = 10, .max = 30 },
@@@ -2806,13 -2806,34 +2806,34 @@@ static void ironlake_fdi_disable(struc
        udelay(100);
  }
  
+ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
+ {
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long flags;
+       bool pending;
+       if (atomic_read(&dev_priv->mm.wedged))
+               return false;
+       spin_lock_irqsave(&dev->event_lock, flags);
+       pending = to_intel_crtc(crtc)->unpin_work != NULL;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+       return pending;
+ }
  static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
  
        if (crtc->fb == NULL)
                return;
  
+       wait_event(dev_priv->pending_flip_queue,
+                  !intel_crtc_has_pending_flip(crtc));
        mutex_lock(&dev->struct_mutex);
        intel_finish_fb(crtc->fb);
        mutex_unlock(&dev->struct_mutex);
@@@ -3197,9 -3218,6 +3218,9 @@@ static void ironlake_crtc_enable(struc
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);
  
 +      if (IS_HASWELL(dev))
 +              intel_ddi_enable_pipe_clock(intel_crtc);
 +
        /* Enable panel fitting for LVDS */
        if (dev_priv->pch_pf_size &&
            (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
         */
        intel_crtc_load_lut(crtc);
  
 +      if (IS_HASWELL(dev)) {
 +              intel_ddi_set_pipe_settings(crtc);
 +              intel_ddi_enable_pipe_func(crtc);
 +      }
 +
        intel_enable_pipe(dev_priv, pipe, is_pch_port);
        intel_enable_plane(dev_priv, plane, pipe);
  
  
        if (HAS_PCH_CPT(dev))
                intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+       /*
+        * There seems to be a race in PCH platform hw (at least on some
+        * outputs) where an enabled pipe still completes any pageflip right
+        * away (as if the pipe is off) instead of waiting for vblank. As soon
+        * as the first vblank happened, everything works as expected. Hence just
+        * wait for one vblank before returning to avoid strange things
+        * happening.
+        */
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
  }
  
  static void ironlake_crtc_disable(struct drm_crtc *crtc)
  
        intel_disable_pipe(dev_priv, pipe);
  
 +      if (IS_HASWELL(dev))
 +              intel_ddi_disable_pipe_func(dev_priv, pipe);
 +
        /* Disable PF */
        I915_WRITE(PF_CTL(pipe), 0);
        I915_WRITE(PF_WIN_SZ(pipe), 0);
  
 +      if (IS_HASWELL(dev))
 +              intel_ddi_disable_pipe_clock(intel_crtc);
 +
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);
@@@ -3334,11 -3351,6 +3365,11 @@@ static void ironlake_crtc_off(struct dr
        intel_put_pch_pll(intel_crtc);
  }
  
 +static void haswell_crtc_off(struct drm_crtc *crtc)
 +{
 +      intel_ddi_put_crtc_pll(crtc);
 +}
 +
  static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
  {
        if (!enable && intel_crtc->overlay) {
@@@ -4038,7 -4050,7 +4069,7 @@@ static void vlv_update_pll(struct drm_c
                           struct drm_display_mode *mode,
                           struct drm_display_mode *adjusted_mode,
                           intel_clock_t *clock, intel_clock_t *reduced_clock,
 -                         int refclk, int num_connectors)
 +                         int num_connectors)
  {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = intel_crtc->pipe;
        u32 dpll, mdiv, pdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
 -      bool is_hdmi;
 +      bool is_sdvo;
 +      u32 temp;
  
 -      is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 +      is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
 +              intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 +
 +      dpll = DPLL_VGA_MODE_DIS;
 +      dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
 +      dpll |= DPLL_REFA_CLK_ENABLE_VLV;
 +      dpll |= DPLL_INTEGRATED_CLOCK_VLV;
 +
 +      I915_WRITE(DPLL(pipe), dpll);
 +      POSTING_READ(DPLL(pipe));
  
        bestn = clock->n;
        bestm1 = clock->m1;
        bestp1 = clock->p1;
        bestp2 = clock->p2;
  
 -      /* Enable DPIO clock input */
 -      dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
 -              DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
 -      I915_WRITE(DPLL(pipe), dpll);
 -      POSTING_READ(DPLL(pipe));
 -
 +      /*
 +       * In Valleyview PLL and program lane counter registers are exposed
 +       * through DPIO interface
 +       */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
        mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
        mdiv |= ((bestn << DPIO_N_SHIFT));
  
        intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
  
 -      pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
 +      pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
                (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
 -              (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
 +              (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
 +              (5 << DPIO_CLK_BIAS_CTL_SHIFT);
        intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
  
 -      intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
 +      intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
  
        dpll |= DPLL_VCO_ENABLE;
        I915_WRITE(DPLL(pipe), dpll);
        if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
  
 -      if (is_hdmi) {
 -              u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
 +      intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
 +
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
 +              intel_dp_set_m_n(crtc, mode, adjusted_mode);
  
 +      I915_WRITE(DPLL(pipe), dpll);
 +
 +      /* Wait for the clocks to stabilize. */
 +      POSTING_READ(DPLL(pipe));
 +      udelay(150);
 +
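 +      /* DPLL_MD carries the UDI pixel multiplier for SDVO/HDMI outputs; a
 +       * multiplier of one is written as zero. */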
 +      temp = 0;
 +      if (is_sdvo) {
 +              temp = intel_mode_get_pixel_multiplier(adjusted_mode);
                if (temp > 1)
                        temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
                else
                        temp = 0;
 -
 -              I915_WRITE(DPLL_MD(pipe), temp);
 -              POSTING_READ(DPLL_MD(pipe));
        }
 +      I915_WRITE(DPLL_MD(pipe), temp);
 +      POSTING_READ(DPLL_MD(pipe));
  
 -      intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
 +      /* Now program lane control registers */
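 +      /* 0x1000C4 is an opaque lane configuration value; bit 21 apparently
 +       * routes the data channel to pipe B. */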
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
 +          intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
 +              temp = 0x1000C4;
 +              if (pipe == 1)
 +                      temp |= (1 << 21);
 +              intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
 +      }
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
 +              temp = 0x1000C4;
 +              if (pipe == 1)
 +                      temp |= (1 << 21);
 +              intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
 +      }
  }
  
  static void i9xx_update_pll(struct drm_crtc *crtc,
        u32 dpll;
        bool is_sdvo;
  
 +      i9xx_update_pll_dividers(crtc, clock, reduced_clock);
 +
        is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
                intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
  
  
  static void i8xx_update_pll(struct drm_crtc *crtc,
                            struct drm_display_mode *adjusted_mode,
 -                          intel_clock_t *clock,
 +                          intel_clock_t *clock, intel_clock_t *reduced_clock,
                            int num_connectors)
  {
        struct drm_device *dev = crtc->dev;
        int pipe = intel_crtc->pipe;
        u32 dpll;
  
 +      i9xx_update_pll_dividers(crtc, clock, reduced_clock);
 +
        dpll = DPLL_VGA_MODE_DIS;
  
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        I915_WRITE(DPLL(pipe), dpll);
  }
  
 +static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
 +                                 struct drm_display_mode *mode,
 +                                 struct drm_display_mode *adjusted_mode)
 +{
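 +      /* Program the pipe timing registers from the adjusted mode; shared by
 +       * the i9xx, ironlake and haswell mode_set paths. */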
 +      struct drm_device *dev = intel_crtc->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      enum pipe pipe = intel_crtc->pipe;
 +      uint32_t vsyncshift;
 +
 +      if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 +              /* the chip adds 2 halflines automatically */
 +              adjusted_mode->crtc_vtotal -= 1;
 +              adjusted_mode->crtc_vblank_end -= 1;
 +              vsyncshift = adjusted_mode->crtc_hsync_start
 +                           - adjusted_mode->crtc_htotal / 2;
 +      } else {
 +              vsyncshift = 0;
 +      }
 +
 +      if (INTEL_INFO(dev)->gen > 3)
 +              I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
 +
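 +      /* Each timing register packs two fields: bits 15:0 hold the first
 +       * value minus one and bits 31:16 the second. */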
 +      I915_WRITE(HTOTAL(pipe),
 +                 (adjusted_mode->crtc_hdisplay - 1) |
 +                 ((adjusted_mode->crtc_htotal - 1) << 16));
 +      I915_WRITE(HBLANK(pipe),
 +                 (adjusted_mode->crtc_hblank_start - 1) |
 +                 ((adjusted_mode->crtc_hblank_end - 1) << 16));
 +      I915_WRITE(HSYNC(pipe),
 +                 (adjusted_mode->crtc_hsync_start - 1) |
 +                 ((adjusted_mode->crtc_hsync_end - 1) << 16));
 +
 +      I915_WRITE(VTOTAL(pipe),
 +                 (adjusted_mode->crtc_vdisplay - 1) |
 +                 ((adjusted_mode->crtc_vtotal - 1) << 16));
 +      I915_WRITE(VBLANK(pipe),
 +                 (adjusted_mode->crtc_vblank_start - 1) |
 +                 ((adjusted_mode->crtc_vblank_end - 1) << 16));
 +      I915_WRITE(VSYNC(pipe),
 +                 (adjusted_mode->crtc_vsync_start - 1) |
 +                 ((adjusted_mode->crtc_vsync_end - 1) << 16));
 +
 +      /* pipesrc controls the size that is scaled from, which should
 +       * always be the user's requested size.
 +       */
 +      I915_WRITE(PIPESRC(pipe),
 +                 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 +}
 +
  static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                              struct drm_display_mode *mode,
                              struct drm_display_mode *adjusted_mode,
        int plane = intel_crtc->plane;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
 -      u32 dspcntr, pipeconf, vsyncshift;
 +      u32 dspcntr, pipeconf;
        bool ok, has_reduced_clock = false, is_sdvo = false;
        bool is_lvds = false, is_tv = false, is_dp = false;
        struct intel_encoder *encoder;
        if (is_sdvo && is_tv)
                i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
  
 -      i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
 -                               &reduced_clock : NULL);
 -
        if (IS_GEN2(dev))
 -              i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
 +              i8xx_update_pll(crtc, adjusted_mode, &clock,
 +                              has_reduced_clock ? &reduced_clock : NULL,
 +                              num_connectors);
        else if (IS_VALLEYVIEW(dev))
 -              vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
 -                             refclk, num_connectors);
 +              vlv_update_pll(crtc, mode, adjusted_mode, &clock,
 +                              has_reduced_clock ? &reduced_clock : NULL,
 +                              num_connectors);
        else
                i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
                                has_reduced_clock ? &reduced_clock : NULL,
        /* default to 8bpc */
        pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
        if (is_dp) {
-               if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+               if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
                        pipeconf |= PIPECONF_BPP_6 |
                                    PIPECONF_DITHER_EN |
                                    PIPECONF_DITHER_TYPE_SP;
                }
        }
  
 +      if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
 +              if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
 +                      pipeconf |= PIPECONF_BPP_6 |
 +                                      PIPECONF_ENABLE |
 +                                      I965_PIPECONF_ACTIVE;
 +              }
 +      }
 +
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
        drm_mode_debug_printmodeline(mode);
  
  
        pipeconf &= ~PIPECONF_INTERLACE_MASK;
        if (!IS_GEN2(dev) &&
 -          adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 +          adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
 -              /* the chip adds 2 halflines automatically */
 -              adjusted_mode->crtc_vtotal -= 1;
 -              adjusted_mode->crtc_vblank_end -= 1;
 -              vsyncshift = adjusted_mode->crtc_hsync_start
 -                           - adjusted_mode->crtc_htotal/2;
 -      } else {
 +      else
                pipeconf |= PIPECONF_PROGRESSIVE;
 -              vsyncshift = 0;
 -      }
 -
 -      if (!IS_GEN3(dev))
 -              I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
 -
 -      I915_WRITE(HTOTAL(pipe),
 -                 (adjusted_mode->crtc_hdisplay - 1) |
 -                 ((adjusted_mode->crtc_htotal - 1) << 16));
 -      I915_WRITE(HBLANK(pipe),
 -                 (adjusted_mode->crtc_hblank_start - 1) |
 -                 ((adjusted_mode->crtc_hblank_end - 1) << 16));
 -      I915_WRITE(HSYNC(pipe),
 -                 (adjusted_mode->crtc_hsync_start - 1) |
 -                 ((adjusted_mode->crtc_hsync_end - 1) << 16));
  
 -      I915_WRITE(VTOTAL(pipe),
 -                 (adjusted_mode->crtc_vdisplay - 1) |
 -                 ((adjusted_mode->crtc_vtotal - 1) << 16));
 -      I915_WRITE(VBLANK(pipe),
 -                 (adjusted_mode->crtc_vblank_start - 1) |
 -                 ((adjusted_mode->crtc_vblank_end - 1) << 16));
 -      I915_WRITE(VSYNC(pipe),
 -                 (adjusted_mode->crtc_vsync_start - 1) |
 -                 ((adjusted_mode->crtc_vsync_end - 1) << 16));
 +      intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
  
        /* pipesrc and dspsize control the size that is scaled from,
         * which should always be the user's requested size.
                   ((mode->vdisplay - 1) << 16) |
                   (mode->hdisplay - 1));
        I915_WRITE(DSPPOS(plane), 0);
 -      I915_WRITE(PIPESRC(pipe),
 -                 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
  
        I915_WRITE(PIPECONF(pipe), pipeconf);
        POSTING_READ(PIPECONF(pipe));
@@@ -4710,8 -4657,8 +4741,8 @@@ static void ironlake_set_pipeconf(struc
                val |= PIPE_12BPC;
                break;
        default:
 -              val |= PIPE_8BPC;
 -              break;
 +              /* Case prevented by intel_choose_pipe_bpp_dither. */
 +              BUG();
        }
  
        val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
        POSTING_READ(PIPECONF(pipe));
  }
  
 +static void haswell_set_pipeconf(struct drm_crtc *crtc,
 +                               struct drm_display_mode *adjusted_mode,
 +                               bool dither)
 +{
 +      struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      uint32_t val;
 +
 +      val = I915_READ(PIPECONF(pipe));
 +
 +      val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
 +      if (dither)
 +              val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 +
 +      val &= ~PIPECONF_INTERLACE_MASK_HSW;
 +      if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
 +              val |= PIPECONF_INTERLACED_ILK;
 +      else
 +              val |= PIPECONF_PROGRESSIVE;
 +
 +      I915_WRITE(PIPECONF(pipe), val);
 +      POSTING_READ(PIPECONF(pipe));
 +}
 +
  static bool ironlake_compute_clocks(struct drm_crtc *crtc,
                                    struct drm_display_mode *adjusted_mode,
                                    intel_clock_t *clock,
        return true;
  }
  
 -static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 -                                struct drm_display_mode *mode,
 -                                struct drm_display_mode *adjusted_mode,
 -                                int x, int y,
 -                                struct drm_framebuffer *fb)
 +static void ironlake_set_m_n(struct drm_crtc *crtc,
 +                           struct drm_display_mode *mode,
 +                           struct drm_display_mode *adjusted_mode)
  {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 -      int plane = intel_crtc->plane;
 -      int num_connectors = 0;
 -      intel_clock_t clock, reduced_clock;
 -      u32 dpll, fp = 0, fp2 = 0;
 -      bool ok, has_reduced_clock = false, is_sdvo = false;
 -      bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
 -      struct intel_encoder *encoder, *edp_encoder = NULL;
 -      int ret;
 +      enum pipe pipe = intel_crtc->pipe;
 +      struct intel_encoder *intel_encoder, *edp_encoder = NULL;
        struct fdi_m_n m_n = {0};
 -      u32 temp;
 -      int target_clock, pixel_multiplier, lane, link_bw, factor;
 -      unsigned int pipe_bpp;
 -      bool dither;
 -      bool is_cpu_edp = false, is_pch_edp = false;
 +      int target_clock, pixel_multiplier, lane, link_bw;
 +      bool is_dp = false, is_cpu_edp = false;
  
 -      for_each_encoder_on_crtc(dev, crtc, encoder) {
 -              switch (encoder->type) {
 -              case INTEL_OUTPUT_LVDS:
 -                      is_lvds = true;
 -                      break;
 -              case INTEL_OUTPUT_SDVO:
 -              case INTEL_OUTPUT_HDMI:
 -                      is_sdvo = true;
 -                      if (encoder->needs_tv_clock)
 -                              is_tv = true;
 -                      break;
 -              case INTEL_OUTPUT_TVOUT:
 -                      is_tv = true;
 -                      break;
 -              case INTEL_OUTPUT_ANALOG:
 -                      is_crt = true;
 -                      break;
 +      for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 +              switch (intel_encoder->type) {
                case INTEL_OUTPUT_DISPLAYPORT:
                        is_dp = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        is_dp = true;
 -                      if (intel_encoder_is_pch_edp(&encoder->base))
 -                              is_pch_edp = true;
 -                      else
 +                      if (!intel_encoder_is_pch_edp(&intel_encoder->base))
                                is_cpu_edp = true;
 -                      edp_encoder = encoder;
 +                      edp_encoder = intel_encoder;
                        break;
                }
 -
 -              num_connectors++;
 -      }
 -
 -      ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
 -                                   &has_reduced_clock, &reduced_clock);
 -      if (!ok) {
 -              DRM_ERROR("Couldn't find PLL settings for mode!\n");
 -              return -EINVAL;
        }
  
 -      /* Ensure that the cursor is valid for the new mode before changing... */
 -      intel_crtc_update_cursor(crtc, true);
 -
        /* FDI link */
        pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
        lane = 0;
        else
                target_clock = adjusted_mode->clock;
  
 -      /* determine panel color depth */
 -      dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
 -                                            adjusted_mode);
 -      if (is_lvds && dev_priv->lvds_dither)
 -              dither = true;
 -
 -      if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
 -          pipe_bpp != 36) {
 -              WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
 -                   pipe_bpp);
 -              pipe_bpp = 24;
 -      }
 -      intel_crtc->bpp = pipe_bpp;
 -
        if (!lane) {
                /*
                 * Account for spread spectrum to avoid
        ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
                             &m_n);
  
 -      fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
 -      if (has_reduced_clock)
 -              fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
 -                      reduced_clock.m2;
 +      I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
 +      I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
 +      I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
 +      I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 +}
 +
 +static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 +                                    struct drm_display_mode *adjusted_mode,
 +                                    intel_clock_t *clock, u32 fp)
 +{
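 +      /* Shared between ironlake_crtc_mode_set and haswell_crtc_mode_set. */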
 +      struct drm_crtc *crtc = &intel_crtc->base;
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_encoder *intel_encoder;
 +      uint32_t dpll;
 +      int factor, pixel_multiplier, num_connectors = 0;
 +      bool is_lvds = false, is_sdvo = false, is_tv = false;
 +      bool is_dp = false, is_cpu_edp = false;
 +
 +      for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 +              switch (intel_encoder->type) {
 +              case INTEL_OUTPUT_LVDS:
 +                      is_lvds = true;
 +                      break;
 +              case INTEL_OUTPUT_SDVO:
 +              case INTEL_OUTPUT_HDMI:
 +                      is_sdvo = true;
 +                      if (intel_encoder->needs_tv_clock)
 +                              is_tv = true;
 +                      break;
 +              case INTEL_OUTPUT_TVOUT:
 +                      is_tv = true;
 +                      break;
 +              case INTEL_OUTPUT_DISPLAYPORT:
 +                      is_dp = true;
 +                      break;
 +              case INTEL_OUTPUT_EDP:
 +                      is_dp = true;
 +                      if (!intel_encoder_is_pch_edp(&intel_encoder->base))
 +                              is_cpu_edp = true;
 +                      break;
 +              }
 +
 +              num_connectors++;
 +      }
  
        /* Enable autotuning of the PLL clock (if permissible) */
        factor = 21;
        } else if (is_sdvo && is_tv)
                factor = 20;
  
 -      if (clock.m < factor * clock.n)
 +      if (clock->m < factor * clock->n)
                fp |= FP_CB_TUNE;
  
        dpll = 0;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
        if (is_sdvo) {
 -              int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 +              pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
                if (pixel_multiplier > 1) {
                        dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
                }
                dpll |= DPLL_DVO_HIGH_SPEED;
  
        /* compute bitmask from p1 value */
 -      dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 +      dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        /* also FPA1 */
 -      dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 +      dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
  
 -      switch (clock.p2) {
 +      switch (clock->p2) {
        case 5:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                break;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;
  
 +      return dpll;
 +}
 +
 +static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 +                                struct drm_display_mode *mode,
 +                                struct drm_display_mode *adjusted_mode,
 +                                int x, int y,
 +                                struct drm_framebuffer *fb)
 +{
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      int plane = intel_crtc->plane;
 +      int num_connectors = 0;
 +      intel_clock_t clock, reduced_clock;
 +      u32 dpll, fp = 0, fp2 = 0;
 +      bool ok, has_reduced_clock = false;
 +      bool is_lvds = false, is_dp = false, is_cpu_edp = false;
 +      struct intel_encoder *encoder;
 +      u32 temp;
 +      int ret;
 +      bool dither;
 +
 +      for_each_encoder_on_crtc(dev, crtc, encoder) {
 +              switch (encoder->type) {
 +              case INTEL_OUTPUT_LVDS:
 +                      is_lvds = true;
 +                      break;
 +              case INTEL_OUTPUT_DISPLAYPORT:
 +                      is_dp = true;
 +                      break;
 +              case INTEL_OUTPUT_EDP:
 +                      is_dp = true;
 +                      if (!intel_encoder_is_pch_edp(&encoder->base))
 +                              is_cpu_edp = true;
 +                      break;
 +              }
 +
 +              num_connectors++;
 +      }
 +
 +      WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
 +           "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 +
 +      ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
 +                                   &has_reduced_clock, &reduced_clock);
 +      if (!ok) {
 +              DRM_ERROR("Couldn't find PLL settings for mode!\n");
 +              return -EINVAL;
 +      }
 +
 +      /* Ensure that the cursor is valid for the new mode before changing... */
 +      intel_crtc_update_cursor(crtc, true);
 +
 +      /* determine panel color depth */
 +      dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, mode);
 +      if (is_lvds && dev_priv->lvds_dither)
 +              dither = true;
 +
 +      fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
 +      if (has_reduced_clock)
 +              fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
 +                      reduced_clock.m2;
 +
 +      dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
 +
        DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
        drm_mode_debug_printmodeline(mode);
  
 -      /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
 -       * pre-Haswell/LPT generation */
 -      if (HAS_PCH_LPT(dev)) {
 -              DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
 -                              pipe);
 -      } else if (!is_cpu_edp) {
 +      /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
 +      if (!is_cpu_edp) {
                struct intel_pch_pll *pll;
  
                pll = intel_get_pch_pll(intel_crtc, dpll, fp);
                }
        }
  
 -      if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 -              /* the chip adds 2 halflines automatically */
 -              adjusted_mode->crtc_vtotal -= 1;
 -              adjusted_mode->crtc_vblank_end -= 1;
 -              I915_WRITE(VSYNCSHIFT(pipe),
 -                         adjusted_mode->crtc_hsync_start
 -                         - adjusted_mode->crtc_htotal/2);
 -      } else {
 -              I915_WRITE(VSYNCSHIFT(pipe), 0);
 -      }
 -
 -      I915_WRITE(HTOTAL(pipe),
 -                 (adjusted_mode->crtc_hdisplay - 1) |
 -                 ((adjusted_mode->crtc_htotal - 1) << 16));
 -      I915_WRITE(HBLANK(pipe),
 -                 (adjusted_mode->crtc_hblank_start - 1) |
 -                 ((adjusted_mode->crtc_hblank_end - 1) << 16));
 -      I915_WRITE(HSYNC(pipe),
 -                 (adjusted_mode->crtc_hsync_start - 1) |
 -                 ((adjusted_mode->crtc_hsync_end - 1) << 16));
 -
 -      I915_WRITE(VTOTAL(pipe),
 -                 (adjusted_mode->crtc_vdisplay - 1) |
 -                 ((adjusted_mode->crtc_vtotal - 1) << 16));
 -      I915_WRITE(VBLANK(pipe),
 -                 (adjusted_mode->crtc_vblank_start - 1) |
 -                 ((adjusted_mode->crtc_vblank_end - 1) << 16));
 -      I915_WRITE(VSYNC(pipe),
 -                 (adjusted_mode->crtc_vsync_start - 1) |
 -                 ((adjusted_mode->crtc_vsync_end - 1) << 16));
 -
 -      /* pipesrc controls the size that is scaled from, which should
 -       * always be the user's requested size.
 -       */
 -      I915_WRITE(PIPESRC(pipe),
 -                 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 +      intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
  
 -      I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
 -      I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
 -      I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
 -      I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 +      ironlake_set_m_n(crtc, mode, adjusted_mode);
  
        if (is_cpu_edp)
                ironlake_set_pll_edp(crtc, adjusted_mode->clock);
        return ret;
  }
  
 +static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 +                               struct drm_display_mode *mode,
 +                               struct drm_display_mode *adjusted_mode,
 +                               int x, int y,
 +                               struct drm_framebuffer *fb)
 +{
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      int plane = intel_crtc->plane;
 +      int num_connectors = 0;
 +      intel_clock_t clock, reduced_clock;
 +      u32 dpll = 0, fp = 0, fp2 = 0;
 +      bool ok, has_reduced_clock = false;
 +      bool is_lvds = false, is_dp = false, is_cpu_edp = false;
 +      struct intel_encoder *encoder;
 +      u32 temp;
 +      int ret;
 +      bool dither;
 +
 +      for_each_encoder_on_crtc(dev, crtc, encoder) {
 +              switch (encoder->type) {
 +              case INTEL_OUTPUT_LVDS:
 +                      is_lvds = true;
 +                      break;
 +              case INTEL_OUTPUT_DISPLAYPORT:
 +                      is_dp = true;
 +                      break;
 +              case INTEL_OUTPUT_EDP:
 +                      is_dp = true;
 +                      if (!intel_encoder_is_pch_edp(&encoder->base))
 +                              is_cpu_edp = true;
 +                      break;
 +              }
 +
 +              num_connectors++;
 +      }
 +
 +      /* We are not sure yet this won't happen. */
 +      WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
 +           INTEL_PCH_TYPE(dev));
 +
 +      WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
 +           num_connectors, pipe_name(pipe));
 +
 +      WARN_ON(I915_READ(PIPECONF(pipe)) &
 +              (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
 +
 +      WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
 +
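 +      /* On Haswell the pipe clock comes from a shared DDI PLL, so let the
 +       * DDI code select and program one for this dot clock. */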
 +      if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
 +              return -EINVAL;
 +
 +      if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
 +              ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
 +                                           &has_reduced_clock,
 +                                           &reduced_clock);
 +              if (!ok) {
 +                      DRM_ERROR("Couldn't find PLL settings for mode!\n");
 +                      return -EINVAL;
 +              }
 +      }
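 +      /* The IBX/CPT branches in this function are carried over from the
 +       * ironlake path and are not expected to run on LPT parts (see the
 +       * WARN above). */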
 +
 +      /* Ensure that the cursor is valid for the new mode before changing... */
 +      intel_crtc_update_cursor(crtc, true);
 +
 +      /* determine panel color depth */
 +      dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, mode);
 +      if (is_lvds && dev_priv->lvds_dither)
 +              dither = true;
 +
 +      DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 +      drm_mode_debug_printmodeline(mode);
 +
 +      if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
 +              fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
 +              if (has_reduced_clock)
 +                      fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
 +                            reduced_clock.m2;
 +
 +              dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
 +                                           fp);
 +
 +              /* CPU eDP is the only output that doesn't need a PCH PLL of its
 +               * own on pre-Haswell/LPT generations. */
 +              if (!is_cpu_edp) {
 +                      struct intel_pch_pll *pll;
 +
 +                      pll = intel_get_pch_pll(intel_crtc, dpll, fp);
 +                      if (pll == NULL) {
 +                              DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
 +                                               pipe);
 +                              return -EINVAL;
 +                      }
 +              } else
 +                      intel_put_pch_pll(intel_crtc);
 +
 +              /* The LVDS pin pair needs to be on before the DPLLs are
 +               * enabled.  This is an exception to the general rule that
 +               * mode_set doesn't turn things on.
 +               */
 +              if (is_lvds) {
 +                      temp = I915_READ(PCH_LVDS);
 +                      temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 +                      if (HAS_PCH_CPT(dev)) {
 +                              temp &= ~PORT_TRANS_SEL_MASK;
 +                              temp |= PORT_TRANS_SEL_CPT(pipe);
 +                      } else {
 +                              if (pipe == 1)
 +                                      temp |= LVDS_PIPEB_SELECT;
 +                              else
 +                                      temp &= ~LVDS_PIPEB_SELECT;
 +                      }
 +
 +                      /* set the corresponding LVDS_BORDER bit */
 +                      temp |= dev_priv->lvds_border_bits;
 +                      /* Set the B0-B3 data pairs corresponding to whether
 +                       * we're going to set the DPLLs for dual-channel mode or
 +                       * not.
 +                       */
 +                      if (clock.p2 == 7)
 +                              temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
 +                      else
 +                              temp &= ~(LVDS_B0B3_POWER_UP |
 +                                        LVDS_CLKB_POWER_UP);
 +
 +                      /* It would be nice to set 24 vs 18-bit mode
 +                       * (LVDS_A3_POWER_UP) appropriately here, but we need to
 +                       * look more thoroughly into how panels behave in the
 +                       * two modes.
 +                       */
 +                      temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
 +                      if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 +                              temp |= LVDS_HSYNC_POLARITY;
 +                      if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 +                              temp |= LVDS_VSYNC_POLARITY;
 +                      I915_WRITE(PCH_LVDS, temp);
 +              }
 +      }
 +
 +      if (is_dp && !is_cpu_edp) {
 +              intel_dp_set_m_n(crtc, mode, adjusted_mode);
 +      } else {
 +              if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
 +                      /* For non-DP output, clear any trans DP clock recovery
 +                       * setting. */
 +                      I915_WRITE(TRANSDATA_M1(pipe), 0);
 +                      I915_WRITE(TRANSDATA_N1(pipe), 0);
 +                      I915_WRITE(TRANSDPLINK_M1(pipe), 0);
 +                      I915_WRITE(TRANSDPLINK_N1(pipe), 0);
 +              }
 +      }
 +
 +      intel_crtc->lowfreq_avail = false;
 +      if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
 +              if (intel_crtc->pch_pll) {
 +                      I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
 +
 +                      /* Wait for the clocks to stabilize. */
 +                      POSTING_READ(intel_crtc->pch_pll->pll_reg);
 +                      udelay(150);
 +
 +                      /* The pixel multiplier can only be updated once the
 +                       * DPLL is enabled and the clocks are stable.
 +                       *
 +                       * So write it again.
 +                       */
 +                      I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
 +              }
 +
 +              if (intel_crtc->pch_pll) {
 +                      if (is_lvds && has_reduced_clock && i915_powersave) {
 +                              I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
 +                              intel_crtc->lowfreq_avail = true;
 +                      } else {
 +                              I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
 +                      }
 +              }
 +      }
 +
 +      intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 +
 +      if (!is_dp || is_cpu_edp)
 +              ironlake_set_m_n(crtc, mode, adjusted_mode);
 +
 +      if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 +              if (is_cpu_edp)
 +                      ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 +
 +      haswell_set_pipeconf(crtc, adjusted_mode, dither);
 +
 +      /* Set up the display plane register */
 +      I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
 +      POSTING_READ(DSPCNTR(plane));
 +
 +      ret = intel_pipe_set_base(crtc, x, y, fb);
 +
 +      intel_update_watermarks(dev);
 +
 +      intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
 +
 +      return ret;
 +}
 +
  static int intel_crtc_mode_set(struct drm_crtc *crtc,
                               struct drm_display_mode *mode,
                               struct drm_display_mode *adjusted_mode,
@@@ -6485,15 -6191,13 +6516,13 @@@ static void do_intel_finish_page_flip(s
        struct intel_unpin_work *work;
        struct drm_i915_gem_object *obj;
        struct drm_pending_vblank_event *e;
-       struct timeval tnow, tvbl;
+       struct timeval tvbl;
        unsigned long flags;
  
        /* Ignore early vblank irqs */
        if (intel_crtc == NULL)
                return;
  
-       do_gettimeofday(&tnow);
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
        if (work == NULL || !work->pending) {
                e = work->event;
                e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
  
-               /* Called before vblank count and timestamps have
-                * been updated for the vblank interval of flip
-                * completion? Need to increment vblank count and
-                * add one videorefresh duration to returned timestamp
-                * to account for this. We assume this happened if we
-                * get called over 0.9 frame durations after the last
-                * timestamped vblank.
-                *
-                * This calculation can not be used with vrefresh rates
-                * below 5Hz (10Hz to be on the safe side) without
-                * promoting to 64 integers.
-                */
-               if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
-                   9 * crtc->framedur_ns) {
-                       e->event.sequence++;
-                       tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
-                                            crtc->framedur_ns);
-               }
                e->event.tv_sec = tvbl.tv_sec;
                e->event.tv_usec = tvbl.tv_usec;
  
  
        atomic_clear_mask(1 << intel_crtc->plane,
                          &obj->pending_flip.counter);
-       if (atomic_read(&obj->pending_flip) == 0)
-               wake_up(&dev_priv->pending_flip_queue);
  
+       wake_up(&dev_priv->pending_flip_queue);
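+       /* The wake_up above is unconditional; flip waiters recheck
+        * obj->pending_flip themselves, so no pending check is needed. */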
        schedule_work(&work->work);
  
        trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@@ -7725,12 -7409,6 +7734,12 @@@ static const struct drm_crtc_funcs inte
        .page_flip = intel_crtc_page_flip,
  };
  
 +static void intel_cpu_pll_init(struct drm_device *dev)
 +{
 +      if (IS_HASWELL(dev))
 +              intel_ddi_pll_init(dev);
 +}
 +
  static void intel_pch_pll_init(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@@ -7919,10 -7597,6 +7928,10 @@@ static void intel_setup_outputs(struct 
        } else if (IS_VALLEYVIEW(dev)) {
                int found;
  
 +              /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
 +              if (I915_READ(DP_C) & DP_DETECTED)
 +                      intel_dp_init(dev, DP_C, PORT_C);
 +
                if (I915_READ(SDVOB) & PORT_DETECTED) {
                        /* SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev, SDVOB, true);
                if (I915_READ(SDVOC) & PORT_DETECTED)
                        intel_hdmi_init(dev, SDVOC, PORT_C);
  
 -              /* Shares lanes with HDMI on SDVOC */
 -              if (I915_READ(DP_C) & DP_DETECTED)
 -                      intel_dp_init(dev, DP_C, PORT_C);
        } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
                bool found = false;
  
@@@ -8088,13 -7765,7 +8097,13 @@@ static void intel_init_display(struct d
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        /* We always want a DPMS function */
 -      if (HAS_PCH_SPLIT(dev)) {
 +      if (IS_HASWELL(dev)) {
 +              dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
 +              dev_priv->display.crtc_enable = ironlake_crtc_enable;
 +              dev_priv->display.crtc_disable = ironlake_crtc_disable;
 +              dev_priv->display.off = haswell_crtc_off;
 +              dev_priv->display.update_plane = ironlake_update_plane;
 +      } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@@ -8231,8 -7902,7 +8240,7 @@@ static struct intel_quirk intel_quirks[
        /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
        { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
  
-       /* 855 & before need to leave pipe A & dpll A up */
-       { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+       /* 830/845 need to leave pipe A & dpll A up */
        { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
  
@@@ -8345,7 -8015,6 +8353,7 @@@ void intel_modeset_init(struct drm_devi
                        DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
        }
  
 +      intel_cpu_pll_init(dev);
        intel_pch_pll_init(dev);
  
        /* Just disable it once at startup */
@@@ -8389,29 -8058,42 +8397,42 @@@ static void intel_enable_pipe_a(struct 
  
  }
  
+ static bool
+ intel_check_plane_mapping(struct intel_crtc *crtc)
+ {
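+       /* Returns false when the other display plane is enabled but selects
+        * this crtc's pipe, i.e. a crossed plane -> pipe mapping (typically
+        * left behind by the BIOS). */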
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       u32 reg, val;
+ 
+       if (dev_priv->num_pipe == 1)
+               return true;
+ 
+       reg = DSPCNTR(!crtc->plane);
+       val = I915_READ(reg);
+ 
+       if ((val & DISPLAY_PLANE_ENABLE) &&
+           (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+               return false;
+ 
+       return true;
+ }
+ 
  static void intel_sanitize_crtc(struct intel_crtc *crtc)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 reg, val;
+       u32 reg;
  
        /* Clear any frame start delays used for debugging left by the BIOS */
        reg = PIPECONF(crtc->pipe);
        I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
  
        /* We need to sanitize the plane -> pipe mapping first because this will
-        * disable the crtc (and hence change the state) if it is wrong. */
-       if (!HAS_PCH_SPLIT(dev)) {
+        * disable the crtc (and hence change the state) if it is wrong. Note
+        * that gen4+ has a fixed plane -> pipe mapping.  */
+       if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
                struct intel_connector *connector;
                bool plane;
  
-               reg = DSPCNTR(crtc->plane);
-               val = I915_READ(reg);
-               if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
-                   (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
-                       goto ok;
                DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
                              crtc->base.base.id);
  
                WARN_ON(crtc->active);
                crtc->base.enabled = false;
        }
- ok:
  
        if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
            crtc->pipe == PIPE_A && !crtc->active) {
@@@ -8547,9 -8228,6 +8567,9 @@@ void intel_modeset_setup_hw_state(struc
                              crtc->active ? "enabled" : "disabled");
        }
  
 +      if (IS_HASWELL(dev))
 +              intel_ddi_setup_hw_pll_state(dev);
 +
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                pipe = 0;
        intel_modeset_update_staged_output_state(dev);
  
        intel_modeset_check_state(dev);
 +
 +      drm_mode_config_reset(dev);
  }
  
  void intel_modeset_gem_init(struct drm_device *dev)
  #include <linux/i2c.h>
  #include <linux/slab.h>
  #include <linux/export.h>
- #include "drmP.h"
- #include "drm.h"
- #include "drm_crtc.h"
- #include "drm_crtc_helper.h"
- #include "drm_edid.h"
+ #include <drm/drmP.h>
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_edid.h>
  #include "intel_drv.h"
- #include "i915_drm.h"
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  
  #define DP_RECEIVER_CAP_SIZE  0xf
@@@ -77,6 -76,11 +76,6 @@@ static bool is_cpu_edp(struct intel_dp 
        return is_edp(intel_dp) && !is_pch_edp(intel_dp);
  }
  
 -static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
 -{
 -      return container_of(encoder, struct intel_dp, base.base);
 -}
 -
  static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
  {
        return container_of(intel_attached_encoder(connector),
@@@ -102,6 -106,8 +101,6 @@@ bool intel_encoder_is_pch_edp(struct dr
        return is_pch_edp(intel_dp);
  }
  
 -static void intel_dp_start_link_train(struct intel_dp *intel_dp);
 -static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
  static void intel_dp_link_down(struct intel_dp *intel_dp);
  
  void
@@@ -279,10 -285,6 +278,10 @@@ intel_hrawclk(struct drm_device *dev
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t clkcfg;
  
 +      /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
 +      if (IS_VALLEYVIEW(dev))
 +              return 200;
 +
        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
@@@ -354,29 -356,6 +353,29 @@@ intel_dp_aux_ch(struct intel_dp *intel_
        uint32_t aux_clock_divider;
        int try, precharge;
  
 +      if (IS_HASWELL(dev)) {
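 +              /* The AUX channel control/data registers are per-port on
 +               * Haswell. */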
 +              switch (intel_dp->port) {
 +              case PORT_A:
 +                      ch_ctl = DPA_AUX_CH_CTL;
 +                      ch_data = DPA_AUX_CH_DATA1;
 +                      break;
 +              case PORT_B:
 +                      ch_ctl = PCH_DPB_AUX_CH_CTL;
 +                      ch_data = PCH_DPB_AUX_CH_DATA1;
 +                      break;
 +              case PORT_C:
 +                      ch_ctl = PCH_DPC_AUX_CH_CTL;
 +                      ch_data = PCH_DPC_AUX_CH_DATA1;
 +                      break;
 +              case PORT_D:
 +                      ch_ctl = PCH_DPD_AUX_CH_CTL;
 +                      ch_data = PCH_DPD_AUX_CH_DATA1;
 +                      break;
 +              default:
 +                      BUG();
 +              }
 +      }
 +
        intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
         * and would like to run at 2MHz. So, take the
         * clock divider.
         */
        if (is_cpu_edp(intel_dp)) {
 -              if (IS_GEN6(dev) || IS_GEN7(dev))
 +              if (IS_VALLEYVIEW(dev))
 +                      aux_clock_divider = 100;
 +              else if (IS_GEN6(dev) || IS_GEN7(dev))
                        aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        aux_clock_divider = 225; /* eDP input clock at 450Mhz */
@@@ -814,45 -791,23 +813,45 @@@ intel_dp_set_m_n(struct drm_crtc *crtc
        intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
                             mode->clock, adjusted_mode->clock, &m_n);
  
 -      if (HAS_PCH_SPLIT(dev)) {
 -              I915_WRITE(TRANSDATA_M1(pipe),
 -                         ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
 -                         m_n.gmch_m);
 +      if (IS_HASWELL(dev)) {
 +              I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
 +              I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
 +              I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
 +              I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 +      } else if (HAS_PCH_SPLIT(dev)) {
 +              I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
                I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
                I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
                I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
 +      } else if (IS_VALLEYVIEW(dev)) {
 +              I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
 +              I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
 +              I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
 +              I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
        } else {
                I915_WRITE(PIPE_GMCH_DATA_M(pipe),
 -                         ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
 -                         m_n.gmch_m);
 +                         TU_SIZE(m_n.tu) | m_n.gmch_m);
                I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
                I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
                I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
        }
  }
  
 +void intel_dp_init_link_config(struct intel_dp *intel_dp)
 +{
 +      memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
 +      intel_dp->link_configuration[0] = intel_dp->link_bw;
 +      intel_dp->link_configuration[1] = intel_dp->lane_count;
 +      intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
 +      /*
 +       * Check for DPCD version > 1.1 and enhanced framing support
 +       */
 +      if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
 +          (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
 +              intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 +      }
 +}
 +
  static void
  intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
                intel_write_eld(encoder, adjusted_mode);
        }
 -      memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
 -      intel_dp->link_configuration[0] = intel_dp->link_bw;
 -      intel_dp->link_configuration[1] = intel_dp->lane_count;
 -      intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
 -      /*
 -       * Check for DPCD version > 1.1 and enhanced framing support
 -       */
 -      if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
 -          (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
 -              intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 -      }
 +
 +      intel_dp_init_link_config(intel_dp);
  
        /* Split out the IBX/CPU vs CPT settings */
  
 -      if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
 +      if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@@ -1264,7 -1228,7 +1263,7 @@@ static void ironlake_edp_pll_off(struc
  }
  
  /* If the sink supports it, try to set the power state appropriately */
 -static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 +void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
  {
        int ret, i;
  
@@@ -1499,19 -1463,7 +1498,19 @@@ intel_dp_pre_emphasis_max(struct intel_
  {
        struct drm_device *dev = intel_dp->base.base.dev;
  
 -      if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
 +      if (IS_HASWELL(dev)) {
 +              switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 +              case DP_TRAIN_VOLTAGE_SWING_400:
 +                      return DP_TRAIN_PRE_EMPHASIS_9_5;
 +              case DP_TRAIN_VOLTAGE_SWING_600:
 +                      return DP_TRAIN_PRE_EMPHASIS_6;
 +              case DP_TRAIN_VOLTAGE_SWING_800:
 +                      return DP_TRAIN_PRE_EMPHASIS_3_5;
 +              case DP_TRAIN_VOLTAGE_SWING_1200:
 +              default:
 +                      return DP_TRAIN_PRE_EMPHASIS_0;
 +              }
 +      } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        return DP_TRAIN_PRE_EMPHASIS_6;
@@@ -1665,40 -1617,6 +1664,40 @@@ intel_gen7_edp_signal_levels(uint8_t tr
        }
  }
  
 +/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
 +static uint32_t
 +intel_dp_signal_levels_hsw(uint8_t train_set)
 +{
 +      int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
 +                                       DP_TRAIN_PRE_EMPHASIS_MASK);
 +      switch (signal_levels) {
 +      case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
 +              return DDI_BUF_EMP_400MV_0DB_HSW;
 +      case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
 +              return DDI_BUF_EMP_400MV_3_5DB_HSW;
 +      case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
 +              return DDI_BUF_EMP_400MV_6DB_HSW;
 +      case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
 +              return DDI_BUF_EMP_400MV_9_5DB_HSW;
 +
 +      case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
 +              return DDI_BUF_EMP_600MV_0DB_HSW;
 +      case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
 +              return DDI_BUF_EMP_600MV_3_5DB_HSW;
 +      case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
 +              return DDI_BUF_EMP_600MV_6DB_HSW;
 +
 +      case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
 +              return DDI_BUF_EMP_800MV_0DB_HSW;
 +      case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
 +              return DDI_BUF_EMP_800MV_3_5DB_HSW;
 +      default:
 +              DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
 +                            "0x%x\n", signal_levels);
 +              return DDI_BUF_EMP_400MV_0DB_HSW;
 +      }
 +}
 +
  static uint8_t
  intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
                      int lane)
@@@ -1755,44 -1673,8 +1754,44 @@@ intel_dp_set_link_train(struct intel_d
        struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 +      uint32_t temp;
  
 -      if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
 +      if (IS_HASWELL(dev)) {
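 +              /* On Haswell the training pattern is driven through the DDI
 +               * port's DP_TP_CTL register rather than the DP port register. */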
 +              temp = I915_READ(DP_TP_CTL(intel_dp->port));
 +
 +              if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
 +                      temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
 +              else
 +                      temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
 +
 +              temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
 +              switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 +              case DP_TRAINING_PATTERN_DISABLE:
 +                      temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
 +                      I915_WRITE(DP_TP_CTL(intel_dp->port), temp);
 +
 +                      if (wait_for((I915_READ(DP_TP_STATUS(intel_dp->port)) &
 +                                    DP_TP_STATUS_IDLE_DONE), 1))
 +                              DRM_ERROR("Timed out waiting for DP idle patterns\n");
 +
 +                      temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
 +                      temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 +
 +                      break;
 +              case DP_TRAINING_PATTERN_1:
 +                      temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
 +                      break;
 +              case DP_TRAINING_PATTERN_2:
 +                      temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
 +                      break;
 +              case DP_TRAINING_PATTERN_3:
 +                      temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
 +                      break;
 +              }
 +              I915_WRITE(DP_TP_CTL(intel_dp->port), temp);
 +
 +      } else if (HAS_PCH_CPT(dev) &&
 +                 (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
                dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
  
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
  }
  
  /* Enable corresponding port and start training pattern 1 */
 -static void
 +void
  intel_dp_start_link_train(struct intel_dp *intel_dp)
  {
 -      struct drm_device *dev = intel_dp->base.base.dev;
 +      struct drm_encoder *encoder = &intel_dp->base.base;
 +      struct drm_device *dev = encoder->dev;
        int i;
        uint8_t voltage;
        bool clock_recovery = false;
        int voltage_tries, loop_tries;
        uint32_t DP = intel_dp->DP;
  
 +      if (IS_HASWELL(dev))
 +              intel_ddi_prepare_link_retrain(encoder);
 +
        /* Write the link configuration data */
        intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
                                  intel_dp->link_configuration,
                uint8_t     link_status[DP_LINK_STATUS_SIZE];
                uint32_t    signal_levels;
  
 -
 -              if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
 +              if (IS_HASWELL(dev)) {
 +                      signal_levels = intel_dp_signal_levels_hsw(
 +                                                      intel_dp->train_set[0]);
 +                      DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
 +              } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                        signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
                } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
                        signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
 -                      DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 +              DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
 +                            signal_levels);
  
                if (!intel_dp_set_link_train(intel_dp, DP,
                                             DP_TRAINING_PATTERN_1 |
                        if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
                                break;
                if (i == intel_dp->lane_count && voltage_tries == 5) {
-                       ++loop_tries;
-                       if (loop_tries == 5) {
+                       if (++loop_tries == 5) {
                                DRM_DEBUG_KMS("too many full retries, give up\n");
                                break;
                        }
                }
  
                /* Check to see if we've tried the same voltage 5 times */
-               if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-                       ++voltage_tries;
-                       if (voltage_tries == 5) {
-                               DRM_DEBUG_KMS("too many voltage retries, give up\n");
-                               break;
-                       }
-               } else
+               if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
+                       voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
                        voltage_tries = 0;
-               voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+               } else
+                       ++voltage_tries;
  
                /* Compute new intel_dp->train_set as requested by target */
                intel_get_adjust_train(intel_dp, link_status);
        intel_dp->DP = DP;
  }
  
 -static void
 +void
  intel_dp_complete_link_train(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp->base.base.dev;
                        break;
                }
  
 -              if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
 +              if (IS_HASWELL(dev)) {
 +                      signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
 +                      DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
 +              } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                        signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
                } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                ++tries;
        }
  
 +      if (channel_eq)
 +              DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
 +
        intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
  }
  
@@@ -2037,24 -1900,6 +2031,24 @@@ intel_dp_link_down(struct intel_dp *int
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t DP = intel_dp->DP;
  
 +      /*
 +       * DDI code has a strict mode set sequence and we should try to respect
 +       * it, otherwise we might hang the machine in many different ways. So we
 +       * really should be disabling the port only on a complete crtc_disable
 +       * sequence. This function is only called under two conditions in the
 +       * DDI code:
 +       * - Link train failed while doing crtc_enable, and in this case we
 +       *   really should respect the mode set sequence and wait for a
 +       *   crtc_disable.
 +       * - Someone turned the monitor off and intel_dp_check_link_status
 +       *   called us. We don't need to disable the whole port in this case, so
 +       *   when someone turns the monitor on again,
 +       *   intel_ddi_prepare_link_retrain will take care of redoing the link
 +       *   train.
 +       */
 +      if (IS_HASWELL(dev))
 +              return;
 +
        if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
                return;
  
@@@ -2524,8 -2369,9 +2518,9 @@@ static voi
  intel_dp_destroy(struct drm_connector *connector)
  {
        struct drm_device *dev = connector->dev;
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
  
-       if (intel_dpd_is_edp(dev))
+       if (is_edp(intel_dp))
                intel_panel_destroy_backlight(dev);
  
        drm_sysfs_connector_remove(connector);
@@@ -2553,12 -2399,6 +2548,12 @@@ static const struct drm_encoder_helper_
        .disable = intel_encoder_noop,
  };
  
 +static const struct drm_encoder_helper_funcs intel_dp_helper_funcs_hsw = {
 +      .mode_fixup = intel_dp_mode_fixup,
 +      .mode_set = intel_ddi_mode_set,
 +      .disable = intel_encoder_noop,
 +};
 +
  static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_dp_detect,
@@@ -2661,14 -2501,7 +2656,14 @@@ intel_dp_init(struct drm_device *dev, i
                if (intel_dpd_is_edp(dev))
                        intel_dp->is_pch_edp = true;
  
 -      if (output_reg == DP_A || is_pch_edp(intel_dp)) {
 +      /*
 +       * FIXME: We need to initialize built-in panels before external panels.
 +       * For X0, DP_C is fixed as eDP. Revisit this as part of the VLV eDP cleanup.
 +       */
 +      if (IS_VALLEYVIEW(dev) && output_reg == DP_C) {
 +              type = DRM_MODE_CONNECTOR_eDP;
 +              intel_encoder->type = INTEL_OUTPUT_EDP;
 +      } else if (output_reg == DP_A || is_pch_edp(intel_dp)) {
                type = DRM_MODE_CONNECTOR_eDP;
                intel_encoder->type = INTEL_OUTPUT_EDP;
        } else {
  
        drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
                         DRM_MODE_ENCODER_TMDS);
 -      drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
 +
 +      if (IS_HASWELL(dev))
 +              drm_encoder_helper_add(&intel_encoder->base,
 +                                     &intel_dp_helper_funcs_hsw);
 +      else
 +              drm_encoder_helper_add(&intel_encoder->base,
 +                                     &intel_dp_helper_funcs);
  
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_sysfs_connector_add(connector);
  
 -      intel_encoder->enable = intel_enable_dp;
 -      intel_encoder->pre_enable = intel_pre_enable_dp;
 -      intel_encoder->disable = intel_disable_dp;
 -      intel_encoder->post_disable = intel_post_disable_dp;
 -      intel_encoder->get_hw_state = intel_dp_get_hw_state;
 +      if (IS_HASWELL(dev)) {
 +              intel_encoder->enable = intel_enable_ddi;
 +              intel_encoder->pre_enable = intel_ddi_pre_enable;
 +              intel_encoder->disable = intel_disable_ddi;
 +              intel_encoder->post_disable = intel_ddi_post_disable;
 +              intel_encoder->get_hw_state = intel_ddi_get_hw_state;
 +      } else {
 +              intel_encoder->enable = intel_enable_dp;
 +              intel_encoder->pre_enable = intel_pre_enable_dp;
 +              intel_encoder->disable = intel_disable_dp;
 +              intel_encoder->post_disable = intel_post_disable_dp;
 +              intel_encoder->get_hw_state = intel_dp_get_hw_state;
 +      }
        intel_connector->get_hw_state = intel_connector_get_hw_state;
  
        /* Set up the DDC bus. */
  #define __INTEL_DRV_H__
  
  #include <linux/i2c.h>
- #include "i915_drm.h"
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
- #include "drm_crtc.h"
- #include "drm_crtc_helper.h"
- #include "drm_fb_helper.h"
- #include "drm_dp_helper.h"
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_fb_helper.h>
+ #include <drm/drm_dp_helper.h>
  
  #define _wait_for(COND, MS, W) ({ \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
@@@ -212,7 -212,6 +212,7 @@@ struct intel_crtc 
  
        /* We can share PLLs across outputs if the timings match */
        struct intel_pch_pll *pch_pll;
 +      uint32_t ddi_pll_sel;
  };
  
  struct intel_plane {
@@@ -422,10 -421,6 +422,10 @@@ extern void intel_dp_init(struct drm_de
  void
  intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode);
 +extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
 +extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
 +extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 +extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
  extern bool intel_dpd_is_edp(struct drm_device *dev);
  extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
  extern int intel_edp_target_clock(struct intel_encoder *,
@@@ -478,11 -473,6 +478,11 @@@ static inline struct intel_encoder *int
        return to_intel_connector(connector)->encoder;
  }
  
 +static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
 +{
 +      return container_of(encoder, struct intel_dp, base.base);
 +}
 +
  extern void intel_connector_attach_encoder(struct intel_connector *connector,
                                           struct intel_encoder *encoder);
  extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@@ -590,18 -580,5 +590,18 @@@ extern bool intel_ddi_get_hw_state(stru
  extern void intel_ddi_mode_set(struct drm_encoder *encoder,
                                struct drm_display_mode *mode,
                                struct drm_display_mode *adjusted_mode);
 +extern void intel_ddi_pll_init(struct drm_device *dev);
 +extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
 +extern void intel_ddi_disable_pipe_func(struct drm_i915_private *dev_priv,
 +                                      enum pipe pipe);
 +extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
 +extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
 +extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
 +extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
 +extern void intel_ddi_pre_enable(struct intel_encoder *intel_encoder);
 +extern void intel_ddi_post_disable(struct intel_encoder *intel_encoder);
 +extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
 +extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 +extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
  
  #endif /* __INTEL_DRV_H__ */
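The new externs above make the DP link-training entry points and the DDI hooks callable from the DDI code. As a rough, hypothetical sketch only (the real callers live in intel_ddi.c, which is not part of this diff, and the function name below is illustrative), a retrain path combining them would look like:

	/* Hypothetical sketch, not part of this diff: retrain a DDI-driven DP
	 * link using the entry points exported above. */
	static void example_ddi_retrain(struct drm_encoder *encoder)
	{
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* On Haswell, intel_dp_start_link_train() already calls
		 * intel_ddi_prepare_link_retrain() internally (see the
		 * intel_dp.c hunk earlier in this diff). */
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}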
  #include <linux/i2c.h>
  #include <linux/slab.h>
  #include <linux/delay.h>
- #include "drmP.h"
- #include "drm.h"
- #include "drm_crtc.h"
- #include "drm_edid.h"
+ #include <drm/drmP.h>
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_edid.h>
  #include "intel_drv.h"
- #include "i915_drm.h"
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  
  static void
@@@ -151,6 -150,9 +150,9 @@@ static void g4x_write_infoframe(struct 
                I915_WRITE(VIDEO_DIP_DATA, *data);
                data++;
        }
+       /* Write every possible data byte to force correct ECC calculation. */
+       for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+               I915_WRITE(VIDEO_DIP_DATA, 0);
        mmiowb();
  
        val |= g4x_infoframe_enable(frame);
@@@ -186,6 -188,9 +188,9 @@@ static void ibx_write_infoframe(struct 
                I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
                data++;
        }
+       /* Write every possible data byte to force correct ECC calculation. */
+       for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+               I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
        mmiowb();
  
        val |= g4x_infoframe_enable(frame);
@@@ -224,6 -229,9 +229,9 @@@ static void cpt_write_infoframe(struct 
                I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
                data++;
        }
+       /* Write every possible data byte to force correct ECC calculation. */
+       for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+               I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
        mmiowb();
  
        val |= g4x_infoframe_enable(frame);
@@@ -259,6 -267,9 +267,9 @@@ static void vlv_write_infoframe(struct 
                I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
                data++;
        }
+       /* Write every possible data byte to force correct ECC calculation. */
+       for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+               I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
        mmiowb();
  
        val |= g4x_infoframe_enable(frame);
@@@ -292,6 -303,9 +303,9 @@@ static void hsw_write_infoframe(struct 
                I915_WRITE(data_reg + i, *data);
                data++;
        }
+       /* Write every possible data byte to force correct ECC calculation. */
+       for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+               I915_WRITE(data_reg + i, 0);
        mmiowb();
  
        val |= hsw_infoframe_enable(frame);
@@@ -1013,10 -1027,8 +1027,10 @@@ void intel_hdmi_init(struct drm_device 
        }
  
        if (IS_HASWELL(dev)) {
 +              intel_encoder->pre_enable = intel_ddi_pre_enable;
                intel_encoder->enable = intel_enable_ddi;
                intel_encoder->disable = intel_disable_ddi;
 +              intel_encoder->post_disable = intel_ddi_post_disable;
                intel_encoder->get_hw_state = intel_ddi_get_hw_state;
                drm_encoder_helper_add(&intel_encoder->base,
                                       &intel_hdmi_helper_funcs_hsw);
@@@ -31,8 -31,8 +31,8 @@@
  #include <linux/acpi_io.h>
  #include <acpi/video.h>
  
- #include "drmP.h"
- #include "i915_drm.h"
+ #include <drm/drmP.h>
+ #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include "intel_drv.h"
  
@@@ -154,8 -154,6 +154,8 @@@ static u32 asle_set_backlight(struct dr
        struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
        u32 max;
  
 +      DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 +
        if (!(bclp & ASLE_BCLP_VALID))
                return ASLE_BACKLIGHT_FAILED;
  
@@@ -2404,10 -2404,10 +2404,10 @@@ static void gen6_enable_rps(struct drm_
        struct intel_ring_buffer *ring;
        u32 rp_state_cap;
        u32 gt_perf_status;
 -      u32 pcu_mbox, rc6_mask = 0;
 +      u32 rc6vids, pcu_mbox, rc6_mask = 0;
        u32 gtfifodbg;
        int rc6_mode;
 -      int i;
 +      int i, ret;
  
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  
                   GEN6_RP_UP_BUSY_AVG |
                   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
  
 -      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 -                   500))
 -              DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
 -
 -      I915_WRITE(GEN6_PCODE_DATA, 0);
 -      I915_WRITE(GEN6_PCODE_MAILBOX,
 -                 GEN6_PCODE_READY |
 -                 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
 -      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 -                   500))
 -              DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 -
 -      /* Check for overclock support */
 -      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 -                   500))
 -              DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
 -      I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
 -      pcu_mbox = I915_READ(GEN6_PCODE_DATA);
 -      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 -                   500))
 -              DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 -      if (pcu_mbox & (1<<31)) { /* OC supported */
 -              dev_priv->rps.max_delay = pcu_mbox & 0xff;
 -              DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 +      ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
 +      if (!ret) {
 +              pcu_mbox = 0;
 +              ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
 +              if (ret && pcu_mbox & (1<<31)) { /* OC supported */
 +                      dev_priv->rps.max_delay = pcu_mbox & 0xff;
 +                      DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 +              }
 +      } else {
 +              DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
        }
  
        gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
        /* enable all PM interrupts */
        I915_WRITE(GEN6_PMINTRMSK, 0);
  
 +      rc6vids = 0;
 +      ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
 +      if (IS_GEN6(dev) && ret) {
 +              DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
 +      } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
 +              DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
 +                        GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
 +              rc6vids &= 0xffff00;
 +              rc6vids |= GEN6_ENCODE_RC6_VID(450);
 +              ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
 +              if (ret)
 +                      DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
 +      }
 +
        gen6_gt_force_wake_put(dev_priv);
  }
  
@@@ -2581,11 -2581,17 +2581,11 @@@ static void gen6_update_ring_freq(struc
                else
                        ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
                ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
 +              ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
  
 -              I915_WRITE(GEN6_PCODE_DATA,
 -                         (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
 -                         gpu_freq);
 -              I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
 -                         GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
 -              if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
 -                            GEN6_PCODE_READY) == 0, 10)) {
 -                      DRM_ERROR("pcode write of freq table timed out\n");
 -                      continue;
 -              }
 +              sandybridge_pcode_write(dev_priv,
 +                                      GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
 +                                      ia_freq | gpu_freq);
        }
  }
  
@@@ -2728,7 -2734,7 +2728,7 @@@ static const struct cparams 
        { 0, 800, 231, 23784 },
  };
  
- unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
  {
        u64 total_count, diff, ret;
        u32 count1, count2, count3, m = 0, c = 0;
        return ret;
  }
  
+ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+ {
+       unsigned long val;
+       if (dev_priv->info->gen != 5)
+               return 0;
+       spin_lock_irq(&mchdev_lock);
+       val = __i915_chipset_val(dev_priv);
+       spin_unlock_irq(&mchdev_lock);
+       return val;
+ }
  unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
  {
        unsigned long m, x, b;
@@@ -2985,7 -3007,7 +3001,7 @@@ void i915_update_gfx_val(struct drm_i91
        spin_unlock_irq(&mchdev_lock);
  }
  
- unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
  {
        unsigned long t, corr, state1, corr2, state2;
        u32 pxvid, ext_v;
        return dev_priv->ips.gfx_power + state2;
  }
  
+ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+ {
+       unsigned long val;
+       if (dev_priv->info->gen != 5)
+               return 0;
+       spin_lock_irq(&mchdev_lock);
+       val = __i915_gfx_val(dev_priv);
+       spin_unlock_irq(&mchdev_lock);
+       return val;
+ }
  /**
   * i915_read_mch_val - return value for IPS use
   *
@@@ -3038,8 -3076,8 +3070,8 @@@ unsigned long i915_read_mch_val(void
                goto out_unlock;
        dev_priv = i915_mch_dev;
  
-       chipset_val = i915_chipset_val(dev_priv);
-       graphics_val = i915_gfx_val(dev_priv);
+       chipset_val = __i915_chipset_val(dev_priv);
+       graphics_val = __i915_gfx_val(dev_priv);
  
        ret = chipset_val + graphics_val;
  
@@@ -3295,12 -3333,14 +3327,12 @@@ void intel_enable_gt_powersave(struct d
  static void ironlake_init_clock_gating(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 +      uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
  
        /* Required for FBC */
 -      dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
 -              DPFCRUNIT_CLOCK_GATE_DISABLE |
 -              DPFDUNIT_CLOCK_GATE_DISABLE;
 -      /* Required for CxSR */
 -      dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 +      dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
 +                 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
 +                 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
  
        I915_WRITE(PCH_3DCGDIS0,
                   MARIUNIT_CLOCK_GATE_DISABLE |
        I915_WRITE(PCH_3DCGDIS1,
                   VFMUNIT_CLOCK_GATE_DISABLE);
  
 -      I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 -
        /*
         * According to the spec the following bits should be set in
         * order to enable memory self-refresh
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   (I915_READ(ILK_DISPLAY_CHICKEN2) |
                    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
 -      I915_WRITE(ILK_DSPCLK_GATE,
 -                 (I915_READ(ILK_DSPCLK_GATE) |
 -                  ILK_DPARB_CLK_GATE));
 +      dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
        I915_WRITE(DISP_ARB_CTL,
                   (I915_READ(DISP_ARB_CTL) |
                    DISP_FBC_WM_DIS));
                I915_WRITE(ILK_DISPLAY_CHICKEN2,
                           I915_READ(ILK_DISPLAY_CHICKEN2) |
                           ILK_DPARB_GATE);
 -              I915_WRITE(ILK_DSPCLK_GATE,
 -                         I915_READ(ILK_DSPCLK_GATE) |
 -                         ILK_DPFC_DIS1 |
 -                         ILK_DPFC_DIS2 |
 -                         ILK_CLK_FBC);
        }
  
 +      I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 +
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);
@@@ -3356,9 -3403,9 +3388,9 @@@ static void gen6_init_clock_gating(stru
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 -      uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 +      uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
  
 -      I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 +      I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
  
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
  
        /* Bspec says we need to always set all mask bits. */
-       I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
-                  _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
+       I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
+                  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
  
        /*
         * According to the spec the following bits should be
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
 -      I915_WRITE(ILK_DSPCLK_GATE,
 -                 I915_READ(ILK_DSPCLK_GATE) |
 -                 ILK_DPARB_CLK_GATE  |
 -                 ILK_DPFD_CLK_GATE);
 +      I915_WRITE(ILK_DSPCLK_GATE_D,
 +                 I915_READ(ILK_DSPCLK_GATE_D) |
 +                 ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
 +                 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
  
        I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
                   GEN6_MBCTL_ENABLE_BOOT_FETCH);
                           DISPPLANE_TRICKLE_FEED_DISABLE);
                intel_flush_display_plane(dev_priv, pipe);
        }
+       /* The default value should be 0x200 according to docs, but the two
+        * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
+       I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
+       I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
  }
  
  static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@@ -3445,6 -3497,9 +3482,6 @@@ static void haswell_init_clock_gating(s
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 -      uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 -
 -      I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
  
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
         */
        I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
  
 -      I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 -
 -      I915_WRITE(IVB_CHICKEN3,
 -                 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
 -                 CHICKEN3_DGMG_DONE_FIX_DISABLE);
 -
        /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
        I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
                   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@@ -3498,19 -3559,17 +3535,19 @@@ static void ivybridge_init_clock_gating
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 -      uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
        uint32_t snpcr;
  
 -      I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 -
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
  
 -      I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 +      I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 +
 +      /* WaDisableEarlyCull */
 +      I915_WRITE(_3D_CHICKEN3,
 +                 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
  
 +      /* WaDisableBackToBackFlipFix */
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
        I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
                        GEN7_WA_L3_CHICKEN_MODE);
  
 +      /* WaForceL3Serialization */
 +      I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
 +                 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 +
        /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
         * gating disable must be set.  Failure to set it results in
         * flickering pixels due to Z write ordering failures after
@@@ -3577,18 -3632,16 +3614,18 @@@ static void valleyview_init_clock_gatin
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 -      uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 -
 -      I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
  
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
  
 -      I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 +      I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 +
 +      /* WaDisableEarlyCull */
 +      I915_WRITE(_3D_CHICKEN3,
 +                 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
  
 +      /* WaDisableBackToBackFlipFix */
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
        I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
        I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
  
 +      /* WaForceL3Serialization */
 +      I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
 +                 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 +
        /* This is required by WaCatErrorRejectionIssue */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@@ -3950,12 -3999,6 +3987,12 @@@ static void __gen6_gt_wait_for_thread_c
                DRM_ERROR("GT thread status wait timed out\n");
  }
  
 +static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
 +{
 +      I915_WRITE_NOTRACE(FORCEWAKE, 0);
 +      POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 +}
 +
  static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
  {
        u32 forcewake_ack;
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
  
 -      I915_WRITE_NOTRACE(FORCEWAKE, 1);
 +      I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
        POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
  
        if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
        __gen6_gt_wait_for_thread_c0(dev_priv);
  }
  
 +static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
 +{
 +      I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
 +      POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 +}
 +
  static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
  {
        u32 forcewake_ack;
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
  
 -      I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
 +      I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
        POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
  
        if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@@ -4042,7 -4079,7 +4079,7 @@@ static void __gen6_gt_force_wake_put(st
  
  static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
  {
 -      I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
 +      I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
  }
@@@ -4080,18 -4117,13 +4117,18 @@@ int __gen6_gt_wait_for_fifo(struct drm_
        return ret;
  }
  
 +static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 +{
 +      I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
 +}
 +
  static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
  {
        if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
  
 -      I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
 +      I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
  
        if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
                            FORCEWAKE_ACK_TIMEOUT_MS))
  
  static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
  {
 -      I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
 +      I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* The below doubles as a POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
  }
  
 +void intel_gt_reset(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      if (IS_VALLEYVIEW(dev)) {
 +              vlv_force_wake_reset(dev_priv);
 +      } else if (INTEL_INFO(dev)->gen >= 6) {
 +              __gen6_gt_force_wake_reset(dev_priv);
 +              if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 +                      __gen6_gt_force_wake_mt_reset(dev_priv);
 +      }
 +}
 +
  void intel_gt_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        spin_lock_init(&dev_priv->gt_lock);
  
 +      intel_gt_reset(dev);
 +
        if (IS_VALLEYVIEW(dev)) {
                dev_priv->gt.force_wake_get = vlv_force_wake_get;
                dev_priv->gt.force_wake_put = vlv_force_wake_put;
 -      } else if (INTEL_INFO(dev)->gen >= 6) {
 +      } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 +              dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
 +              dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
 +      } else if (IS_GEN6(dev)) {
                dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
                dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
 +      }
 +}
  
 -              /* IVB configs may use multi-threaded forcewake */
 -              if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 -                      u32 ecobus;
 -
 -                      /* A small trick here - if the bios hasn't configured
 -                       * MT forcewake, and if the device is in RC6, then
 -                       * force_wake_mt_get will not wake the device and the
 -                       * ECOBUS read will return zero. Which will be
 -                       * (correctly) interpreted by the test below as MT
 -                       * forcewake being disabled.
 -                       */
 -                      mutex_lock(&dev->struct_mutex);
 -                      __gen6_gt_force_wake_mt_get(dev_priv);
 -                      ecobus = I915_READ_NOTRACE(ECOBUS);
 -                      __gen6_gt_force_wake_mt_put(dev_priv);
 -                      mutex_unlock(&dev->struct_mutex);
 -
 -                      if (ecobus & FORCEWAKE_MT_ENABLE) {
 -                              DRM_DEBUG_KMS("Using MT version of forcewake\n");
 -                              dev_priv->gt.force_wake_get =
 -                                      __gen6_gt_force_wake_mt_get;
 -                              dev_priv->gt.force_wake_put =
 -                                      __gen6_gt_force_wake_mt_put;
 -                      }
 -              }
 +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
 +{
 +      WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 +
 +      if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
 +              DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
 +              return -EAGAIN;
 +      }
 +
 +      I915_WRITE(GEN6_PCODE_DATA, *val);
 +      I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 +
 +      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 +                   500)) {
 +              DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
 +              return -ETIMEDOUT;
        }
 +
 +      *val = I915_READ(GEN6_PCODE_DATA);
 +      I915_WRITE(GEN6_PCODE_DATA, 0);
 +
 +      return 0;
  }
  
 +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 +{
 +      WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 +
 +      if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
 +              DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
 +              return -EAGAIN;
 +      }
 +
 +      I915_WRITE(GEN6_PCODE_DATA, val);
 +      I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 +
 +      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 +                   500)) {
 +              DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
 +              return -ETIMEDOUT;
 +      }
 +
 +      I915_WRITE(GEN6_PCODE_DATA, 0);
 +
 +      return 0;
 +}
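The two helpers above centralize the GEN6 PCODE mailbox handshake that the earlier hunks in this file strip out of gen6_enable_rps() and gen6_update_ring_freq(). A minimal usage sketch, assuming dev->struct_mutex is already held (the WARN_ON in both helpers requires it), mirroring the rc6vids flow in gen6_enable_rps():

	/* Illustrative sketch only -- mirrors the RC6 VID fixup above. */
	u32 rc6vids = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (ret == 0 && GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450) {
		rc6vids = (rc6vids & 0xffff00) | GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS,
					      rc6vids);
	}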
   *
   */
  
- #include "drmP.h"
- #include "drm.h"
+ #include <drm/drmP.h>
  #include "i915_drv.h"
- #include "i915_drm.h"
+ #include <drm/i915_drm.h>
  #include "i915_trace.h"
  #include "intel_drv.h"
  
@@@ -965,9 -964,7 +964,9 @@@ gen6_ring_put_irq(struct intel_ring_buf
  }
  
  static int
 -i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 +i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 +                       u32 offset, u32 length,
 +                       unsigned flags)
  {
        int ret;
  
        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        MI_BATCH_GTT |
 -                      MI_BATCH_NON_SECURE_I965);
 +                      (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
  
  
  static int
  i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 -                              u32 offset, u32 len)
 +                              u32 offset, u32 len,
 +                              unsigned flags)
  {
        int ret;
  
                return ret;
  
        intel_ring_emit(ring, MI_BATCH_BUFFER);
 -      intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 +      intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
        intel_ring_emit(ring, offset + len - 8);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
  
  static int
  i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
 -                              u32 offset, u32 len)
 +                       u32 offset, u32 len,
 +                       unsigned flags)
  {
        int ret;
  
                return ret;
  
        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
 -      intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 +      intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
        intel_ring_advance(ring);
  
        return 0;
@@@ -1407,30 -1402,8 +1406,30 @@@ static int gen6_ring_flush(struct intel
  }
  
  static int
 +hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 +                            u32 offset, u32 len,
 +                            unsigned flags)
 +{
 +      int ret;
 +
 +      ret = intel_ring_begin(ring, 2);
 +      if (ret)
 +              return ret;
 +
 +      intel_ring_emit(ring,
 +                      MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
 +                      (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
 +      /* bit0-7 is the length on GEN6+ */
 +      intel_ring_emit(ring, offset);
 +      intel_ring_advance(ring);
 +
 +      return 0;
 +}
 +
 +static int
  gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 -                            u32 offset, u32 len)
 +                            u32 offset, u32 len,
 +                            unsigned flags)
  {
        int ret;
  
        if (ret)
                return ret;
  
 -      intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
 +      intel_ring_emit(ring,
 +                      MI_BATCH_BUFFER_START |
 +                      (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
@@@ -1519,9 -1490,7 +1518,9 @@@ int intel_init_render_ring_buffer(struc
                ring->irq_enable_mask = I915_USER_INTERRUPT;
        }
        ring->write_tail = ring_write_tail;
 -      if (INTEL_INFO(dev)->gen >= 6)
 +      if (IS_HASWELL(dev))
 +              ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
 +      else if (INTEL_INFO(dev)->gen >= 6)
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        else if (INTEL_INFO(dev)->gen >= 4)
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
index 0000000,4322b1e..b746a3c
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,947 +1,953 @@@
+ /*
+  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+  * All Rights Reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the
+  * "Software"), to deal in the Software without restriction, including
+  * without limitation the rights to use, copy, modify, merge, publish,
+  * distribute, sub license, and/or sell copies of the Software, and to
+  * permit persons to whom the Software is furnished to do so, subject to
+  * the following conditions:
+  *
+  * The above copyright notice and this permission notice (including the
+  * next paragraph) shall be included in all copies or substantial portions
+  * of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+  *
+  */
+ #ifndef _UAPI_I915_DRM_H_
+ #define _UAPI_I915_DRM_H_
+ #include <drm/drm.h>
+ /* Please note that modifications to all structs defined here are
+  * subject to backwards-compatibility constraints.
+  */
+ /* Each region is a minimum of 16k, and there are at most 255 of them.
+  */
+ #define I915_NR_TEX_REGIONS 255       /* table size 2k - maximum due to use
+                                * of chars for next/prev indices */
+ #define I915_LOG_MIN_TEX_REGION_SIZE 14
+ typedef struct _drm_i915_init {
+       enum {
+               I915_INIT_DMA = 0x01,
+               I915_CLEANUP_DMA = 0x02,
+               I915_RESUME_DMA = 0x03
+       } func;
+       unsigned int mmio_offset;
+       int sarea_priv_offset;
+       unsigned int ring_start;
+       unsigned int ring_end;
+       unsigned int ring_size;
+       unsigned int front_offset;
+       unsigned int back_offset;
+       unsigned int depth_offset;
+       unsigned int w;
+       unsigned int h;
+       unsigned int pitch;
+       unsigned int pitch_bits;
+       unsigned int back_pitch;
+       unsigned int depth_pitch;
+       unsigned int cpp;
+       unsigned int chipset;
+ } drm_i915_init_t;
+ typedef struct _drm_i915_sarea {
+       struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
+       int last_upload;        /* last time texture was uploaded */
+       int last_enqueue;       /* last time a buffer was enqueued */
+       int last_dispatch;      /* age of the most recently dispatched buffer */
+       int ctxOwner;           /* last context to upload state */
+       int texAge;
+       int pf_enabled;         /* is pageflipping allowed? */
+       int pf_active;
+       int pf_current_page;    /* which buffer is being displayed? */
+       int perf_boxes;         /* performance boxes to be displayed */
+       int width, height;      /* screen size in pixels */
+       drm_handle_t front_handle;
+       int front_offset;
+       int front_size;
+       drm_handle_t back_handle;
+       int back_offset;
+       int back_size;
+       drm_handle_t depth_handle;
+       int depth_offset;
+       int depth_size;
+       drm_handle_t tex_handle;
+       int tex_offset;
+       int tex_size;
+       int log_tex_granularity;
+       int pitch;
+       int rotation;           /* 0, 90, 180 or 270 */
+       int rotated_offset;
+       int rotated_size;
+       int rotated_pitch;
+       int virtualX, virtualY;
+       unsigned int front_tiled;
+       unsigned int back_tiled;
+       unsigned int depth_tiled;
+       unsigned int rotated_tiled;
+       unsigned int rotated2_tiled;
+       int pipeA_x;
+       int pipeA_y;
+       int pipeA_w;
+       int pipeA_h;
+       int pipeB_x;
+       int pipeB_y;
+       int pipeB_w;
+       int pipeB_h;
+       /* fill out some space for old userspace triple buffer */
+       drm_handle_t unused_handle;
+       __u32 unused1, unused2, unused3;
+       /* buffer object handles for static buffers. May change
+        * over the lifetime of the client.
+        */
+       __u32 front_bo_handle;
+       __u32 back_bo_handle;
+       __u32 unused_bo_handle;
+       __u32 depth_bo_handle;
+ } drm_i915_sarea_t;
+ /* due to userspace building against these headers we need some compat here */
+ #define planeA_x pipeA_x
+ #define planeA_y pipeA_y
+ #define planeA_w pipeA_w
+ #define planeA_h pipeA_h
+ #define planeB_x pipeB_x
+ #define planeB_y pipeB_y
+ #define planeB_w pipeB_w
+ #define planeB_h pipeB_h
+ /* Flags for perf_boxes
+  */
+ #define I915_BOX_RING_EMPTY    0x1
+ #define I915_BOX_FLIP          0x2
+ #define I915_BOX_WAIT          0x4
+ #define I915_BOX_TEXTURE_LOAD  0x8
+ #define I915_BOX_LOST_CONTEXT  0x10
+ /* I915 specific ioctls
+  * The device specific ioctl range is 0x40 to 0x79.
+  */
+ #define DRM_I915_INIT         0x00
+ #define DRM_I915_FLUSH                0x01
+ #define DRM_I915_FLIP         0x02
+ #define DRM_I915_BATCHBUFFER  0x03
+ #define DRM_I915_IRQ_EMIT     0x04
+ #define DRM_I915_IRQ_WAIT     0x05
+ #define DRM_I915_GETPARAM     0x06
+ #define DRM_I915_SETPARAM     0x07
+ #define DRM_I915_ALLOC                0x08
+ #define DRM_I915_FREE         0x09
+ #define DRM_I915_INIT_HEAP    0x0a
+ #define DRM_I915_CMDBUFFER    0x0b
+ #define DRM_I915_DESTROY_HEAP 0x0c
+ #define DRM_I915_SET_VBLANK_PIPE      0x0d
+ #define DRM_I915_GET_VBLANK_PIPE      0x0e
+ #define DRM_I915_VBLANK_SWAP  0x0f
+ #define DRM_I915_HWS_ADDR     0x11
+ #define DRM_I915_GEM_INIT     0x13
+ #define DRM_I915_GEM_EXECBUFFER       0x14
+ #define DRM_I915_GEM_PIN      0x15
+ #define DRM_I915_GEM_UNPIN    0x16
+ #define DRM_I915_GEM_BUSY     0x17
+ #define DRM_I915_GEM_THROTTLE 0x18
+ #define DRM_I915_GEM_ENTERVT  0x19
+ #define DRM_I915_GEM_LEAVEVT  0x1a
+ #define DRM_I915_GEM_CREATE   0x1b
+ #define DRM_I915_GEM_PREAD    0x1c
+ #define DRM_I915_GEM_PWRITE   0x1d
+ #define DRM_I915_GEM_MMAP     0x1e
+ #define DRM_I915_GEM_SET_DOMAIN       0x1f
+ #define DRM_I915_GEM_SW_FINISH        0x20
+ #define DRM_I915_GEM_SET_TILING       0x21
+ #define DRM_I915_GEM_GET_TILING       0x22
+ #define DRM_I915_GEM_GET_APERTURE 0x23
+ #define DRM_I915_GEM_MMAP_GTT 0x24
+ #define DRM_I915_GET_PIPE_FROM_CRTC_ID        0x25
+ #define DRM_I915_GEM_MADVISE  0x26
+ #define DRM_I915_OVERLAY_PUT_IMAGE    0x27
+ #define DRM_I915_OVERLAY_ATTRS        0x28
+ #define DRM_I915_GEM_EXECBUFFER2      0x29
+ #define DRM_I915_GET_SPRITE_COLORKEY  0x2a
+ #define DRM_I915_SET_SPRITE_COLORKEY  0x2b
+ #define DRM_I915_GEM_WAIT     0x2c
+ #define DRM_I915_GEM_CONTEXT_CREATE   0x2d
+ #define DRM_I915_GEM_CONTEXT_DESTROY  0x2e
+ #define DRM_I915_GEM_SET_CACHING      0x2f
+ #define DRM_I915_GEM_GET_CACHING      0x30
+ #define DRM_I915_REG_READ             0x31
+ #define DRM_IOCTL_I915_INIT           DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+ #define DRM_IOCTL_I915_FLUSH          DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+ #define DRM_IOCTL_I915_FLIP           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
+ #define DRM_IOCTL_I915_BATCHBUFFER    DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
+ #define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
+ #define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
+ #define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+ #define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
+ #define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
+ #define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
+ #define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
+ #define DRM_IOCTL_I915_CMDBUFFER      DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
+ #define DRM_IOCTL_I915_DESTROY_HEAP   DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
+ #define DRM_IOCTL_I915_SET_VBLANK_PIPE        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+ #define DRM_IOCTL_I915_GET_VBLANK_PIPE        DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+ #define DRM_IOCTL_I915_VBLANK_SWAP    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+ #define DRM_IOCTL_I915_HWS_ADDR               DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
+ #define DRM_IOCTL_I915_GEM_INIT               DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+ #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+ #define DRM_IOCTL_I915_GEM_EXECBUFFER2        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+ #define DRM_IOCTL_I915_GEM_PIN                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+ #define DRM_IOCTL_I915_GEM_UNPIN      DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+ #define DRM_IOCTL_I915_GEM_BUSY               DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+ #define DRM_IOCTL_I915_GEM_SET_CACHING                DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
+ #define DRM_IOCTL_I915_GEM_GET_CACHING                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
+ #define DRM_IOCTL_I915_GEM_THROTTLE   DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+ #define DRM_IOCTL_I915_GEM_ENTERVT    DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+ #define DRM_IOCTL_I915_GEM_LEAVEVT    DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+ #define DRM_IOCTL_I915_GEM_CREATE     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+ #define DRM_IOCTL_I915_GEM_PREAD      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+ #define DRM_IOCTL_I915_GEM_PWRITE     DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+ #define DRM_IOCTL_I915_GEM_MMAP               DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+ #define DRM_IOCTL_I915_GEM_MMAP_GTT   DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+ #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+ #define DRM_IOCTL_I915_GEM_SW_FINISH  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+ #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+ #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+ #define DRM_IOCTL_I915_GEM_GET_APERTURE       DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
+ #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+ #define DRM_IOCTL_I915_GEM_MADVISE    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+ #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE      DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+ #define DRM_IOCTL_I915_OVERLAY_ATTRS  DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+ #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+ #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+ #define DRM_IOCTL_I915_GEM_WAIT               DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
+ #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE     DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+ #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY    DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+ #define DRM_IOCTL_I915_REG_READ                       DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+ /* Allow drivers to submit batchbuffers directly to hardware, relying
+  * on the security mechanisms provided by hardware.
+  */
+ typedef struct drm_i915_batchbuffer {
+       int start;              /* agp offset */
+       int used;               /* nr bytes in use */
+       int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
+       int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
+       int num_cliprects;      /* multipass with multiple cliprects? */
+       struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+ } drm_i915_batchbuffer_t;
+ /* As above, but pass a pointer to userspace buffer which can be
+  * validated by the kernel prior to sending to hardware.
+  */
+ typedef struct _drm_i915_cmdbuffer {
+       char __user *buf;       /* pointer to userspace command buffer */
+       int sz;                 /* nr bytes in buf */
+       int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
+       int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
+       int num_cliprects;      /* multipass with multiple cliprects? */
+       struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+ } drm_i915_cmdbuffer_t;
+ /* Userspace can request & wait on irq's:
+  */
+ typedef struct drm_i915_irq_emit {
+       int __user *irq_seq;
+ } drm_i915_irq_emit_t;
+ typedef struct drm_i915_irq_wait {
+       int irq_seq;
+ } drm_i915_irq_wait_t;
+ /* Ioctl to query kernel params:
+  */
+ #define I915_PARAM_IRQ_ACTIVE            1
+ #define I915_PARAM_ALLOW_BATCHBUFFER     2
+ #define I915_PARAM_LAST_DISPATCH         3
+ #define I915_PARAM_CHIPSET_ID            4
+ #define I915_PARAM_HAS_GEM               5
+ #define I915_PARAM_NUM_FENCES_AVAIL      6
+ #define I915_PARAM_HAS_OVERLAY           7
+ #define I915_PARAM_HAS_PAGEFLIPPING    8
+ #define I915_PARAM_HAS_EXECBUF2          9
+ #define I915_PARAM_HAS_BSD             10
+ #define I915_PARAM_HAS_BLT             11
+ #define I915_PARAM_HAS_RELAXED_FENCING         12
+ #define I915_PARAM_HAS_COHERENT_RINGS  13
+ #define I915_PARAM_HAS_EXEC_CONSTANTS  14
+ #define I915_PARAM_HAS_RELAXED_DELTA   15
+ #define I915_PARAM_HAS_GEN7_SOL_RESET  16
+ #define I915_PARAM_HAS_LLC                     17
+ #define I915_PARAM_HAS_ALIASING_PPGTT  18
+ #define I915_PARAM_HAS_WAIT_TIMEOUT    19
+ #define I915_PARAM_HAS_SEMAPHORES      20
+ #define I915_PARAM_HAS_PRIME_VMAP_FLUSH        21
+ #define I915_PARAM_RSVD_FOR_FUTURE_USE         22
++#define I915_PARAM_HAS_SECURE_BATCHES  23
+ typedef struct drm_i915_getparam {
+       int param;
+       int __user *value;
+ } drm_i915_getparam_t;
+ /* Ioctl to set kernel params:
+  */
+ #define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
+ #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
+ #define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
+ #define I915_SETPARAM_NUM_USED_FENCES                     4
+ typedef struct drm_i915_setparam {
+       int param;
+       int value;
+ } drm_i915_setparam_t;
+ /* A memory manager for regions of shared memory:
+  */
+ #define I915_MEM_REGION_AGP 1
+ typedef struct drm_i915_mem_alloc {
+       int region;
+       int alignment;
+       int size;
+       int __user *region_offset;      /* offset from start of fb or agp */
+ } drm_i915_mem_alloc_t;
+ typedef struct drm_i915_mem_free {
+       int region;
+       int region_offset;
+ } drm_i915_mem_free_t;
+ typedef struct drm_i915_mem_init_heap {
+       int region;
+       int size;
+       int start;
+ } drm_i915_mem_init_heap_t;
+ /* Allow memory manager to be torn down and re-initialized (eg on
+  * rotate):
+  */
+ typedef struct drm_i915_mem_destroy_heap {
+       int region;
+ } drm_i915_mem_destroy_heap_t;
+ /* Allow X server to configure which pipes to monitor for vblank signals
+  */
+ #define       DRM_I915_VBLANK_PIPE_A  1
+ #define       DRM_I915_VBLANK_PIPE_B  2
+ typedef struct drm_i915_vblank_pipe {
+       int pipe;
+ } drm_i915_vblank_pipe_t;
+ /* Schedule buffer swap at given vertical blank:
+  */
+ typedef struct drm_i915_vblank_swap {
+       drm_drawable_t drawable;
+       enum drm_vblank_seq_type seqtype;
+       unsigned int sequence;
+ } drm_i915_vblank_swap_t;
+ typedef struct drm_i915_hws_addr {
+       __u64 addr;
+ } drm_i915_hws_addr_t;
+ struct drm_i915_gem_init {
+       /**
+        * Beginning offset in the GTT to be managed by the DRM memory
+        * manager.
+        */
+       __u64 gtt_start;
+       /**
+        * Ending offset in the GTT to be managed by the DRM memory
+        * manager.
+        */
+       __u64 gtt_end;
+ };
+ struct drm_i915_gem_create {
+       /**
+        * Requested size for the object.
+        *
+        * The (page-aligned) allocated size for the object will be returned.
+        */
+       __u64 size;
+       /**
+        * Returned handle for the object.
+        *
+        * Object handles are nonzero.
+        */
+       __u32 handle;
+       __u32 pad;
+ };
+ struct drm_i915_gem_pread {
+       /** Handle for the object being read. */
+       __u32 handle;
+       __u32 pad;
+       /** Offset into the object to read from */
+       __u64 offset;
+       /** Length of data to read */
+       __u64 size;
+       /**
+        * Pointer to write the data into.
+        *
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       __u64 data_ptr;
+ };
+ struct drm_i915_gem_pwrite {
+       /** Handle for the object being written to. */
+       __u32 handle;
+       __u32 pad;
+       /** Offset into the object to write to */
+       __u64 offset;
+       /** Length of data to write */
+       __u64 size;
+       /**
+        * Pointer to read the data from.
+        *
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       __u64 data_ptr;
+ };
+ struct drm_i915_gem_mmap {
+       /** Handle for the object being mapped. */
+       __u32 handle;
+       __u32 pad;
+       /** Offset in the object to map. */
+       __u64 offset;
+       /**
+        * Length of data to map.
+        *
+        * The value will be page-aligned.
+        */
+       __u64 size;
+       /**
+        * Returned pointer the data was mapped at.
+        *
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       __u64 addr_ptr;
+ };
+ struct drm_i915_gem_mmap_gtt {
+       /** Handle for the object being mapped. */
+       __u32 handle;
+       __u32 pad;
+       /**
+        * Fake offset to use for subsequent mmap call
+        *
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       __u64 offset;
+ };
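The returned offset is not a GTT address; it is a fake offset that selects the object in a subsequent mmap(2) on the same DRM fd, and callers typically move the object to the GTT domain before touching the mapping. A sketch of that two-step pattern with a hypothetical helper, assuming <sys/mman.h> and <stddef.h> in addition to the earlier includes:

        /* Hypothetical helper: map an object through the GTT aperture. */
        static void *gem_mmap_gtt(int fd, __u32 handle, __u64 size)
        {
                struct drm_i915_gem_mmap_gtt arg;
                struct drm_i915_gem_set_domain sd;
                void *ptr;

                memset(&arg, 0, sizeof(arg));
                arg.handle = handle;
                if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                        return NULL;

                /* the fake offset tells the DRM mmap handler which object to map */
                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, arg.offset);
                if (ptr == MAP_FAILED)
                        return NULL;

                /* move the object to the GTT domain before writing through the map */
                memset(&sd, 0, sizeof(sd));
                sd.handle = handle;
                sd.read_domains = I915_GEM_DOMAIN_GTT;
                sd.write_domain = I915_GEM_DOMAIN_GTT;
                ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);

                return ptr;
        }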
+ struct drm_i915_gem_set_domain {
+       /** Handle for the object */
+       __u32 handle;
+       /** New read domains */
+       __u32 read_domains;
+       /** New write domain */
+       __u32 write_domain;
+ };
+ struct drm_i915_gem_sw_finish {
+       /** Handle for the object */
+       __u32 handle;
+ };
+ struct drm_i915_gem_relocation_entry {
+       /**
+        * Handle of the buffer being pointed to by this relocation entry.
+        *
+        * It's appealing to make this an index into the mm_validate_entry
+        * list instead, but using a handle lets the driver build a relocation
+        * list once for a state buffer and reuse it across execs without
+        * re-writing it each time the buffer is used.
+        */
+       __u32 target_handle;
+       /**
+        * Value to be added to the offset of the target buffer to make up
+        * the relocation entry.
+        */
+       __u32 delta;
+       /** Offset in the buffer the relocation entry will be written into */
+       __u64 offset;
+       /**
+        * Offset value of the target buffer that the relocation entry was last
+        * written as.
+        *
+        * If the buffer has the same offset as last time, we can skip syncing
+        * and writing the relocation.  This value is written back out by
+        * the execbuffer ioctl when the relocation is written.
+        */
+       __u64 presumed_offset;
+       /**
+        * Target memory domains read by this operation.
+        */
+       __u32 read_domains;
+       /**
+        * Target memory domains written by this operation.
+        *
+        * Note that only one domain may be written by the whole
+        * execbuffer operation, so that where there are conflicts,
+        * the application will get -EINVAL back.
+        */
+       __u32 write_domain;
+ };
+ /** @{
+  * Intel memory domains
+  *
+  * Most of these just align with the various caches in
+  * the system and are used to flush and invalidate as
+  * objects end up cached in different domains.
+  */
+ /** CPU cache */
+ #define I915_GEM_DOMAIN_CPU           0x00000001
+ /** Render cache, used by 2D and 3D drawing */
+ #define I915_GEM_DOMAIN_RENDER                0x00000002
+ /** Sampler cache, used by texture engine */
+ #define I915_GEM_DOMAIN_SAMPLER               0x00000004
+ /** Command queue, used to load batch buffers */
+ #define I915_GEM_DOMAIN_COMMAND               0x00000008
+ /** Instruction cache, used by shader programs */
+ #define I915_GEM_DOMAIN_INSTRUCTION   0x00000010
+ /** Vertex address cache */
+ #define I915_GEM_DOMAIN_VERTEX                0x00000020
+ /** GTT domain - aperture and scanout */
+ #define I915_GEM_DOMAIN_GTT           0x00000040
+ /** @} */
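Putting the relocation entry and the domain flags together: userspace records where in its batch a GPU address must be patched, which buffer that address refers to, and how the target will be accessed. A hypothetical, hedged sketch; render-domain access is just an example choice:

        /* Hypothetical helper: describe one pointer in a batch that needs patching. */
        static void fill_reloc(struct drm_i915_gem_relocation_entry *r,
                               __u32 target_handle, __u64 offset_in_batch,
                               __u32 delta, __u64 presumed_offset)
        {
                memset(r, 0, sizeof(*r));
                r->target_handle = target_handle;     /* buffer the pointer refers to */
                r->offset = offset_in_batch;          /* where the kernel writes the address */
                r->delta = delta;                     /* added to the target's final offset */
                r->presumed_offset = presumed_offset; /* last placement userspace saw */
                r->read_domains = I915_GEM_DOMAIN_RENDER;
                r->write_domain = I915_GEM_DOMAIN_RENDER;  /* at most one write domain */
        }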
+ struct drm_i915_gem_exec_object {
+       /**
+        * User's handle for a buffer to be bound into the GTT for this
+        * operation.
+        */
+       __u32 handle;
+       /** Number of relocations to be performed on this buffer */
+       __u32 relocation_count;
+       /**
+        * Pointer to array of struct drm_i915_gem_relocation_entry containing
+        * the relocations to be performed in this buffer.
+        */
+       __u64 relocs_ptr;
+       /** Required alignment in graphics aperture */
+       __u64 alignment;
+       /**
+        * Returned value of the updated offset of the object, for future
+        * presumed_offset writes.
+        */
+       __u64 offset;
+ };
+ struct drm_i915_gem_execbuffer {
+       /**
+        * List of buffers to be validated with their relocations to be
+        * performed on them.
+        *
+        * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+        *
+        * These buffers must be listed in an order such that all relocations
+        * a buffer is performing refer to buffers that have already appeared
+        * in the validate list.
+        */
+       __u64 buffers_ptr;
+       __u32 buffer_count;
+       /** Offset in the batchbuffer to start execution from. */
+       __u32 batch_start_offset;
+       /** Bytes used in batchbuffer from batch_start_offset */
+       __u32 batch_len;
+       __u32 DR1;
+       __u32 DR4;
+       __u32 num_cliprects;
+       /** This is a struct drm_clip_rect *cliprects */
+       __u64 cliprects_ptr;
+ };
+ struct drm_i915_gem_exec_object2 {
+       /**
+        * User's handle for a buffer to be bound into the GTT for this
+        * operation.
+        */
+       __u32 handle;
+       /** Number of relocations to be performed on this buffer */
+       __u32 relocation_count;
+       /**
+        * Pointer to array of struct drm_i915_gem_relocation_entry containing
+        * the relocations to be performed in this buffer.
+        */
+       __u64 relocs_ptr;
+       /** Required alignment in graphics aperture */
+       __u64 alignment;
+       /**
+        * Returned value of the updated offset of the object, for future
+        * presumed_offset writes.
+        */
+       __u64 offset;
+ #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+       __u64 flags;
+       __u64 rsvd1;
+       __u64 rsvd2;
+ };
+ struct drm_i915_gem_execbuffer2 {
+       /**
+        * List of gem_exec_object2 structs
+        */
+       __u64 buffers_ptr;
+       __u32 buffer_count;
+       /** Offset in the batchbuffer to start execution from. */
+       __u32 batch_start_offset;
+       /** Bytes used in batchbuffer from batch_start_offset */
+       __u32 batch_len;
+       __u32 DR1;
+       __u32 DR4;
+       __u32 num_cliprects;
+       /** This is a struct drm_clip_rect *cliprects */
+       __u64 cliprects_ptr;
+ #define I915_EXEC_RING_MASK              (7<<0)
+ #define I915_EXEC_DEFAULT                (0<<0)
+ #define I915_EXEC_RENDER                 (1<<0)
+ #define I915_EXEC_BSD                    (2<<0)
+ #define I915_EXEC_BLT                    (3<<0)
+ /* Used for switching the constants addressing mode on gen4+ RENDER ring.
+  * Gen6+ only supports relative addressing to dynamic state (default) and
+  * absolute addressing.
+  *
+  * These flags are ignored for the BSD and BLT rings.
+  */
+ #define I915_EXEC_CONSTANTS_MASK      (3<<6)
+ #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+ #define I915_EXEC_CONSTANTS_ABSOLUTE  (1<<6)
+ #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
+       __u64 flags;
+       __u64 rsvd1; /* now used for context info */
+       __u64 rsvd2;
+ };
+ /** Resets the SO write offset registers for transform feedback on gen7. */
+ #define I915_EXEC_GEN7_SOL_RESET      (1<<8)
++/** Request a privileged ("secure") batch buffer. Note that this is only
++ * available to DRM_ROOT_ONLY | DRM_MASTER processes.
++ */
++#define I915_EXEC_SECURE              (1<<9)
++
+ #define I915_EXEC_CONTEXT_ID_MASK     (0xffffffff)
+ #define i915_execbuffer2_set_context_id(eb2, context) \
+       (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
+ #define i915_execbuffer2_get_context_id(eb2) \
+       ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
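A minimal submission ties the pieces together: an array of exec_object2 entries (by convention the batch buffer is the last entry), a ring selected through the low flag bits, and a context id stored in rsvd1 via the macro above. Hedged sketch of a hypothetical helper, reusing the earlier includes (notably <stdint.h>):

        /* Hypothetical helper: submit a single batch buffer on the render ring. */
        static int exec_batch(int fd, __u32 batch_handle, __u32 batch_len, __u32 ctx_id)
        {
                struct drm_i915_gem_exec_object2 obj[1];
                struct drm_i915_gem_execbuffer2 eb;

                memset(obj, 0, sizeof(obj));
                obj[0].handle = batch_handle;            /* batch goes last in the list */

                memset(&eb, 0, sizeof(eb));
                eb.buffers_ptr = (__u64)(uintptr_t)obj;
                eb.buffer_count = 1;
                eb.batch_start_offset = 0;
                eb.batch_len = batch_len;
                eb.flags = I915_EXEC_RENDER;             /* ring select in the low bits */
                i915_execbuffer2_set_context_id(eb, ctx_id);

                return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
        }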
+ struct drm_i915_gem_pin {
+       /** Handle of the buffer to be pinned. */
+       __u32 handle;
+       __u32 pad;
+       /** alignment required within the aperture */
+       __u64 alignment;
+       /** Returned GTT offset of the buffer. */
+       __u64 offset;
+ };
+ struct drm_i915_gem_unpin {
+       /** Handle of the buffer to be unpinned. */
+       __u32 handle;
+       __u32 pad;
+ };
+ struct drm_i915_gem_busy {
+       /** Handle of the buffer to check for busy */
+       __u32 handle;
+       /** Return busy status (1 if busy, 0 if idle).
+        * The high word is used to indicate on which rings the object
+        * currently resides:
+        *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+        */
+       __u32 busy;
+ };
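Decoding the return value follows the comment above: bit 0 says whether the object is busy at all, and the high half says on which ring(s). A hypothetical sketch:

        /* Hypothetical helper: 1 if busy on the render ring, 0 if not, -1 on error. */
        static int bo_busy_on_render(int fd, __u32 handle)
        {
                struct drm_i915_gem_busy busy;

                memset(&busy, 0, sizeof(busy));
                busy.handle = handle;
                if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
                        return -1;

                /* bit 0: busy at all; bit 16: render, 17: bsd, 18: blt */
                return (busy.busy & 1) && (busy.busy & (1u << 16)) ? 1 : 0;
        }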
+ #define I915_CACHING_NONE             0
+ #define I915_CACHING_CACHED           1
+ struct drm_i915_gem_caching {
+       /**
+        * Handle of the buffer to set/get the caching level of. */
+       __u32 handle;
+       /**
+        * Caching level to apply or returned value.
+        *
+        * Bits 0-15 are for generic caching control (i.e. the above defined
+        * values). Bits 16-31 are reserved for platform-specific variations
+        * (e.g. l3$ caching on gen7). */
+       __u32 caching;
+ };
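Only the low 16 bits carry the generic levels defined above; the upper bits are left to platform-specific use. A hedged sketch that asks for cacheable backing, assuming the set-caching request code (DRM_IOCTL_I915_GEM_SET_CACHING) is the one defined earlier in this header:

        /* Hypothetical helper: request cacheable backing storage for an object. */
        static int set_bo_cached(int fd, __u32 handle)
        {
                struct drm_i915_gem_caching arg;

                memset(&arg, 0, sizeof(arg));
                arg.handle = handle;
                arg.caching = I915_CACHING_CACHED;   /* generic control, bits 0-15 */

                return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
        }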
+ #define I915_TILING_NONE      0
+ #define I915_TILING_X         1
+ #define I915_TILING_Y         2
+ #define I915_BIT_6_SWIZZLE_NONE               0
+ #define I915_BIT_6_SWIZZLE_9          1
+ #define I915_BIT_6_SWIZZLE_9_10               2
+ #define I915_BIT_6_SWIZZLE_9_11               3
+ #define I915_BIT_6_SWIZZLE_9_10_11    4
+ /* Not seen by userland */
+ #define I915_BIT_6_SWIZZLE_UNKNOWN    5
+ /* Seen by userland. */
+ #define I915_BIT_6_SWIZZLE_9_17               6
+ #define I915_BIT_6_SWIZZLE_9_10_17    7
+ struct drm_i915_gem_set_tiling {
+       /** Handle of the buffer to have its tiling state updated */
+       __u32 handle;
+       /**
+        * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+        * I915_TILING_Y).
+        *
+        * This value is to be set on request, and will be updated by the
+        * kernel on successful return with the actual chosen tiling layout.
+        *
+        * The tiling mode may be demoted to I915_TILING_NONE when the system
+        * has bit 6 swizzling that can't be managed correctly by GEM.
+        *
+        * Buffer contents become undefined when changing tiling_mode.
+        */
+       __u32 tiling_mode;
+       /**
+        * Stride in bytes for the object when in I915_TILING_X or
+        * I915_TILING_Y.
+        */
+       __u32 stride;
+       /**
+        * Returned address bit 6 swizzling required for CPU access through
+        * mmap mapping.
+        */
+       __u32 swizzle_mode;
+ };
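Because the kernel may demote the request and also reports the swizzle mode, callers should check what actually took effect. A hypothetical sketch:

        /* Hypothetical helper: request X tiling, report whether it was granted. */
        static int set_x_tiled(int fd, __u32 handle, __u32 stride)
        {
                struct drm_i915_gem_set_tiling arg;

                memset(&arg, 0, sizeof(arg));
                arg.handle = handle;
                arg.tiling_mode = I915_TILING_X;
                arg.stride = stride;                 /* bytes per tiled row */

                if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg))
                        return -1;

                /* arg.tiling_mode and arg.swizzle_mode now hold what the kernel chose */
                return arg.tiling_mode == I915_TILING_X ? 0 : -1;
        }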
+ struct drm_i915_gem_get_tiling {
+       /** Handle of the buffer to get tiling state for. */
+       __u32 handle;
+       /**
+        * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+        * I915_TILING_Y).
+        */
+       __u32 tiling_mode;
+       /**
+        * Returned address bit 6 swizzling required for CPU access through
+        * mmap mapping.
+        */
+       __u32 swizzle_mode;
+ };
+ struct drm_i915_gem_get_aperture {
+       /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+       __u64 aper_size;
+       /**
+        * Available space in the aperture used by i915_gem_execbuffer, in
+        * bytes
+        */
+       __u64 aper_available_size;
+ };
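For completeness, a hedged sketch of querying the aperture; the helper name is made up and both fields come back as byte counts:

        /* Hypothetical helper: bytes of aperture currently available to execbuffer. */
        static __u64 gtt_available(int fd)
        {
                struct drm_i915_gem_get_aperture ap;

                memset(&ap, 0, sizeof(ap));
                if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &ap))
                        return 0;

                return ap.aper_available_size;
        }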
+ struct drm_i915_get_pipe_from_crtc_id {
+       /** ID of CRTC being requested */
+       __u32 crtc_id;
+       /** pipe of requested CRTC */
+       __u32 pipe;
+ };
+ #define I915_MADV_WILLNEED 0
+ #define I915_MADV_DONTNEED 1
+ #define __I915_MADV_PURGED 2 /* internal state */
+ struct drm_i915_gem_madvise {
+       /** Handle of the buffer to change the backing store advice */
+       __u32 handle;
+       /* Advice: either the buffer will be needed again in the near future,
+        *         or won't be and could be discarded under memory pressure.
+        */
+       __u32 madv;
+       /** Whether the backing store still exists. */
+       __u32 retained;
+ };
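The interesting part is the retained flag on the way back: if the kernel already purged the pages while the object was marked DONTNEED, userspace must treat the contents as lost. Hypothetical sketch:

        /* Hypothetical helper: mark an object purgeable or needed again.
         * Returns the kernel's retained flag, or -1 on error. */
        static int bo_madvise(int fd, __u32 handle, __u32 madv)
        {
                struct drm_i915_gem_madvise arg;

                memset(&arg, 0, sizeof(arg));
                arg.handle = handle;
                arg.madv = madv;                 /* I915_MADV_WILLNEED or _DONTNEED */

                if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
                        return -1;

                return arg.retained;             /* 0: backing store already discarded */
        }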
+ /* flags */
+ #define I915_OVERLAY_TYPE_MASK                0xff
+ #define I915_OVERLAY_YUV_PLANAR       0x01
+ #define I915_OVERLAY_YUV_PACKED       0x02
+ #define I915_OVERLAY_RGB              0x03
+ #define I915_OVERLAY_DEPTH_MASK               0xff00
+ #define I915_OVERLAY_RGB24            0x1000
+ #define I915_OVERLAY_RGB16            0x2000
+ #define I915_OVERLAY_RGB15            0x3000
+ #define I915_OVERLAY_YUV422           0x0100
+ #define I915_OVERLAY_YUV411           0x0200
+ #define I915_OVERLAY_YUV420           0x0300
+ #define I915_OVERLAY_YUV410           0x0400
+ #define I915_OVERLAY_SWAP_MASK                0xff0000
+ #define I915_OVERLAY_NO_SWAP          0x000000
+ #define I915_OVERLAY_UV_SWAP          0x010000
+ #define I915_OVERLAY_Y_SWAP           0x020000
+ #define I915_OVERLAY_Y_AND_UV_SWAP    0x030000
+ #define I915_OVERLAY_FLAGS_MASK               0xff000000
+ #define I915_OVERLAY_ENABLE           0x01000000
+ struct drm_intel_overlay_put_image {
+       /* various flags and src format description */
+       __u32 flags;
+       /* source picture description */
+       __u32 bo_handle;
+       /* stride values and offsets are in bytes, buffer relative */
+       __u16 stride_Y; /* stride for packed formats */
+       __u16 stride_UV;
+       __u32 offset_Y; /* offset for packed formats */
+       __u32 offset_U;
+       __u32 offset_V;
+       /* in pixels */
+       __u16 src_width;
+       __u16 src_height;
+       /* to compensate for the scaling factors of partially covered surfaces */
+       __u16 src_scan_width;
+       __u16 src_scan_height;
+       /* output crtc description */
+       __u32 crtc_id;
+       __u16 dst_x;
+       __u16 dst_y;
+       __u16 dst_width;
+       __u16 dst_height;
+ };
+ /* flags */
+ #define I915_OVERLAY_UPDATE_ATTRS     (1<<0)
+ #define I915_OVERLAY_UPDATE_GAMMA     (1<<1)
+ struct drm_intel_overlay_attrs {
+       __u32 flags;
+       __u32 color_key;
+       __s32 brightness;
+       __u32 contrast;
+       __u32 saturation;
+       __u32 gamma0;
+       __u32 gamma1;
+       __u32 gamma2;
+       __u32 gamma3;
+       __u32 gamma4;
+       __u32 gamma5;
+ };
+ /*
+  * Intel sprite handling
+  *
+  * Color keying works with a min/mask/max tuple.  Both source and destination
+  * color keying is allowed.
+  *
+  * Source keying:
+  * Sprite pixels within the min & max values, masked against the color channels
+  * specified in the mask field, will be transparent.  All other pixels will
+  * be displayed on top of the primary plane.  For RGB surfaces, only the min
+  * and mask fields will be used; ranged compares are not allowed.
+  *
+  * Destination keying:
+  * Primary plane pixels that match the min value, masked against the color
+  * channels specified in the mask field, will be replaced by corresponding
+  * pixels from the sprite plane.
+  *
+  * Note that source & destination keying are exclusive; only one can be
+  * active on a given plane.
+  */
+ #define I915_SET_COLORKEY_NONE                (1<<0) /* disable color key matching */
+ #define I915_SET_COLORKEY_DESTINATION (1<<1)
+ #define I915_SET_COLORKEY_SOURCE      (1<<2)
+ struct drm_intel_sprite_colorkey {
+       __u32 plane_id;
+       __u32 min_value;
+       __u32 channel_mask;
+       __u32 max_value;
+       __u32 flags;
+ };
+ struct drm_i915_gem_wait {
+       /** Handle of BO we shall wait on */
+       __u32 bo_handle;
+       __u32 flags;
+       /** Number of nanoseconds to wait; updated with the time remaining. */
+       __s64 timeout_ns;
+ };
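timeout_ns is in/out: userspace supplies the budget in nanoseconds and the kernel writes back how much of it was left. Hypothetical sketch:

        /* Hypothetical helper: wait up to timeout_ns for rendering to the object. */
        static int bo_wait(int fd, __u32 handle, __s64 timeout_ns)
        {
                struct drm_i915_gem_wait wait;

                memset(&wait, 0, sizeof(wait));
                wait.bo_handle = handle;
                wait.timeout_ns = timeout_ns;    /* updated with the time remaining */

                return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
        }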
+ struct drm_i915_gem_context_create {
+       /* output: id of new context */
+       __u32 ctx_id;
+       __u32 pad;
+ };
+ struct drm_i915_gem_context_destroy {
+       __u32 ctx_id;
+       __u32 pad;
+ };
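Create hands back a context id that plugs into i915_execbuffer2_set_context_id(); destroy takes the same id. Hypothetical sketch:

        /* Hypothetical helpers for hardware context lifetime. */
        static __u32 ctx_create(int fd)
        {
                struct drm_i915_gem_context_create create;

                memset(&create, 0, sizeof(create));
                if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
                        return 0;

                return create.ctx_id;
        }

        static void ctx_destroy(int fd, __u32 ctx_id)
        {
                struct drm_i915_gem_context_destroy destroy;

                memset(&destroy, 0, sizeof(destroy));
                destroy.ctx_id = ctx_id;
                ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
        }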
+ struct drm_i915_reg_read {
+       __u64 offset;
+       __u64 val; /* Return value */
+ };
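offset names the register and val carries the value back. A hedged sketch; which offsets are actually accepted is kernel policy, not something this header encodes:

        /* Hypothetical helper: read a whitelisted register; 0 is returned on error. */
        static __u64 i915_reg_read(int fd, __u64 offset)
        {
                struct drm_i915_reg_read reg;

                memset(&reg, 0, sizeof(reg));
                reg.offset = offset;

                if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg))
                        return 0;

                return reg.val;
        }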
+ #endif /* _UAPI_I915_DRM_H_ */