Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel...
author	Dave Airlie <airlied@redhat.com>
Sun, 16 Dec 2012 06:05:03 +0000 (06:05 +0000)
committer	Dave Airlie <airlied@redhat.com>
Sun, 16 Dec 2012 06:05:03 +0000 (06:05 +0000)
Daniel writes:
A few leftover fixes for 3.8:
- VIC support for hdmi infoframes with the associated drm helper; fixes
  black screens on some TVs (Paulo Zanoni)
- Modeset state check (and fixup if the BIOS messed with the hw) for
  lid-open; modeset-rework fallout. Somehow the original reporter went
  awol, so this stalled for way too long until we found a new
  victim^Wreporter with a broken BIOS.
- seqno wrap fixes from Mika and Chris.
- Some minor fixes all over from various people.
- Another race fix in the pageflip vs. unpin code from Chris.
- hsw vga resume support and a few more fdi link fixes (only used for vga
  on hsw) from Paulo.
- Regression fix for DMAR from Zhenyu Wang - I've scavenged memory from my
  DMAR for a while and it broke right away :(
- Regression fix from Takashi Iwai for ivb lvds - some w/a needs to be
  (partially) moved back into place. Note that these are regressions in
  -next.
- One more fix for ivb 3 pipe support - it now actually seems to work.

* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel: (25 commits)
  drm/i915: Fix missed needs_dmar setting
  drm/i915: Fix shifted screen on top of LVDS on IVY laptop
  drm/i915: disable cpt phase pointer fdi rx workaround
  drm/i915: set the LPT FDI RX polarity reversal bit when needed
  drm/i915: add lpt_init_pch_refclk
  drm/i915: add support for mPHY destination on intel_sbi_{read, write}
  drm/i915: reject modes the LPT FDI receiver can't handle
  drm/i915: fix hsw_fdi_link_train "retry" code
  drm/i915: Close race between processing unpin task and queueing the flip
  drm/i915: fixup l3 parity sysfs access check
  drm/i915: Clear the existing watermarks for g4x when modifying the cursor sr
  drm/i915: do not access BLC_PWM_CTL2 on pre-gen4 hardware
  drm/i915: Don't allow ring tail to reach the same cacheline as head
  drm/i915: Decouple the object from the unbound list before freeing pages
  drm/i915: Set sync_seqno properly after seqno wrap
  drm/i915: Include the last semaphore sync point in the error-state
  drm/i915: Rearrange code to only have a single method for waiting upon the ring
  drm/i915: Simplify flushing activity on the ring
  drm/i915: Preallocate next seqno before touching the ring
  drm/i915: force restore on lid open
  ...

drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
include/drm/drm_crtc.h

diff --combined drivers/gpu/drm/drm_edid.c
@@@ -1639,7 -1639,7 +1639,7 @@@ parse_hdmi_vsdb(struct drm_connector *c
        if (len >= 12)
                connector->audio_latency[1] = db[12];
  
 -      DRM_LOG_KMS("HDMI: DVI dual %d, "
 +      DRM_DEBUG_KMS("HDMI: DVI dual %d, "
                    "max TMDS clock %d, "
                    "latency present %d %d, "
                    "video latency %d %d, "
@@@ -2079,3 -2079,22 +2079,22 @@@ int drm_add_modes_noedid(struct drm_con
        return num_modes;
  }
  EXPORT_SYMBOL(drm_add_modes_noedid);
+ /**
+  * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+  * @mode: mode
+  *
+  * RETURNS:
+  * The VIC number, 0 in case it's not a CEA-861 mode.
+  */
+ uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+ {
+       uint8_t i;
+       for (i = 0; i < drm_num_cea_modes; i++)
+               if (drm_mode_equal(mode, &edid_cea_modes[i]))
+                       return i + 1;
+       return 0;
+ }
+ EXPORT_SYMBOL(drm_mode_cea_vic);
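
As an aside, a minimal sketch of how a driver could consume the new helper
when packing an HDMI AVI infoframe; the infoframe struct and field below are
made-up names for illustration only, not part of this series:

/* Illustrative only: look up the CEA-861 VIC of the mode being set and
 * store it in a (hypothetical) AVI infoframe representation.  A return
 * value of 0 means the mode is not a CEA-861 mode. */
#include <drm/drm_crtc.h>

struct my_avi_infoframe {
	uint8_t vic;
};

static void my_fill_avi_vic(struct my_avi_infoframe *frame,
			    const struct drm_display_mode *mode)
{
	frame->vic = drm_mode_cea_vic(mode);
}
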
diff --combined drivers/gpu/drm/i915/intel_crt.c
@@@ -143,7 -143,7 +143,7 @@@ static void intel_crt_dpms(struct drm_c
        int old_dpms;
  
        /* PCH platforms and VLV only support on/off. */
 -      if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
 +      if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
                mode = DRM_MODE_DPMS_OFF;
  
        if (mode == connector->dpms)
@@@ -198,6 -198,11 +198,11 @@@ static int intel_crt_mode_valid(struct 
        if (mode->clock > max_clock)
                return MODE_CLOCK_HIGH;
  
+       /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+       if (HAS_PCH_LPT(dev) &&
+           (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+               return MODE_CLOCK_HIGH;
        return MODE_OK;
  }
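
To put numbers on the new LPT limit (using the lane formula from
ironlake_get_lanes_required, added later in this diff, with the 270000 kHz
link clock and 24 bpp arguments above): a 1920x1200@60 mode at roughly
154000 kHz needs 154000 * 24 * 21 / 20 = 3880800 kbps, and
3880800 / (270000 * 8) + 1 = 2 lanes, so it is accepted; a 2560x1600@60
mode at roughly 268500 kHz works out to about 6766200 kbps and 4 lanes,
so it is rejected as MODE_CLOCK_HIGH.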
  
@@@ -793,4 -798,12 +798,12 @@@ void intel_crt_init(struct drm_device *
        crt->force_hotplug_required = 0;
  
        dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+       /*
+        * TODO: find a proper way to discover whether we need to set the
+        * polarity reversal bit or not, instead of relying on the BIOS.
+        */
+       if (HAS_PCH_LPT(dev))
+               dev_priv->fdi_rx_polarity_reversed =
+                    !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
  }
diff --combined drivers/gpu/drm/i915/intel_display.c
@@@ -1506,24 -1506,26 +1506,26 @@@ static void intel_disable_pll(struct dr
  
  /* SBI access */
  static void
- intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+               enum intel_sbi_destination destination)
  {
        unsigned long flags;
+       u32 tmp;
  
        spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                               100)) {
+       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
                goto out_unlock;
        }
  
-       I915_WRITE(SBI_ADDR,
-                       (reg << 16));
-       I915_WRITE(SBI_DATA,
-                       value);
-       I915_WRITE(SBI_CTL_STAT,
-                       SBI_BUSY |
-                       SBI_CTL_OP_CRWR);
+       I915_WRITE(SBI_ADDR, (reg << 16));
+       I915_WRITE(SBI_DATA, value);
+       if (destination == SBI_ICLK)
+               tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+       else
+               tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+       I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
  
        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
                                100)) {
@@@ -1536,23 -1538,25 +1538,25 @@@ out_unlock
  }
  
  static u32
- intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+              enum intel_sbi_destination destination)
  {
        unsigned long flags;
        u32 value = 0;
  
        spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                               100)) {
+       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
                goto out_unlock;
        }
  
-       I915_WRITE(SBI_ADDR,
-                       (reg << 16));
-       I915_WRITE(SBI_CTL_STAT,
-                       SBI_BUSY |
-                       SBI_CTL_OP_CRRD);
+       I915_WRITE(SBI_ADDR, (reg << 16));
+       if (destination == SBI_ICLK)
+               value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+       else
+               value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+       I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
  
        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
                                100)) {
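
The destination selector threaded through intel_sbi_read/write above is a
two-value enum; its actual definition is added elsewhere in the series (in a
header not shown in these hunks), but judging from the call sites its shape
is simply:

/* Sketch of the SBI destination selector used above. */
enum intel_sbi_destination {
	SBI_ICLK,	/* iCLK endpoints, e.g. the SSC/iCLKIP registers */
	SBI_MPHY,	/* FDI mPHY registers, new with this series */
};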
@@@ -2424,18 -2428,6 +2428,6 @@@ static void intel_fdi_normal_train(stru
                           FDI_FE_ERRC_ENABLE);
  }
  
- static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 flags = I915_READ(SOUTH_CHICKEN1);
-       flags |= FDI_PHASE_SYNC_OVR(pipe);
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
-       flags |= FDI_PHASE_SYNC_EN(pipe);
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
-       POSTING_READ(SOUTH_CHICKEN1);
- }
  static void ivb_modeset_global_resources(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -2610,8 -2602,6 +2602,6 @@@ static void gen6_fdi_link_train(struct 
        POSTING_READ(reg);
        udelay(150);
  
-       cpt_phase_pointer_enable(dev, pipe);
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
@@@ -2744,8 -2734,6 +2734,6 @@@ static void ivb_manual_fdi_link_train(s
        POSTING_READ(reg);
        udelay(150);
  
-       cpt_phase_pointer_enable(dev, pipe);
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
@@@ -2884,17 -2872,6 +2872,6 @@@ static void ironlake_fdi_pll_disable(st
        udelay(100);
  }
  
- static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 flags = I915_READ(SOUTH_CHICKEN1);
-       flags &= ~(FDI_PHASE_SYNC_EN(pipe));
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
-       flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
-       POSTING_READ(SOUTH_CHICKEN1);
- }
  static void ironlake_fdi_disable(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
        /* Ironlake workaround, disable clock pointer after downing FDI */
        if (HAS_PCH_IBX(dev)) {
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-       } else if (HAS_PCH_CPT(dev)) {
-               cpt_phase_pointer_disable(dev, pipe);
        }
  
        /* still set train pattern 1 */
@@@ -3024,8 -2999,9 +2999,9 @@@ static void lpt_program_iclkip(struct d
  
        /* Disable SSCCTL */
        intel_sbi_write(dev_priv, SBI_SSCCTL6,
-                               intel_sbi_read(dev_priv, SBI_SSCCTL6) |
-                                       SBI_SSCCTL_DISABLE);
+                       intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+                               SBI_SSCCTL_DISABLE,
+                       SBI_ICLK);
  
        /* 20MHz is a corner case which is out of range for the 7-bit divisor */
        if (crtc->mode.clock == 20000) {
                        phaseinc);
  
        /* Program SSCDIVINTPHASE6 */
-       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
        temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
        temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
        temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
        temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
        temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-       intel_sbi_write(dev_priv,
-                       SBI_SSCDIVINTPHASE6,
-                       temp);
+       intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
  
        /* Program SSCAUXDIV */
-       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
        temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
-       intel_sbi_write(dev_priv,
-                       SBI_SSCAUXDIV6,
-                       temp);
+       intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
  
        /* Enable modulator and associated divider */
-       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp &= ~SBI_SSCCTL_DISABLE;
-       intel_sbi_write(dev_priv,
-                       SBI_SSCCTL6,
-                       temp);
+       intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
  
        /* Wait for initialization time */
        udelay(24);
@@@ -4150,17 -4118,6 +4118,17 @@@ static bool intel_choose_pipe_bpp_dithe
                        }
                }
  
 +              if (intel_encoder->type == INTEL_OUTPUT_EDP) {
 +                      /* Use VBT settings if we have an eDP panel */
 +                      unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 +
 +                      if (edp_bpc && edp_bpc < display_bpc) {
 +                              DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
 +                              display_bpc = edp_bpc;
 +                      }
 +                      continue;
 +              }
 +
                /*
                 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
                 * through, clamp it down.  (Note: >12bpc will be caught below.)
@@@ -4878,10 -4835,7 +4846,7 @@@ static int i9xx_crtc_mode_set(struct dr
        return ret;
  }
  
- /*
-  * Initialize reference clocks when the driver loads
-  */
- void ironlake_init_pch_refclk(struct drm_device *dev)
+ static void ironlake_init_pch_refclk(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        }
  }
  
+ /* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+ static void lpt_init_pch_refclk(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
+       bool has_vga = false;
+       bool is_sdv = false;
+       u32 tmp;
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+               switch (encoder->type) {
+               case INTEL_OUTPUT_ANALOG:
+                       has_vga = true;
+                       break;
+               }
+       }
+       if (!has_vga)
+               return;
+       /* XXX: Rip out SDV support once Haswell ships for real. */
+       if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+               is_sdv = true;
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       tmp &= ~SBI_SSCCTL_DISABLE;
+       tmp |= SBI_SSCCTL_PATHALT;
+       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+       udelay(24);
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       tmp &= ~SBI_SSCCTL_PATHALT;
+       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+       if (!is_sdv) {
+               tmp = I915_READ(SOUTH_CHICKEN2);
+               tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+               I915_WRITE(SOUTH_CHICKEN2, tmp);
+               if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+                                      FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+                       DRM_ERROR("FDI mPHY reset assert timeout\n");
+               tmp = I915_READ(SOUTH_CHICKEN2);
+               tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+               I915_WRITE(SOUTH_CHICKEN2, tmp);
+               if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+                                       FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+                                      100))
+                       DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+       }
+       tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+       tmp &= ~(0xFF << 24);
+       tmp |= (0x12 << 24);
+       intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+       if (!is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+               tmp &= ~(0x3 << 6);
+               tmp |= (1 << 6) | (1 << 0);
+               intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+       }
+       if (is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+               tmp |= 0x7FFF;
+               intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+       }
+       tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+       tmp |= (1 << 11);
+       intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+       tmp |= (1 << 11);
+       intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+       if (is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+               intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+               tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+               intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+               tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+               tmp |= (0x3F << 8);
+               intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+               tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+               tmp |= (0x3F << 8);
+               intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+       }
+       tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+       intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+       intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+       if (!is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+               tmp &= ~(7 << 13);
+               tmp |= (5 << 13);
+               intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+               tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+               tmp &= ~(7 << 13);
+               tmp |= (5 << 13);
+               intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+       }
+       tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+       if (!is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+               tmp |= (1 << 27);
+               intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+               tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+               tmp |= (1 << 27);
+               intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+               tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+               tmp &= ~(0xF << 28);
+               tmp |= (4 << 28);
+               intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+               tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+               tmp &= ~(0xF << 28);
+               tmp |= (4 << 28);
+               intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+       }
+       /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+       tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+       tmp |= SBI_DBUFF0_ENABLE;
+       intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+ }
+ /*
+  * Initialize reference clocks when the driver loads
+  */
+ void intel_init_pch_refclk(struct drm_device *dev)
+ {
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+               ironlake_init_pch_refclk(dev);
+       else if (HAS_PCH_LPT(dev))
+               lpt_init_pch_refclk(dev);
+ }
  static int ironlake_get_refclk(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
@@@ -5239,6 -5369,17 +5380,17 @@@ static bool ironlake_check_fdi_lanes(st
        }
  }
  
+ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+ {
+       /*
+        * Account for spread spectrum to avoid
+        * oversubscribing the link. Max center spread
+        * is 2.5%; use 5% for safety's sake.
+        */
+       u32 bps = target_clock * bpp * 21 / 20;
+       return bps / (link_bw * 8) + 1;
+ }
  static void ironlake_set_m_n(struct drm_crtc *crtc,
                             struct drm_display_mode *mode,
                             struct drm_display_mode *adjusted_mode)
        else
                target_clock = adjusted_mode->clock;
  
-       if (!lane) {
-               /*
-                * Account for spread spectrum to avoid
-                * oversubscribing the link. Max center spread
-                * is 2.5%; use 5% for safety's sake.
-                */
-               u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
-               lane = bps / (link_bw * 8) + 1;
-       }
+       if (!lane)
+               lane = ironlake_get_lanes_required(target_clock, link_bw,
+                                                  intel_crtc->bpp);
  
        intel_crtc->fdi_lanes = lane;
  
@@@ -6940,11 -7075,18 +7086,18 @@@ static void do_intel_finish_page_flip(s
  
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
-       if (work == NULL || !work->pending) {
+       /* Ensure we don't miss a work->pending update ... */
+       smp_rmb();
+       if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }
  
+       /* and that the unpin work is consistent wrt ->pending. */
+       smp_rmb();
        intel_crtc->unpin_work = NULL;
  
        if (work->event)
@@@ -6988,16 -7130,25 +7141,25 @@@ void intel_prepare_page_flip(struct drm
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
  
+       /* NB: An MMIO update of the plane base pointer will also
+        * generate a page-flip completion irq, i.e. every modeset
+        * is also accompanied by a spurious intel_prepare_page_flip().
+        */
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (intel_crtc->unpin_work) {
-               if ((++intel_crtc->unpin_work->pending) > 1)
-                       DRM_ERROR("Prepared flip multiple times\n");
-       } else {
-               DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
-       }
+       if (intel_crtc->unpin_work)
+               atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
        spin_unlock_irqrestore(&dev->event_lock, flags);
  }
  
+ inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+ {
+       /* Ensure that the work item is consistent when activating it ... */
+       smp_wmb();
+       atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+       /* and that it is marked active as soon as the irq could fire. */
+       smp_wmb();
+ }
  static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, 0); /* aux display base address, unused */
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -7071,6 -7224,7 +7235,7 @@@ static int intel_gen3_queue_flip(struc
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, MI_NOOP);
  
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -7117,6 -7271,8 +7282,8 @@@ static int intel_gen4_queue_flip(struc
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -7159,6 -7315,8 +7326,8 @@@ static int intel_gen6_queue_flip(struc
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -7213,6 -7371,8 +7382,8 @@@ static int intel_gen7_queue_flip(struc
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, (MI_NOOP));
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -8394,8 -8554,7 +8565,7 @@@ static void intel_setup_outputs(struct 
                        intel_encoder_clones(encoder);
        }
  
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-               ironlake_init_pch_refclk(dev);
+       intel_init_pch_refclk(dev);
  
        drm_helper_move_panel_connectors_to_head(dev);
  }
@@@ -8999,7 -9158,8 +9169,8 @@@ static void intel_sanitize_encoder(stru
  
  /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
   * and i915 state tracking structures. */
- void intel_modeset_setup_hw_state(struct drm_device *dev)
+ void intel_modeset_setup_hw_state(struct drm_device *dev,
+                                 bool force_restore)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
                intel_sanitize_crtc(crtc);
        }
  
-       intel_modeset_update_staged_output_state(dev);
+       if (force_restore) {
+               for_each_pipe(pipe) {
+                       crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+                       intel_set_mode(&crtc->base, &crtc->base.mode,
+                                      crtc->base.x, crtc->base.y, crtc->base.fb);
+               }
+       } else {
+               intel_modeset_update_staged_output_state(dev);
+       }
  
        intel_modeset_check_state(dev);
  
@@@ -9111,7 -9279,7 +9290,7 @@@ void intel_modeset_gem_init(struct drm_
  
        intel_setup_overlay(dev);
  
-       intel_modeset_setup_hw_state(dev);
+       intel_modeset_setup_hw_state(dev, false);
  }
  
  void intel_modeset_cleanup(struct drm_device *dev)
diff --combined drivers/gpu/drm/i915/intel_pm.c
@@@ -1325,10 -1325,11 +1325,11 @@@ static void valleyview_update_wm(struc
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
-                  (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+                  (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        I915_WRITE(DSPFW3,
-                  (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+                  (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+                  (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
  }
  
  static void g4x_update_wm(struct drm_device *dev)
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
-                  (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+                  (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        /* HPLL off in SR has some issues on G4x... disable it */
        I915_WRITE(DSPFW3,
-                  (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+                  (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
  }
  
@@@ -2378,9 -2379,15 +2379,9 @@@ int intel_enable_rc6(const struct drm_d
        if (i915_enable_rc6 >= 0)
                return i915_enable_rc6;
  
 -      if (INTEL_INFO(dev)->gen == 5) {
 -#ifdef CONFIG_INTEL_IOMMU
 -              /* Disable rc6 on ilk if VT-d is on. */
 -              if (intel_iommu_gfx_mapped)
 -                      return false;
 -#endif
 -              DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
 -              return INTEL_RC6_ENABLE;
 -      }
 +      /* Disable RC6 on Ironlake */
 +      if (INTEL_INFO(dev)->gen == 5)
 +              return 0;
  
        if (IS_HASWELL(dev)) {
                DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
@@@ -2647,6 -2654,7 +2648,7 @@@ static void ironlake_enable_rc6(struct 
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       bool was_interruptible;
        int ret;
  
        /* rc6 disabled by default due to repeated reports of hanging during
        if (ret)
                return;
  
+       was_interruptible = dev_priv->mm.interruptible;
+       dev_priv->mm.interruptible = false;
        /*
         * GPU can automatically power down the render unit if given a page
         * to save state.
        ret = intel_ring_begin(ring, 6);
        if (ret) {
                ironlake_teardown_rc6(dev);
+               dev_priv->mm.interruptible = was_interruptible;
                return;
        }
  
         * does an implicit flush, combined with MI_FLUSH above, it should be
         * safe to assume that renderctx is valid
         */
-       ret = intel_wait_ring_idle(ring);
+       ret = intel_ring_idle(ring);
+       dev_priv->mm.interruptible = was_interruptible;
        if (ret) {
                DRM_ERROR("failed to enable ironlake power power savings\n");
                ironlake_teardown_rc6(dev);
@@@ -3440,6 -3453,11 +3447,11 @@@ static void cpt_init_clock_gating(struc
        I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
+       /* The below fixes the weird display corruption, a few pixels shifted
+        * downward, on (only) LVDS of some HP laptops with IVY.
+        */
+       for_each_pipe(pipe)
+               I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
        /* WADP0ClockGatingDisable */
        for_each_pipe(pipe) {
                I915_WRITE(TRANS_CHICKEN1(pipe),
diff --combined drivers/gpu/drm/i915/intel_sdvo.c
@@@ -509,7 -509,7 +509,7 @@@ out
  static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
                                     void *response, int response_len)
  {
-       u8 retry = 5;
+       u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
        u8 status;
        int i;
  
         * command to be complete.
         *
         * Check 5 times in case the hardware failed to read the docs.
+        *
+        * Also beware that the first response by many devices is to
+        * reply PENDING and stall for time. TVs are notorious for
+        * requiring longer than specified to complete their replies.
+        * Originally (in the DDX long ago), the delay was only ever 15ms
+        * with an additional delay of 30ms applied for TVs added later after
+        * many experiments. To accommodate both sets of delays, we do a
+        * sequence of slow checks if the device is falling behind and fails
+        * to reply within 5*15µs.
         */
        if (!intel_sdvo_read_byte(intel_sdvo,
                                  SDVO_I2C_CMD_STATUS,
                                  &status))
                goto log_fail;
  
-       while (status == SDVO_CMD_STATUS_PENDING && retry--) {
-               udelay(15);
+       while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+               if (retry < 10)
+                       msleep(15);
+               else
+                       udelay(15);
                if (!intel_sdvo_read_byte(intel_sdvo,
                                          SDVO_I2C_CMD_STATUS,
                                          &status))
@@@ -1535,15 -1548,9 +1548,9 @@@ intel_sdvo_detect(struct drm_connector 
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        enum drm_connector_status ret;
  
-       if (!intel_sdvo_write_cmd(intel_sdvo,
-                                 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
-               return connector_status_unknown;
-       /* add 30ms delay when the output type might be TV */
-       if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
-               msleep(30);
-       if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+       if (!intel_sdvo_get_value(intel_sdvo,
+                                 SDVO_CMD_GET_ATTACHED_DISPLAYS,
+                                 &response, 2))
                return connector_status_unknown;
  
        DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@@ -2244,6 -2251,7 +2251,6 @@@ intel_sdvo_dvi_init(struct intel_sdvo *
                connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
                intel_sdvo->is_hdmi = true;
        }
 -      intel_sdvo->base.cloneable = true;
  
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
        if (intel_sdvo->is_hdmi)
@@@ -2274,6 -2282,7 +2281,6 @@@ intel_sdvo_tv_init(struct intel_sdvo *i
  
        intel_sdvo->is_tv = true;
        intel_sdvo->base.needs_tv_clock = true;
 -      intel_sdvo->base.cloneable = false;
  
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
  
@@@ -2316,6 -2325,8 +2323,6 @@@ intel_sdvo_analog_init(struct intel_sdv
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
        }
  
 -      intel_sdvo->base.cloneable = true;
 -
        intel_sdvo_connector_init(intel_sdvo_connector,
                                  intel_sdvo);
        return true;
@@@ -2346,6 -2357,9 +2353,6 @@@ intel_sdvo_lvds_init(struct intel_sdvo 
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
        }
  
 -      /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
 -      intel_sdvo->base.cloneable = false;
 -
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
                goto err;
@@@ -2418,18 -2432,6 +2425,18 @@@ intel_sdvo_output_setup(struct intel_sd
        return true;
  }
  
 +static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
 +{
 +      struct drm_device *dev = intel_sdvo->base.base.dev;
 +      struct drm_connector *connector, *tmp;
 +
 +      list_for_each_entry_safe(connector, tmp,
 +                               &dev->mode_config.connector_list, head) {
 +              if (intel_attached_encoder(connector) == &intel_sdvo->base)
 +                      intel_sdvo_destroy(connector);
 +      }
 +}
 +
  static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
                                          struct intel_sdvo_connector *intel_sdvo_connector,
                                          int type)
@@@ -2751,20 -2753,9 +2758,20 @@@ bool intel_sdvo_init(struct drm_device 
                                    intel_sdvo->caps.output_flags) != true) {
                DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
                              SDVO_NAME(intel_sdvo));
 -              goto err;
 +              /* Output_setup can leave behind connectors! */
 +              goto err_output;
        }
  
 +      /*
 +       * Cloning SDVO with anything is often impossible, since the SDVO
 +       * encoder can request a special input timing mode. And even if that's
 +       * not the case we have evidence that cloning a plain unscaled mode with
 +       * VGA doesn't really work. Furthermore the cloning flags are way too
 +       * simplistic anyway to express such constraints, so just give up on
 +       * cloning for SDVO encoders.
 +       */
 +      intel_sdvo->base.cloneable = false;
 +
        /* Only enable the hotplug irq if we need it, to work around noisy
         * hotplug lines.
         */
  
        /* Set the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
 -              goto err;
 +              goto err_output;
  
        if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
                                                    &intel_sdvo->pixel_clock_min,
                                                    &intel_sdvo->pixel_clock_max))
 -              goto err;
 +              goto err_output;
  
        DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
                        "clock range %dMHz - %dMHz, "
                        (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
        return true;
  
 +err_output:
 +      intel_sdvo_output_cleanup(intel_sdvo);
 +
  err:
        drm_encoder_cleanup(&intel_encoder->base);
        i2c_del_adapter(&intel_sdvo->ddc);
diff --combined include/drm/drm_crtc.h
@@@ -920,6 -920,12 +920,6 @@@ extern void drm_mode_set_crtcinfo(struc
  extern void drm_mode_connector_list_update(struct drm_connector *connector);
  extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                                                struct edid *edid);
 -extern int drm_connector_property_set_value(struct drm_connector *connector,
 -                                       struct drm_property *property,
 -                                       uint64_t value);
 -extern int drm_connector_property_get_value(struct drm_connector *connector,
 -                                       struct drm_property *property,
 -                                       uint64_t *value);
  extern int drm_object_property_set_value(struct drm_mode_object *obj,
                                         struct drm_property *property,
                                         uint64_t val);
@@@ -941,6 -947,8 +941,6 @@@ extern int drmfb_remove(struct drm_devi
  extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
  extern bool drm_crtc_in_use(struct drm_crtc *crtc);
  
 -extern void drm_connector_attach_property(struct drm_connector *connector,
 -                                        struct drm_property *property, uint64_t init_val);
  extern void drm_object_attach_property(struct drm_mode_object *obj,
                                       struct drm_property *property,
                                       uint64_t init_val);
@@@ -1047,6 -1055,7 +1047,7 @@@ extern struct drm_display_mode *drm_gtf
                                int GTF_2C, int GTF_K, int GTF_2J);
  extern int drm_add_modes_noedid(struct drm_connector *connector,
                                int hdisplay, int vdisplay);
+ extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
  
  extern int drm_edid_header_is_valid(const u8 *raw_edid);
  extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);