Merge branch 'drm-core-next' of git://people.freedesktop.org/~airlied/linux
author  Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Jan 2012 19:04:36 +0000 (11:04 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Jan 2012 19:04:36 +0000 (11:04 -0800)
* 'drm-core-next' of git://people.freedesktop.org/~airlied/linux: (307 commits)
  drm/nouveau/pm: fix build with HWMON off
  gma500: silence gcc warnings in mid_get_vbt_data()
  drm/ttm: fix condition (and vs or)
  drm/radeon: double lock typo in radeon_vm_bo_rmv()
  drm/radeon: use after free in radeon_vm_bo_add()
  drm/sis|via: don't return stack garbage from free_mem ioctl
  drm/radeon/kms: remove pointless CS flags priority struct
  drm/radeon/kms: check if vm is supported in VA ioctl
  drm: introduce drm_can_sleep and use in intel/radeon drivers. (v2)
  radeon: Fix disabling PCI bus mastering on big endian hosts.
  ttm: fix agp since ttm tt rework
  agp: Fix multi-line warning message whitespace
  drm/ttm/dma: Fix accounting error when calling ttm_mem_global_free_page and don't try to free freed pages.
  drm/ttm/dma: Only call set_pages_array_wb when the page is not in WB pool.
  drm/radeon/kms: sync across multiple rings when doing bo moves v3
  drm/radeon/kms: Add support for multi-ring sync in CS ioctl (v2)
  drm/radeon: GPU virtual memory support v22
  drm: make DRM_UNLOCKED ioctls with their own mutex
  drm: no need to hold global mutex for static data
  drm/radeon/benchmark: common modes sweep ignores 640x480@32
  ...

Fix up trivial conflicts in radeon/evergreen.c and vmwgfx/vmwgfx_kms.c
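
Both conflict sites trace back to the drm_mode_fb_cmd -> drm_mode_fb_cmd2 conversion that runs through the diff below: the new structure drops the single pitch/handle/bpp/depth fields in favour of per-plane pitches[]/handles[] arrays plus a FOURCC pixel_format. A minimal sketch of filling the new structure for a 32-bpp framebuffer, with made-up geometry and a hypothetical GEM handle (illustrative only, not taken from any driver in this pull):

    /* Illustrative sketch only: the geometry and "gem_handle" are made up. */
    struct drm_mode_fb_cmd2 mode_cmd = { 0 };

    mode_cmd.width        = 1024;
    mode_cmd.height       = 768;
    mode_cmd.pixel_format = DRM_FORMAT_XRGB8888; /* replaces the old bpp/depth pair */
    mode_cmd.pitches[0]   = mode_cmd.width * 4;  /* per-plane pitch array */
    mode_cmd.handles[0]   = gem_handle;          /* per-plane buffer handles (user-facing path) */

Drivers that still need bpp/depth internally can recover them with drm_fb_get_bpp_depth(), as the vmwgfx hunk below does.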

drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
include/drm/drmP.h

@@@ -756,9 -756,9 +756,9 @@@ intel_enable_semaphores(struct drm_devi
        if (i915_semaphores >= 0)
                return i915_semaphores;
  
 -      /* Enable semaphores on SNB when IO remapping is off */
 +      /* Disable semaphores on SNB */
        if (INTEL_INFO(dev)->gen == 6)
 -              return !intel_iommu_enabled;
 +              return 0;
  
        return 1;
  }
@@@ -971,6 -971,31 +971,31 @@@ i915_gem_execbuffer_retire_commands(str
  }
  
  static int
+ i915_reset_gen7_sol_offsets(struct drm_device *dev,
+                           struct intel_ring_buffer *ring)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret, i;
+       if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
+               return 0;
+       ret = intel_ring_begin(ring, 4 * 3);
+       if (ret)
+               return ret;
+       for (i = 0; i < 4; i++) {
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+               intel_ring_emit(ring, 0);
+       }
+       intel_ring_advance(ring);
+       return 0;
+ }
+ static int
  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
        struct intel_ring_buffer *ring;
        u32 exec_start, exec_len;
        u32 seqno;
+       u32 mask;
        int ret, mode, i;
  
        if (!i915_gem_check_execbuffer(args)) {
        }
  
        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+       mask = I915_EXEC_CONSTANTS_MASK;
        switch (mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
                            mode == I915_EXEC_CONSTANTS_REL_SURFACE)
                                return -EINVAL;
  
-                       ret = intel_ring_begin(ring, 4);
-                       if (ret)
-                               return ret;
-                       intel_ring_emit(ring, MI_NOOP);
-                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-                       intel_ring_emit(ring, INSTPM);
-                       intel_ring_emit(ring,
-                                       I915_EXEC_CONSTANTS_MASK << 16 | mode);
-                       intel_ring_advance(ring);
-                       dev_priv->relative_constants_mode = mode;
+                       /* The HW changed the meaning on this bit on gen6 */
+                       if (INTEL_INFO(dev)->gen >= 6)
+                               mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
                }
        }
  
+       if (ring == &dev_priv->ring[RCS] &&
+           mode != dev_priv->relative_constants_mode) {
+               ret = intel_ring_begin(ring, 4);
+               if (ret)
+                               goto err;
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit(ring, INSTPM);
+               intel_ring_emit(ring, mask << 16 | mode);
+               intel_ring_advance(ring);
+               dev_priv->relative_constants_mode = mode;
+       }
+       if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+               ret = i915_reset_gen7_sol_offsets(dev, ring);
+               if (ret)
+                       goto err;
+       }
        trace_i915_gem_ring_dispatch(ring, seqno);
  
        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
@@@ -915,8 -915,8 +915,8 @@@ static void assert_panel_unlocked(struc
             pipe_name(pipe));
  }
  
- static void assert_pipe(struct drm_i915_private *dev_priv,
-                       enum pipe pipe, bool state)
+ void assert_pipe(struct drm_i915_private *dev_priv,
+                enum pipe pipe, bool state)
  {
        int reg;
        u32 val;
             "pipe %c assertion failure (expected %s, current %s)\n",
             pipe_name(pipe), state_string(state), state_string(cur_state));
  }
- #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
- #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
  
  static void assert_plane_enabled(struct drm_i915_private *dev_priv,
                                 enum plane plane)
@@@ -1206,7 -1204,8 +1204,8 @@@ static void intel_disable_pch_pll(struc
                                  enum pipe pipe)
  {
        int reg;
-       u32 val;
+       u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
+               pll_sel = TRANSC_DPLL_ENABLE;
  
        if (pipe > 1)
                return;
        /* Make sure transcoder isn't still depending on us */
        assert_transcoder_disabled(dev_priv, pipe);
  
+       if (pipe == 0)
+               pll_sel |= TRANSC_DPLLA_SEL;
+       else if (pipe == 1)
+               pll_sel |= TRANSC_DPLLB_SEL;
+       if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+               return;
        reg = PCH_DPLL(pipe);
        val = I915_READ(reg);
        val &= ~DPLL_VCO_ENABLE;
@@@ -1511,8 -1519,8 +1519,8 @@@ static void i8xx_enable_fbc(struct drm_
        u32 fbc_ctl, fbc_ctl2;
  
        cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
-       if (fb->pitch < cfb_pitch)
-               cfb_pitch = fb->pitch;
+       if (fb->pitches[0] < cfb_pitch)
+               cfb_pitch = fb->pitches[0];
  
        /* FBC_CTL wants 64B units */
        cfb_pitch = (cfb_pitch / 64) - 1;
@@@ -2073,11 -2081,11 +2081,11 @@@ static int i9xx_update_plane(struct drm
        I915_WRITE(reg, dspcntr);
  
        Start = obj->gtt_offset;
-       Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+       Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
  
        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-                     Start, Offset, x, y, fb->pitch);
-       I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+                     Start, Offset, x, y, fb->pitches[0]);
+       I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_INFO(dev)->gen >= 4) {
                I915_WRITE(DSPSURF(plane), Start);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@@ -2154,11 -2162,11 +2162,11 @@@ static int ironlake_update_plane(struc
        I915_WRITE(reg, dspcntr);
  
        Start = obj->gtt_offset;
-       Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+       Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
  
        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-                     Start, Offset, x, y, fb->pitch);
-       I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+                     Start, Offset, x, y, fb->pitches[0]);
+       I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
@@@ -4509,7 -4517,7 +4517,7 @@@ static void ironlake_update_wm(struct d
         */
  }
  
- static void sandybridge_update_wm(struct drm_device *dev)
+ void sandybridge_update_wm(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
  
-       if (!single_plane_enabled(enabled))
+       if (!single_plane_enabled(enabled) ||
+           dev_priv->sprite_scaling_enabled)
                return;
        enabled = ffs(enabled) - 1;
  
                   cursor_wm);
  }
  
+ static bool
+ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+                             uint32_t sprite_width, int pixel_size,
+                             const struct intel_watermark_params *display,
+                             int display_latency_ns, int *sprite_wm)
+ {
+       struct drm_crtc *crtc;
+       int clock;
+       int entries, tlb_miss;
+       crtc = intel_get_crtc_for_plane(dev, plane);
+       if (crtc->fb == NULL || !crtc->enabled) {
+               *sprite_wm = display->guard_size;
+               return false;
+       }
+       clock = crtc->mode.clock;
+       /* Use the small buffer method to calculate the sprite watermark */
+       entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+       tlb_miss = display->fifo_size*display->cacheline_size -
+               sprite_width * 8;
+       if (tlb_miss > 0)
+               entries += tlb_miss;
+       entries = DIV_ROUND_UP(entries, display->cacheline_size);
+       *sprite_wm = entries + display->guard_size;
+       if (*sprite_wm > (int)display->max_wm)
+               *sprite_wm = display->max_wm;
+       return true;
+ }
+ static bool
+ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+                               uint32_t sprite_width, int pixel_size,
+                               const struct intel_watermark_params *display,
+                               int latency_ns, int *sprite_wm)
+ {
+       struct drm_crtc *crtc;
+       unsigned long line_time_us;
+       int clock;
+       int line_count, line_size;
+       int small, large;
+       int entries;
+       if (!latency_ns) {
+               *sprite_wm = 0;
+               return false;
+       }
+       crtc = intel_get_crtc_for_plane(dev, plane);
+       clock = crtc->mode.clock;
+       line_time_us = (sprite_width * 1000) / clock;
+       line_count = (latency_ns / line_time_us + 1000) / 1000;
+       line_size = sprite_width * pixel_size;
+       /* Use the minimum of the small and large buffer method for primary */
+       small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+       large = line_count * line_size;
+       entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+       *sprite_wm = entries + display->guard_size;
+       return *sprite_wm > 0x3ff ? false : true;
+ }
+ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+                                        uint32_t sprite_width, int pixel_size)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+       int sprite_wm, reg;
+       int ret;
+       switch (pipe) {
+       case 0:
+               reg = WM0_PIPEA_ILK;
+               break;
+       case 1:
+               reg = WM0_PIPEB_ILK;
+               break;
+       case 2:
+               reg = WM0_PIPEC_IVB;
+               break;
+       default:
+               return; /* bad pipe */
+       }
+       ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+                                           &sandybridge_display_wm_info,
+                                           latency, &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+       DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+                                             pixel_size,
+                                             &sandybridge_display_srwm_info,
+                                             SNB_READ_WM1_LATENCY() * 500,
+                                             &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(WM1S_LP_ILK, sprite_wm);
+       /* Only IVB has two more LP watermarks for sprite */
+       if (!IS_IVYBRIDGE(dev))
+               return;
+       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+                                             pixel_size,
+                                             &sandybridge_display_srwm_info,
+                                             SNB_READ_WM2_LATENCY() * 500,
+                                             &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(WM2S_LP_IVB, sprite_wm);
+       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+                                             pixel_size,
+                                             &sandybridge_display_srwm_info,
+                                             SNB_READ_WM3_LATENCY() * 500,
+                                             &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(WM3S_LP_IVB, sprite_wm);
+ }
  /**
   * intel_update_watermarks - update FIFO watermark values based on current modes
   *
@@@ -4659,6 -4811,16 +4811,16 @@@ static void intel_update_watermarks(str
                dev_priv->display.update_wm(dev);
  }
  
+ void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+                                   uint32_t sprite_width, int pixel_size)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (dev_priv->display.update_sprite_wm)
+               dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+                                                  pixel_size);
+ }
  static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  {
        if (i915_panel_use_ssc >= 0)
@@@ -5155,7 -5317,7 +5317,7 @@@ static int i9xx_crtc_mode_set(struct dr
                adjusted_mode->crtc_vsync_end -= 1;
                adjusted_mode->crtc_vsync_start -= 1;
        } else
-               pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+               pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
  
        I915_WRITE(HTOTAL(pipe),
                   (adjusted_mode->crtc_hdisplay - 1) |
@@@ -5822,14 -5984,45 +5984,45 @@@ static int intel_crtc_mode_set(struct d
  
        ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
                                              x, y, old_fb);
        drm_vblank_post_modeset(dev, pipe);
  
-       intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+       if (ret)
+               intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+       else
+               intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
  
        return ret;
  }
  
+ static bool intel_eld_uptodate(struct drm_connector *connector,
+                              int reg_eldv, uint32_t bits_eldv,
+                              int reg_elda, uint32_t bits_elda,
+                              int reg_edid)
+ {
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       uint8_t *eld = connector->eld;
+       uint32_t i;
+       i = I915_READ(reg_eldv);
+       i &= bits_eldv;
+       if (!eld[0])
+               return !i;
+       if (!i)
+               return false;
+       i = I915_READ(reg_elda);
+       i &= ~bits_elda;
+       I915_WRITE(reg_elda, i);
+       for (i = 0; i < eld[2]; i++)
+               if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+                       return false;
+       return true;
+ }
  static void g4x_write_eld(struct drm_connector *connector,
                          struct drm_crtc *crtc)
  {
        else
                eldv = G4X_ELDV_DEVCTG;
  
+       if (intel_eld_uptodate(connector,
+                              G4X_AUD_CNTL_ST, eldv,
+                              G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+                              G4X_HDMIW_HDMIEDID))
+               return;
        i = I915_READ(G4X_AUD_CNTL_ST);
        i &= ~(eldv | G4X_ELD_ADDR);
        len = (i >> 9) & 0x1f;          /* ELD buffer size */
@@@ -5876,14 -6075,14 +6075,14 @@@ static void ironlake_write_eld(struct d
        int aud_cntl_st;
        int aud_cntrl_st2;
  
-       if (IS_IVYBRIDGE(connector->dev)) {
-               hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
-               aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
-               aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
+       if (HAS_PCH_IBX(connector->dev)) {
+               hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+               aud_cntl_st = IBX_AUD_CNTL_ST_A;
+               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
        } else {
-               hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
-               aud_cntl_st = GEN5_AUD_CNTL_ST_A;
-               aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
+               hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+               aud_cntl_st = CPT_AUD_CNTL_ST_A;
+               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
        }
  
        i = to_intel_crtc(crtc)->pipe;
        if (!i) {
                DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
                /* operate blindly on all ports */
-               eldv = GEN5_ELD_VALIDB;
-               eldv |= GEN5_ELD_VALIDB << 4;
-               eldv |= GEN5_ELD_VALIDB << 8;
+               eldv = IBX_ELD_VALIDB;
+               eldv |= IBX_ELD_VALIDB << 4;
+               eldv |= IBX_ELD_VALIDB << 8;
        } else {
                DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
-               eldv = GEN5_ELD_VALIDB << ((i - 1) * 4);
+               eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+       }
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+               DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+               eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
        }
  
+       if (intel_eld_uptodate(connector,
+                              aud_cntrl_st2, eldv,
+                              aud_cntl_st, IBX_ELD_ADDRESS,
+                              hdmiw_hdmiedid))
+               return;
        i = I915_READ(aud_cntrl_st2);
        i &= ~eldv;
        I915_WRITE(aud_cntrl_st2, i);
        if (!eld[0])
                return;
  
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-               DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
-               eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
-       }
        i = I915_READ(aud_cntl_st);
-       i &= ~GEN5_ELD_ADDRESS;
+       i &= ~IBX_ELD_ADDRESS;
        I915_WRITE(aud_cntl_st, i);
  
        len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
@@@ -6298,7 -6503,7 +6503,7 @@@ static struct drm_display_mode load_det
  
  static struct drm_framebuffer *
  intel_framebuffer_create(struct drm_device *dev,
-                        struct drm_mode_fb_cmd *mode_cmd,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
                         struct drm_i915_gem_object *obj)
  {
        struct intel_framebuffer *intel_fb;
@@@ -6340,7 -6545,7 +6545,7 @@@ intel_framebuffer_create_for_mode(struc
                                  int depth, int bpp)
  {
        struct drm_i915_gem_object *obj;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
  
        obj = i915_gem_alloc_object(dev,
                                    intel_framebuffer_size_for_mode(mode, bpp));
  
        mode_cmd.width = mode->hdisplay;
        mode_cmd.height = mode->vdisplay;
-       mode_cmd.depth = depth;
-       mode_cmd.bpp = bpp;
-       mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
+       mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+                                                               bpp);
+       mode_cmd.pixel_format = 0;
  
        return intel_framebuffer_create(dev, &mode_cmd, obj);
  }
@@@ -6372,11 -6577,11 +6577,11 @@@ mode_fits_in_fbdev(struct drm_device *d
                return NULL;
  
        fb = &dev_priv->fbdev->ifb.base;
-       if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
-                                                         fb->bits_per_pixel))
+       if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+                                                              fb->bits_per_pixel))
                return NULL;
  
-       if (obj->base.size < mode->vdisplay * fb->pitch)
+       if (obj->base.size < mode->vdisplay * fb->pitches[0])
                return NULL;
  
        return fb;
@@@ -7009,7 -7214,7 +7214,7 @@@ static int intel_gen2_queue_flip(struc
                goto out;
  
        /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+       offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
  
        ret = BEGIN_LP_RING(6);
        if (ret)
        OUT_RING(MI_NOOP);
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch);
+       OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
        OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();
@@@ -7050,7 -7255,7 +7255,7 @@@ static int intel_gen3_queue_flip(struc
                goto out;
  
        /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+       offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
  
        ret = BEGIN_LP_RING(6);
        if (ret)
        OUT_RING(MI_NOOP);
        OUT_RING(MI_DISPLAY_FLIP_I915 |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch);
+       OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
        OUT_RING(MI_NOOP);
  
@@@ -7097,7 -7302,7 +7302,7 @@@ static int intel_gen4_queue_flip(struc
         */
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch);
+       OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset | obj->tiling_mode);
  
        /* XXX Enabling the panel-fitter across page-flip is so far
@@@ -7132,7 -7337,7 +7337,7 @@@ static int intel_gen6_queue_flip(struc
  
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch | obj->tiling_mode);
+       OUT_RING(fb->pitches[0] | obj->tiling_mode);
        OUT_RING(obj->gtt_offset);
  
        pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
@@@ -7168,7 -7373,7 +7373,7 @@@ static int intel_gen7_queue_flip(struc
                goto out;
  
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
-       intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+       intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, (obj->gtt_offset));
        intel_ring_emit(ring, (MI_NOOP));
        intel_ring_advance(ring);
@@@ -7594,7 -7799,7 +7799,7 @@@ static const struct drm_framebuffer_fun
  
  int intel_framebuffer_init(struct drm_device *dev,
                           struct intel_framebuffer *intel_fb,
-                          struct drm_mode_fb_cmd *mode_cmd,
+                          struct drm_mode_fb_cmd2 *mode_cmd,
                           struct drm_i915_gem_object *obj)
  {
        int ret;
        if (obj->tiling_mode == I915_TILING_Y)
                return -EINVAL;
  
-       if (mode_cmd->pitch & 63)
+       if (mode_cmd->pitches[0] & 63)
                return -EINVAL;
  
-       switch (mode_cmd->bpp) {
-       case 8:
-       case 16:
-               /* Only pre-ILK can handle 5:5:5 */
-               if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
-                       return -EINVAL;
+       switch (mode_cmd->pixel_format) {
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               /* RGB formats are common across chipsets */
                break;
-       case 24:
-       case 32:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_VYUY:
                break;
        default:
+               DRM_ERROR("unsupported pixel format\n");
                return -EINVAL;
        }
  
  static struct drm_framebuffer *
  intel_user_framebuffer_create(struct drm_device *dev,
                              struct drm_file *filp,
-                             struct drm_mode_fb_cmd *mode_cmd)
+                             struct drm_mode_fb_cmd2 *mode_cmd)
  {
        struct drm_i915_gem_object *obj;
  
-       obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+                                               mode_cmd->handles[0]));
        if (&obj->base == NULL)
                return ERR_PTR(-ENOENT);
  
@@@ -7922,11 -8132,13 +8132,11 @@@ static bool intel_enable_rc6(struct drm
                return 0;
  
        /*
 -       * Enable rc6 on Sandybridge if DMA remapping is disabled
 +       * Disable rc6 on Sandybridge
         */
        if (INTEL_INFO(dev)->gen == 6) {
 -              DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
 -                               intel_iommu_enabled ? "true" : "false",
 -                               !intel_iommu_enabled ? "en" : "dis");
 -              return !intel_iommu_enabled;
 +              DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
 +              return 0;
        }
        DRM_DEBUG_DRIVER("RC6 enabled\n");
        return 1;
@@@ -7995,7 -8207,7 +8205,7 @@@ void gen6_enable_rps(struct drm_i915_pr
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_USE_NORMAL_FREQ |
+                  GEN6_RP_MEDIA_HW_MODE |
                   GEN6_RP_MEDIA_IS_GFX |
                   GEN6_RP_ENABLE |
                   GEN6_RP_UP_BUSY_AVG |
@@@ -8250,6 -8462,10 +8460,10 @@@ static void ivybridge_init_clock_gating
  
        I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
  
+       I915_WRITE(IVB_CHICKEN3,
+                  CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+                  CHICKEN3_DGMG_DONE_FIX_DISABLE);
        for_each_pipe(pipe) {
                I915_WRITE(DSPCNTR(pipe),
                           I915_READ(DSPCNTR(pipe)) |
@@@ -8543,9 -8759,15 +8757,15 @@@ static void intel_init_display(struct d
                if (IS_IVYBRIDGE(dev)) {
                        u32     ecobus;
  
+                       /* A small trick here - if the bios hasn't configured MT forcewake,
+                        * and if the device is in RC6, then force_wake_mt_get will not wake
+                        * the device and the ECOBUS read will return zero. Which will be
+                        * (correctly) interpreted by the test below as MT forcewake being
+                        * disabled.
+                        */
                        mutex_lock(&dev->struct_mutex);
                        __gen6_gt_force_wake_mt_get(dev_priv);
-                       ecobus = I915_READ(ECOBUS);
+                       ecobus = I915_READ_NOTRACE(ECOBUS);
                        __gen6_gt_force_wake_mt_put(dev_priv);
                        mutex_unlock(&dev->struct_mutex);
  
                } else if (IS_GEN6(dev)) {
                        if (SNB_READ_WM0_LATENCY()) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
+                               dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
                                              "Disable CxSR\n");
                        dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
                        if (SNB_READ_WM0_LATENCY()) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
+                               dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
                                              "Disable CxSR\n");
@@@ -8773,7 -8997,7 +8995,7 @@@ static void i915_disable_vga(struct drm
  void intel_modeset_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
+       int i, ret;
  
        drm_mode_config_init(dev);
  
  
        for (i = 0; i < dev_priv->num_pipe; i++) {
                intel_crtc_init(dev, i);
+               if (HAS_PCH_SPLIT(dev)) {
+                       ret = intel_plane_init(dev, i);
+                       if (ret)
+                               DRM_ERROR("plane %d init failed: %d\n",
+                                         i, ret);
+               }
        }
  
        /* Just disable it once at startup */
@@@ -40,6 -40,8 +40,8 @@@
  static void evergreen_gpu_init(struct radeon_device *rdev);
  void evergreen_fini(struct radeon_device *rdev);
  void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+ extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+                                    int ring, u32 cp_int_cntl);
  
  void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
  {
@@@ -1311,18 -1313,20 +1313,20 @@@ void evergreen_mc_program(struct radeon
   */
  void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
  {
+       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
        /* set to DX10/11 mode */
-       radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
-       radeon_ring_write(rdev, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+       radeon_ring_write(ring, 1);
        /* FIXME: implement */
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring,
  #ifdef __BIG_ENDIAN
                          (2 << 0) |
  #endif
                          (ib->gpu_addr & 0xFFFFFFFC));
-       radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
-       radeon_ring_write(rdev, ib->length_dw);
+       radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+       radeon_ring_write(ring, ib->length_dw);
  }
  
  
@@@ -1360,71 -1364,73 +1364,73 @@@ static int evergreen_cp_load_microcode(
  
  static int evergreen_cp_start(struct radeon_device *rdev)
  {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r, i;
        uint32_t cp_me;
  
-       r = radeon_ring_lock(rdev, 7);
+       r = radeon_ring_lock(rdev, ring, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-       radeon_ring_write(rdev, 0x1);
-       radeon_ring_write(rdev, 0x0);
-       radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
-       radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       radeon_ring_write(ring, 0x1);
+       radeon_ring_write(ring, 0x0);
+       radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+       radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_unlock_commit(rdev, ring);
  
        cp_me = 0xff;
        WREG32(CP_ME_CNTL, cp_me);
  
-       r = radeon_ring_lock(rdev, evergreen_default_size + 19);
+       r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
  
        /* setup clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  
        for (i = 0; i < evergreen_default_size; i++)
-               radeon_ring_write(rdev, evergreen_default_state[i]);
+               radeon_ring_write(ring, evergreen_default_state[i]);
  
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
  
        /* set clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       radeon_ring_write(ring, 0);
  
        /* SQ_VTX_BASE_VTX_LOC */
-       radeon_ring_write(rdev, 0xc0026f00);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(ring, 0xc0026f00);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
  
        /* Clear consts */
-       radeon_ring_write(rdev, 0xc0036f00);
-       radeon_ring_write(rdev, 0x00000bc4);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
+       radeon_ring_write(ring, 0xc0036f00);
+       radeon_ring_write(ring, 0x00000bc4);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
  
-       radeon_ring_write(rdev, 0xc0026900);
-       radeon_ring_write(rdev, 0x00000316);
-       radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-       radeon_ring_write(rdev, 0x00000010); /*  */
+       radeon_ring_write(ring, 0xc0026900);
+       radeon_ring_write(ring, 0x00000316);
+       radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+       radeon_ring_write(ring, 0x00000010); /*  */
  
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
  
        return 0;
  }
  
  int evergreen_cp_resume(struct radeon_device *rdev)
  {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 tmp;
        u32 rb_bufsz;
        int r;
        RREG32(GRBM_SOFT_RESET);
  
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
  #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
  #endif
        WREG32(CP_RB_CNTL, tmp);
-       WREG32(CP_SEM_WAIT_TIMER, 0x4);
+       WREG32(CP_SEM_WAIT_TIMER, 0x0);
  
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       rdev->cp.wptr = 0;
-       WREG32(CP_RB_WPTR, rdev->cp.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB_WPTR, ring->wptr);
  
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
        mdelay(1);
        WREG32(CP_RB_CNTL, tmp);
  
-       WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+       WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
  
-       rdev->cp.rptr = RREG32(CP_RB_RPTR);
+       ring->rptr = RREG32(CP_RB_RPTR);
  
        evergreen_cp_start(rdev);
-       rdev->cp.ready = true;
-       r = radeon_ring_test(rdev);
+       ring->ready = true;
+       r = radeon_ring_test(rdev, ring);
        if (r) {
-               rdev->cp.ready = false;
+               ring->ready = false;
                return r;
        }
        return 0;
@@@ -2353,7 -2359,7 +2359,7 @@@ int evergreen_mc_init(struct radeon_dev
        return 0;
  }
  
- bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  {
        u32 srbm_status;
        u32 grbm_status;
        grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
        grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
        if (!(grbm_status & GUI_ACTIVE)) {
-               r100_gpu_lockup_update(lockup, &rdev->cp);
+               r100_gpu_lockup_update(lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
-       rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+       ring->rptr = RREG32(CP_RB_RPTR);
+       return r100_gpu_cp_is_lockup(rdev, lockup, ring);
  }
  
  static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@@ -2470,7 -2476,13 +2476,13 @@@ void evergreen_disable_interrupt_state(
  {
        u32 tmp;
  
-       WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       if (rdev->family >= CHIP_CAYMAN) {
+               cayman_cp_int_cntl_setup(rdev, 0,
+                                        CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+               cayman_cp_int_cntl_setup(rdev, 1, 0);
+               cayman_cp_int_cntl_setup(rdev, 2, 0);
+       } else
+               WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
  int evergreen_irq_set(struct radeon_device *rdev)
  {
        u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+       u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
        hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
        hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
  
-       if (rdev->irq.sw_int) {
-               DRM_DEBUG("evergreen_irq_set: sw int\n");
-               cp_int_cntl |= RB_INT_ENABLE;
-               cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+       if (rdev->family >= CHIP_CAYMAN) {
+               /* enable CP interrupts on all rings */
+               if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+                       cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+               }
+               if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+                       cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+               }
+               if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+                       cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+               }
+       } else {
+               if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+                       cp_int_cntl |= RB_INT_ENABLE;
+                       cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+               }
        }
        if (rdev->irq.crtc_vblank_int[0] ||
            rdev->irq.pflip[0]) {
                DRM_DEBUG("evergreen_irq_set: vblank 0\n");
                grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
        }
  
-       WREG32(CP_INT_CNTL, cp_int_cntl);
+       if (rdev->family >= CHIP_CAYMAN) {
+               cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+               cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+               cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+       } else
+               WREG32(CP_INT_CNTL, cp_int_cntl);
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
  
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@@ -3018,11 -3053,24 +3053,24 @@@ restart_ih
                case 177: /* CP_INT in IB1 */
                case 178: /* CP_INT in IB2 */
                        DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 181: /* CP EOP event */
                        DRM_DEBUG("IH: CP EOP\n");
-                       radeon_fence_process(rdev);
+                       if (rdev->family >= CHIP_CAYMAN) {
+                               switch (src_data) {
+                               case 0:
+                                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+                                       break;
+                               case 1:
+                                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+                                       break;
+                               case 2:
+                                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+                                       break;
+                               }
+                       } else
+                               radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
  
  static int evergreen_startup(struct radeon_device *rdev)
  {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
  
        /* enable pcie gen2 link */
        if (r)
                return r;
  
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
        }
        evergreen_irq_set(rdev);
  
-       r = radeon_ring_init(rdev, rdev->cp.ring_size);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+                            0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
        r = evergreen_cp_load_microcode(rdev);
        if (r)
                return r;
  
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+       r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
+       }
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
+               return r;
+       }
        return 0;
  }
  
@@@ -3144,31 -3217,30 +3217,30 @@@ int evergreen_resume(struct radeon_devi
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
  
+       rdev->accel_working = true;
        r = evergreen_startup(rdev);
        if (r) {
                DRM_ERROR("evergreen startup failed on resume\n");
                return r;
        }
  
-       r = r600_ib_test(rdev);
-       if (r) {
-               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-               return r;
-       }
        return r;
  
  }
  
  int evergreen_suspend(struct radeon_device *rdev)
  {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
+       radeon_ib_pool_suspend(rdev);
+       r600_blit_suspend(rdev);
        r700_cp_stop(rdev);
-       rdev->cp.ready = false;
+       ring->ready = false;
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
-       r600_blit_suspend(rdev);
  
        return 0;
  }
@@@ -3243,8 -3315,8 +3315,8 @@@ int evergreen_init(struct radeon_devic
        if (r)
                return r;
  
-       rdev->cp.ring_obj = NULL;
-       r600_ring_init(rdev, 1024 * 1024);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
  
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
        if (r)
                return r;
  
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
        r = evergreen_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
+               r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                evergreen_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
-       if (rdev->accel_working) {
-               r = radeon_ib_pool_init(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-               r = r600_ib_test(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-       }
 +
 +      /* Don't start up if the MC ucode is missing on BTC parts.
 +       * The default clocks and voltages before the MC ucode
 +       * is loaded are not suffient for advanced operations.
 +       */
 +      if (ASIC_IS_DCE5(rdev)) {
 +              if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
 +                      DRM_ERROR("radeon: MC ucode required for NI+.\n");
 +                      return -EINVAL;
 +              }
 +      }
 +
        return 0;
  }
  
  void evergreen_fini(struct radeon_device *rdev)
  {
+       r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
-       radeon_ib_pool_fini(rdev);
+       r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
+       radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
@@@ -690,7 -690,7 +690,7 @@@ static int vmw_kms_new_framebuffer_surf
  
        /* XXX get the first 3 from the surface info */
        vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
-       vfbs->base.base.pitch = mode_cmd->pitch;
+       vfbs->base.base.pitches[0] = mode_cmd->pitch;
        vfbs->base.base.depth = mode_cmd->depth;
        vfbs->base.base.width = mode_cmd->width;
        vfbs->base.base.height = mode_cmd->height;
@@@ -804,7 -804,7 +804,7 @@@ static int do_dmabuf_define_gmrfb(struc
        cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
        cmd->body.format.colorDepth = depth;
        cmd->body.format.reserved = 0;
-       cmd->body.bytesPerLine = framebuffer->base.pitch;
+       cmd->body.bytesPerLine = framebuffer->base.pitches[0];
        cmd->body.ptr.gmrId = framebuffer->user_handle;
        cmd->body.ptr.offset = 0;
  
@@@ -1056,7 -1056,7 +1056,7 @@@ static int vmw_kms_new_framebuffer_dmab
        }
  
        vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
-       vfbd->base.base.pitch = mode_cmd->pitch;
+       vfbd->base.base.pitches[0] = mode_cmd->pitch;
        vfbd->base.base.depth = mode_cmd->depth;
        vfbd->base.base.width = mode_cmd->width;
        vfbd->base.base.height = mode_cmd->height;
@@@ -1085,7 -1085,7 +1085,7 @@@ out_err1
  
  static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                                                 struct drm_file *file_priv,
-                                                struct drm_mode_fb_cmd *mode_cmd)
+                                                struct drm_mode_fb_cmd2 *mode_cmd2)
  {
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
        struct ttm_base_object *user_obj;
 -      u64 required_size;
+       struct drm_mode_fb_cmd mode_cmd;
        int ret;
  
+       mode_cmd.width = mode_cmd2->width;
+       mode_cmd.height = mode_cmd2->height;
+       mode_cmd.pitch = mode_cmd2->pitches[0];
+       mode_cmd.handle = mode_cmd2->handles[0];
+       drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
+                                   &mode_cmd.bpp);
        /**
         * This code should be conditioned on Screen Objects not being used.
         * If screen objects are used, we can allocate a GMR to hold the
         * requested framebuffer.
         */
  
 -      required_size = mode_cmd.pitch * mode_cmd.height;
 -      if (unlikely(required_size > (u64) dev_priv->vram_size)) {
 +      if (!vmw_kms_validate_mode_vram(dev_priv,
-                                       mode_cmd->pitch,
-                                       mode_cmd->height)) {
++                                      mode_cmd.pitch,
++                                      mode_cmd.height)) {
                DRM_ERROR("VRAM size is too small for requested mode.\n");
                return ERR_PTR(-ENOMEM);
        }
         * command stream using user-space handles.
         */
  
-       user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
+       user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
        if (unlikely(user_obj == NULL)) {
                DRM_ERROR("Could not locate requested kms frame buffer.\n");
                return ERR_PTR(-ENOENT);
        }
  
+       /**
+        * End conditioned code.
+        */
        /* returns either a dmabuf or surface */
        ret = vmw_user_lookup_handle(dev_priv, tfile,
-                                    mode_cmd->handle,
+                                    mode_cmd.handle,
                                     &surface, &bo);
        if (ret)
                goto err_out;
        /* Create the new framebuffer depending one what we got back */
        if (bo)
                ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-                                                    mode_cmd);
+                                                    &mode_cmd);
        else if (surface)
                ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
-                                                     surface, &vfb, mode_cmd);
+                                                     surface, &vfb, &mode_cmd);
        else
                BUG();
  
@@@ -1344,7 -1356,7 +1356,7 @@@ int vmw_kms_readback(struct vmw_privat
        cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
        cmd->body.format.colorDepth = vfb->base.depth;
        cmd->body.format.reserved = 0;
-       cmd->body.bytesPerLine = vfb->base.pitch;
+       cmd->body.bytesPerLine = vfb->base.pitches[0];
        cmd->body.ptr.gmrId = vfb->user_handle;
        cmd->body.ptr.offset = 0;
  
diff --combined include/drm/drmP.h
@@@ -820,7 -820,7 +820,7 @@@ struct drm_driver 
         * Specifically, the timestamp in @vblank_time should correspond as
         * closely as possible to the time when the first video scanline of
         * the video frame after the end of VBLANK will start scanning out,
 -       * the time immmediately after end of the VBLANK interval. If the
 +       * the time immediately after end of the VBLANK interval. If the
         * @crtc is currently inside VBLANK, this will be a time in the future.
         * If the @crtc is currently scanning out a frame, this will be the
         * past start time of the current scanout. This is meant to adhere
        int dev_priv_size;
        struct drm_ioctl_desc *ioctls;
        int num_ioctls;
-       struct file_operations fops;
+       const struct file_operations *fops;
        union {
                struct pci_driver *pci;
                struct platform_device *platform_device;
@@@ -1696,5 -1696,13 +1696,13 @@@ extern void drm_platform_exit(struct dr
  extern int drm_get_platform_dev(struct platform_device *pdev,
                                struct drm_driver *driver);
  
+ /* returns true if currently okay to sleep */
+ static __inline__ bool drm_can_sleep(void)
+ {
+       if (in_atomic() || in_dbg_master() || irqs_disabled())
+               return false;
+       return true;
+ }
  #endif                                /* __KERNEL__ */
  #endif
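
The drm_can_sleep() helper added to drmP.h above is the one referenced by the "drm: introduce drm_can_sleep and use in intel/radeon drivers" entry in the shortlog: callers use it to pick between sleeping and busy-waiting in register-poll loops. A minimal usage sketch, assuming <linux/delay.h> and <linux/errno.h> are available; the function name, readiness check and timeout are hypothetical, not taken from any driver in this pull:

    /* Hypothetical poll loop; example_status_ready() is a placeholder. */
    static int example_wait_for_status(struct drm_device *dev)
    {
            int tries = 100;

            while (tries--) {
                    if (example_status_ready(dev))  /* placeholder readiness check */
                            return 0;
                    if (drm_can_sleep())
                            msleep(1);  /* process context: sleeping is safe */
                    else
                            mdelay(1);  /* atomic or IRQ-off context: busy-wait */
            }
            return -EBUSY;
    }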