fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
+ fence_reg += reg * 8;
+
+ /* To work around incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with a write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
+ val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
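/*
 * For reference, a minimal sketch of how the split update described in the
 * comment above could then complete (illustrative only, not part of this
 * hunk; it assumes the usual i915 helpers I915_WRITE()/POSTING_READ() and
 * that the I965_FENCE_REG_VALID bit sits in the low dword):
 */
		val |= I965_FENCE_REG_VALID;

		/* Write the high dword first; the fence stays disabled
		 * because the valid bit lives in the low dword. */
		I915_WRITE(fence_reg + 4, val >> 32);
		POSTING_READ(fence_reg + 4);

		/* Enabling the fence is the last step: the low dword write
		 * carries the valid bit, and the posting read makes sure it
		 * lands before we proceed. */
		I915_WRITE(fence_reg + 0, val);
		POSTING_READ(fence_reg);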
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound mm.suspended!
- */
- dev_priv->mm.suspended = 1;
- i915_gem_reset_fences(dev);
-
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->mm.stolen_base == 0)
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- if (size < dev_priv->cfb_size)
+ if (size < dev_priv->fbc.size)
return 0;
/* Release any current block */
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ int ret;
- if (dev_priv->mm.stolen_base == 0)
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",