/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
- unsigned num_entries)
+ unsigned num_entries,
+ bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
}
ppgtt->base.clear_range(&ppgtt->base, 0,
- ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
+ ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
{
ppgtt->base.clear_range(&ppgtt->base,
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ obj->base.size >> PAGE_SHIFT,
+ true);
}
extern int intel_iommu_gfx_mapped;
dev_priv->mm.interruptible = interruptible;
}
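+/*
+ * Scan the per-ring fault registers (GEN6+) and, when a valid fault is
+ * latched, log its address, address space, source ID and type, then
+ * clear the valid bit so stale faults do not linger.
+ */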
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ for_each_ring(ring, dev_priv, i) {
+ u32 fault_reg;
+ fault_reg = I915_READ(RING_FAULT_REG(ring));
+ if (fault_reg & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08lx\\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault_reg & PAGE_MASK,
+ fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault_reg),
+ RING_FAULT_FAULT_TYPE(fault_reg));
+ I915_WRITE(RING_FAULT_REG(ring),
+ fault_reg & ~RING_FAULT_VALID);
+ }
+ }
+ POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
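+/*
+ * Clear every GGTT PTE before suspend; passing use_scratch == false
+ * leaves the entries invalid rather than pointing them at the scratch
+ * page, so stray DMA during suspend surfaces as a fault on resume.
+ */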
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Don't bother messing with faults pre GEN6 as we have little
+ * documentation supporting that it's a good idea.
+ */
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ i915_check_and_clear_faults(dev);
+
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ dev_priv->gtt.base.start / PAGE_SIZE,
+ dev_priv->gtt.base.total / PAGE_SIZE,
+ false);
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
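+ /* Catch and clear any faults latched while we were suspended */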
+ i915_check_and_clear_faults(dev);
+
/* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
- dev_priv->gtt.base.total / PAGE_SIZE);
+ dev_priv->gtt.base.total / PAGE_SIZE,
+ true);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
i915_gem_clflush_object(obj, obj->pin_display);
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
- unsigned int num_entries)
+ unsigned int num_entries,
+ bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
first_entry, num_entries, max_entries))
num_entries = max_entries;
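+ /* use_scratch feeds pte_encode's valid flag: true points the
+ * entries at the scratch page, false leaves them invalid.
+ */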
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
static void i915_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
- unsigned int num_entries)
+ unsigned int num_entries,
+ bool unused)
{
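+ /* The pre-GEN6 GTT has no notion of scratch-vs-invalid PTEs, so
+ * the flag is accepted only to match the clear_range signature.
+ */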
intel_gtt_clear_range(first_entry, num_entries);
}
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
entry,
- obj->base.size >> PAGE_SHIFT);
+ obj->base.size >> PAGE_SHIFT,
+ true);
obj->has_global_gtt_mapping = 0;
}
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
+ ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
}
/* And finally clear the reserved guard page */
- ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
+ ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
}
static bool