drm/i915/selftests: Tweak igt_ggtt_page to speed it up
author     Chris Wilson <chris@chris-wilson.co.uk>
Sat, 23 Dec 2017 11:04:06 +0000 (11:04 +0000)
committer  Chris Wilson <chris@chris-wilson.co.uk>
Tue, 2 Jan 2018 15:27:36 +0000 (15:27 +0000)
Reduce the number of GGTT PTE operations to speed the test up, at the
cost of reducing the likelihood of spotting a coherency error in those
operations. However, Broxton is sporadically timing out on this test,
presumably because its GGTT operations are all uncached, so accept the
reduced coverage in exchange for a faster run.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171223110407.21402-1-chris@chris-wilson.co.uk
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 4a28d71..bb7cf99 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1052,35 +1052,38 @@ static int igt_ggtt_page(void *arg)
 
        memset(&tmp, 0, sizeof(tmp));
        err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
-                                         1024 * PAGE_SIZE, 0,
+                                         count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        if (err)
                goto out_unpin;
 
+       intel_runtime_pm_get(i915);
+
+       for (n = 0; n < count; n++) {
+               u64 offset = tmp.start + n * PAGE_SIZE;
+
+               ggtt->base.insert_page(&ggtt->base,
+                                      i915_gem_object_get_dma_address(obj, 0),
+                                      offset, I915_CACHE_NONE, 0);
+       }
+
        order = i915_random_order(count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_remove;
        }
 
-       intel_runtime_pm_get(i915);
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;
 
-               ggtt->base.insert_page(&ggtt->base,
-                                      i915_gem_object_get_dma_address(obj, 0),
-                                      offset, I915_CACHE_NONE, 0);
-
                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
-
-               wmb();
-               ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
        }
+       i915_gem_flush_ggtt_writes(i915);
 
        i915_random_reorder(order, count, &prng);
        for (n = 0; n < count; n++) {
@@ -1088,16 +1091,10 @@ static int igt_ggtt_page(void *arg)
                u32 __iomem *vaddr;
                u32 val;
 
-               ggtt->base.insert_page(&ggtt->base,
-                                      i915_gem_object_get_dma_address(obj, 0),
-                                      offset, I915_CACHE_NONE, 0);
-
                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);
 
-               ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
-
                if (val != n) {
                        pr_err("insert page failed: found %d, expected %d\n",
                               val, n);
@@ -1105,10 +1102,11 @@ static int igt_ggtt_page(void *arg)
                        break;
                }
        }
-       intel_runtime_pm_put(i915);
 
        kfree(order);
 out_remove:
+       ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+       intel_runtime_pm_put(i915);
        drm_mm_remove_node(&tmp);
 out_unpin:
        i915_gem_object_unpin_pages(obj);