drm/i915/selftests: Replace opencoded clflush with drm_clflush_virt_range
Author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 30 Jul 2018 07:53:51 +0000 (08:53 +0100)
Committer: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 30 Jul 2018 09:33:21 +0000 (10:33 +0100)
We occasionally see that the clflush prior to a read of GPU data is
returning stale data, reminiscent of much earlier bugs fixed by adding a
second clflush for serialisation. As drm_clflush_virt_range() already
supplies the workaround, use it rather than open code the clflush
instruction.

References: 396f5d62d1a5 ("drm: Restore double clflush on the last partial cacheline")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180730075351.15569-3-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/selftests/i915_gem_coherency.c

index 3a095c3..4e6a221 100644 (file)
@@ -33,7 +33,8 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 {
        unsigned int needs_clflush;
        struct page *page;
-       u32 *map;
+       void *map;
+       u32 *cpu;
        int err;
 
        err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
@@ -42,24 +43,19 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 
        page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
        map = kmap_atomic(page);
+       cpu = map + offset_in_page(offset);
 
-       if (needs_clflush & CLFLUSH_BEFORE) {
-               mb();
-               clflush(map+offset_in_page(offset) / sizeof(*map));
-               mb();
-       }
+       if (needs_clflush & CLFLUSH_BEFORE)
+               drm_clflush_virt_range(cpu, sizeof(*cpu));
 
-       map[offset_in_page(offset) / sizeof(*map)] = v;
+       *cpu = v;
 
-       if (needs_clflush & CLFLUSH_AFTER) {
-               mb();
-               clflush(map+offset_in_page(offset) / sizeof(*map));
-               mb();
-       }
+       if (needs_clflush & CLFLUSH_AFTER)
+               drm_clflush_virt_range(cpu, sizeof(*cpu));
 
        kunmap_atomic(map);
-
        i915_gem_obj_finish_shmem_access(obj);
+
        return 0;
 }
 
@@ -69,7 +65,8 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 {
        unsigned int needs_clflush;
        struct page *page;
-       u32 *map;
+       void *map;
+       u32 *cpu;
        int err;
 
        err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
@@ -78,17 +75,16 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 
        page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
        map = kmap_atomic(page);
+       cpu = map + offset_in_page(offset);
 
-       if (needs_clflush & CLFLUSH_BEFORE) {
-               mb();
-               clflush(map+offset_in_page(offset) / sizeof(*map));
-               mb();
-       }
+       if (needs_clflush & CLFLUSH_BEFORE)
+               drm_clflush_virt_range(cpu, sizeof(*cpu));
 
-       *v = map[offset_in_page(offset) / sizeof(*map)];
-       kunmap_atomic(map);
+       *v = *cpu;
 
+       kunmap_atomic(map);
        i915_gem_obj_finish_shmem_access(obj);
+
        return 0;
 }