drm/i915: Always flush the dirty CPU cache when pinning the scanout
author Chris Wilson <chris@chris-wilson.co.uk>
Fri, 18 Nov 2016 21:17:46 +0000 (21:17 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Fri, 18 Nov 2016 22:33:22 +0000 (22:33 +0000)
Currently we only clflush the scanout if it is in the CPU domain; also
flush it if we have a CPU clflush still pending. We also want to treat
the dirtyfb path similarly and flush any pending writes there as well.

v2: Only send the fb flush message if flushing the dirt on flip
v3: Make flush-for-flip and dirtyfb look more alike since they serve
similar roles as end-of-frame markers.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com> #v2
Link: http://patchwork.freedesktop.org/patch/msgid/20161118211747.25197-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_display.c

drivers/gpu/drm/i915/i915_gem.c
index 4500731..b6ad1ba 100644
@@ -3204,12 +3204,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
 {
        struct i915_vma *vma;
-       int ret = 0;
+       int ret;
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
        if (obj->cache_level == cache_level)
-               goto out;
+               return 0;
 
        /* Inspect the list of currently bound VMA and unbind any that would
         * be invalid given the new cache-level. This is principally to
@@ -3304,18 +3304,14 @@ restart:
                }
        }
 
+       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
+           cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+               obj->cache_dirty = true;
+
        list_for_each_entry(vma, &obj->vma_list, obj_link)
                vma->node.color = cache_level;
        obj->cache_level = cache_level;
 
-out:
-       /* Flush the dirty CPU caches to the backing storage so that the
-        * object is now coherent at its new cache level (with respect
-        * to the access domain).
-        */
-       if (obj->cache_dirty && cpu_write_needs_clflush(obj))
-               i915_gem_clflush_object(obj, true);
-
        return 0;
 }
 
@@ -3471,7 +3467,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
        vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
-       i915_gem_object_flush_cpu_write_domain(obj);
+       /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
+       if (obj->cache_dirty) {
+               i915_gem_clflush_object(obj, true);
+               intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+       }
 
        old_write_domain = obj->base.write_domain;
        old_read_domains = obj->base.read_domains;
drivers/gpu/drm/i915/intel_display.c
index 3bd0e67..8d270f7 100644
@@ -15679,6 +15679,8 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
        struct drm_i915_gem_object *obj = intel_fb->obj;
 
        mutex_lock(&dev->struct_mutex);
+       if (obj->pin_display && obj->cache_dirty)
+               i915_gem_clflush_object(obj, true);
        intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
        mutex_unlock(&dev->struct_mutex);
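
For illustration only (not part of the patch): a minimal standalone C sketch of
the end-of-frame flush pattern the two hunks converge on. The struct and helper
names below (gem_object, clflush_object, fb_flush, pinned_for_display) are
stand-ins for the real i915 ones, not the driver's actual API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct drm_i915_gem_object; only the fields the sketch needs */
struct gem_object {
	bool cache_dirty;		/* CPU cachelines may hold data newer than memory */
	bool pinned_for_display;	/* stand-in for obj->pin_display */
};

/* Stand-in for i915_gem_clflush_object(obj, true): write back dirty cachelines */
static void clflush_object(struct gem_object *obj)
{
	printf("clflush object\n");
	obj->cache_dirty = false;
}

/* Stand-in for intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB): end-of-frame signal */
static void fb_flush(struct gem_object *obj)
{
	(void)obj;
	printf("frontbuffer flush\n");
}

/* Flip path (pin_to_display_plane): only send the fb flush if dirt was flushed (v2) */
static void flush_for_flip(struct gem_object *obj)
{
	if (obj->cache_dirty) {
		clflush_object(obj);
		fb_flush(obj);
	}
}

/* Dirtyfb path: always signal the frame, clflush only if the pinned scanout is dirty */
static void flush_for_dirtyfb(struct gem_object *obj)
{
	if (obj->pinned_for_display && obj->cache_dirty)
		clflush_object(obj);
	fb_flush(obj);
}

int main(void)
{
	struct gem_object fb = { .cache_dirty = true, .pinned_for_display = true };

	flush_for_flip(&fb);	/* flushes dirt, then signals end of frame */
	flush_for_dirtyfb(&fb);	/* nothing left to flush; still signals the frame */
	return 0;
}

The point of v2/v3, as sketched above, is that both paths end a frame the same
way: flush any dirty CPU caches to the backing storage first, then notify
frontbuffer tracking that the frame is complete.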