struct list_head link;
};
+enum fb_op_origin {
+ ORIGIN_GTT,
+ ORIGIN_CPU,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+};
+
struct i915_fbc {
unsigned long uncompressed_size;
unsigned threshold;
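The four origin values name the distinct paths that can dirty a frontbuffer: ORIGIN_GTT for CPU writes through a GTT mapping, ORIGIN_CPU for writes in the CPU domain, ORIGIN_CS for GPU rendering submitted through the command streamer, and ORIGIN_FLIP for page flips. As a minimal sketch of how a consumer might branch on the new value (this helper and its policy are illustrative only, not part of the patch):

static bool fb_op_needs_immediate_flush(enum fb_op_origin origin)
{
	switch (origin) {
	case ORIGIN_GTT:
	case ORIGIN_CPU:
		return true;	/* synchronous CPU write: react now */
	case ORIGIN_CS:
		return false;	/* GPU write: wait for the request to retire */
	case ORIGIN_FLIP:
		return false;	/* flips use the flip_prepare/complete hooks */
	}
	return true;
}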
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);
- intel_fb_obj_invalidate(obj, ring);
+ intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
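Note the invariant across the three converted call sites: the synchronous GTT and CPU domain paths pass a NULL ring, while only the command streamer path passes a ring together with ORIGIN_CS. If one wanted to assert that relationship inside intel_fb_obj_invalidate, a sketch (not in the patch) could be:

	/* Illustrative only: @ring and @origin should agree. */
	WARN_ON(ring == NULL && origin == ORIGIN_CS);
	WARN_ON(ring != NULL && origin != ORIGIN_CS);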
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
+ * @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 * be invalidated. If @ring is non-NULL any subsequent invalidation will be
 * delayed until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
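The hunk ends before the rest of the body; for orientation, a hedged sketch of how the function might route the new parameter onward (intel_psr_invalidate is the existing PSR hook in this file, the rest of the flow is assumed from context rather than shown in this excerpt):

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

	/* Sketch: asynchronous (ORIGIN_CS) invalidates are tracked until
	 * the request retires; synchronous origins can notify consumers
	 * such as PSR immediately. */
	intel_psr_invalidate(dev, obj->frontbuffer_bits);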