I tried to avoid having to track the write for every VMA by only
tracking writes to the ggtt. However, for the purposes of frontbuffer
tracking this is insufficient, as we need to invalidate around writes
not just to the ggtt but to all aliased ppgtt views of the framebuffer.
By moving the invalidate/flush critical section to the object, and only
applying it to framebuffer writes, we can reduce the tracking even
further by watching only framebuffers and not every vma.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161116190704.5293-1-chris@chris-wilson.co.uk
Tested-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
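
For review convenience, the net effect of the hunks below roughly
condenses to the sketch that follows. This is not an additional change:
the function names and calls are those used in the patch, surrounding
context and locking are omitted, and the execbuf-side fragment
corresponds to the write-tracking branch of i915_vma_move_to_active()
in i915_gem_execbuffer.c.

/* i915_gem_execbuffer.c: write tracking in i915_vma_move_to_active() */
        if (flags & EXEC_OBJECT_WRITE) {
                /* invalidate returns true only for framebuffer objects... */
                if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
                        /* ...so only framebuffers arm the per-object tracker */
                        i915_gem_active_set(&obj->frontbuffer_write, req);
        }

/* i915_gem.c: retire callback installed by i915_gem_object_init() */
static void
frontbuffer_retire(struct i915_gem_active *active,
                   struct drm_i915_gem_request *request)
{
        struct drm_i915_gem_object *obj =
                container_of(active, typeof(*obj), frontbuffer_write);

        /* last tracked CS write to the frontbuffer has retired: flush it */
        intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

Since intel_fb_obj_invalidate() now reports whether the object is a
frontbuffer at all, objects that are never scanned out do not arm the
tracker and pay no extra cost on request retirement.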
        return err;
}
+static void
+frontbuffer_retire(struct i915_gem_active *active,
+                   struct drm_i915_gem_request *request)
+{
+        struct drm_i915_gem_object *obj =
+                container_of(active, typeof(*obj), frontbuffer_write);
+
+        intel_fb_obj_flush(obj, true, ORIGIN_CS);
+}
+
void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
{
        obj->resv = &obj->__builtin_resv;
        obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
+        init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
        list_move_tail(&vma->vm_link, &vma->vm->active_list);
        if (flags & EXEC_OBJECT_WRITE) {
-                i915_gem_active_set(&vma->last_write, req);
-
-                intel_fb_obj_invalidate(obj, ORIGIN_CS);
+                if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+                        i915_gem_active_set(&obj->frontbuffer_write, req);
                /* update for the implicit flush after a batch */
                obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
        atomic_t frontbuffer_bits;
        unsigned int frontbuffer_ggtt_origin; /* write once */
+        struct i915_gem_active frontbuffer_write;
        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
        for (i = 0; i < I915_NUM_ENGINES; i++)
                err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
-        err->wseqno = __active_get_seqno(&vma->last_write);
-        err->engine = __active_get_engine_id(&vma->last_write);
+        err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
+        err->engine = __active_get_engine_id(&obj->frontbuffer_write);
        err->gtt_offset = vma->node.start;
        err->read_domains = obj->base.read_domains;
        }
}
-static void
-i915_ggtt_retire__write(struct i915_gem_active *active,
-                        struct drm_i915_gem_request *request)
-{
-        struct i915_vma *vma =
-                container_of(active, struct i915_vma, last_write);
-
-        intel_fb_obj_flush(vma->obj, true, ORIGIN_CS);
-}
-
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
        INIT_LIST_HEAD(&vma->exec_list);
        for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
                init_request_active(&vma->last_read[i], i915_vma_retire);
-        init_request_active(&vma->last_write,
-                            i915_is_ggtt(vm) ? i915_ggtt_retire__write : NULL);
        init_request_active(&vma->last_fence, NULL);
        list_add(&vma->vm_link, &vm->unbound_list);
        vma->vm = vm;
        unsigned int active;
        struct i915_gem_active last_read[I915_NUM_ENGINES];
-        struct i915_gem_active last_write;
        struct i915_gem_active last_fence;
/**
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
-static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
if (!frontbuffer_bits)
- return;
+ return false;
__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+ return true;
}
/**