static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
-	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
-		   obj->last_rendering_seqno,
+		   obj->last_read_seqno,
+		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
err->write_domain,
- err->seqno,
+ err->rseqno, err->wseqno,
pin_flag(err->pinned),
tiling_flag(err->tiling),
dirty_flag(err->dirty),
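
Both dump sites above change in lockstep: the single seqno column splits in two, so each format string gains one specifier per added argument. A stand-alone illustration of the pairing (plain user-space C; all values invented):

#include <stdio.h>

int main(void)
{
	/* Invented example values, mirroring the argument order above:
	 * read domains, write domain, then read/write/fenced seqnos. */
	unsigned read_domains = 0x0040, write_domain = 0x0000;
	unsigned rseqno = 21, wseqno = 20, fenced = 19;

	printf("%04x %04x %u %u %u\n",
	       read_domains, write_domain, rseqno, wseqno, fenced);
	return 0;
}
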
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
-		u32 seqno;
+		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
	unsigned int dirty:1;

	/**
-	 * This is set if the object has been written to since the last
-	 * GPU flush.
-	 */
-	unsigned int pending_gpu_write:1;
-
-	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
+	uint32_t last_read_seqno;
+	uint32_t last_write_seqno;

	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;
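
This header change is the core of the patch: one breadcrumb becomes two, letting readers and writers be waited on separately. A minimal user-space model of the intended semantics (toy struct, not the kernel one):

#include <stdbool.h>
#include <stdint.h>

struct gem_object_model {
	uint32_t last_read_seqno;	/* last request that touched the buffer */
	uint32_t last_write_seqno;	/* last request that wrote the buffer */
};

/* Every GPU write is also tracked as a read, so last_read_seqno is
 * always at or ahead of last_write_seqno. That makes the old
 * pending_gpu_write flag redundant: a pending GPU write is exactly a
 * nonzero last_write_seqno. */
static inline bool has_pending_gpu_write(const struct gem_object_model *o)
{
	return o->last_write_seqno != 0;
}
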
int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
				  gfp_t gfpmask);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);

-	obj->last_rendering_seqno = seqno;
+	obj->last_read_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
	list_del_init(&obj->ring_list);
-	obj->last_rendering_seqno = 0;
+	obj->last_read_seqno = 0;
+	obj->last_write_seqno = 0;
	obj->last_fenced_seqno = 0;
}
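
On retire the breadcrumbs are cleared together, so the "pending write" predicate sketched earlier reads false as soon as the object leaves the ring's active list. The toy counterpart:

#include <stdint.h>

struct gem_object_model {	/* same toy struct as in the earlier sketch */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
};

static inline void model_move_off_active(struct gem_object_model *o)
{
	o->last_read_seqno = 0;
	o->last_write_seqno = 0;	/* no write can still be pending once retired */
}
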
	obj->fenced_gpu_access = false;

	obj->active = 0;
-	obj->pending_gpu_write = false;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
				       struct drm_i915_gem_object,
				       ring_list);

-		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
			break;

		if (obj->base.write_domain != 0)
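
The retire loop keeps using i915_seqno_passed() rather than a plain compare because seqnos are 32-bit counters that wrap. A sketch of the signed-difference idiom (the i915 helper uses this same trick):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* Wraparound-safe "seq1 is at or after seq2". */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* 5 is "after" 0xfffffffe once the counter has wrapped... */
	printf("%d\n", seqno_passed(5, 0xfffffffeu));	/* prints 1 */
	/* ...but a plain unsigned compare gets it wrong. */
	printf("%d\n", 5 >= 0xfffffffeu);		/* prints 0 */
	return 0;
}
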
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly)
{
+	u32 seqno;
	int ret;

-	/* This function only exists to support waiting for existing rendering,
-	 * not for emitting required flushes.
-	 */
	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
-	if (obj->active) {
-		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
-		if (ret)
-			return ret;
-		i915_gem_retire_requests_ring(obj->ring);
+	if (readonly)
+		seqno = obj->last_write_seqno;
+	else
+		seqno = obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_wait_seqno(obj->ring, seqno);
+	if (ret)
+		return ret;
+
+	/* Manually manage the write flush as we may have not yet retired
+	 * the buffer.
+	 */
+	if (obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

+	i915_gem_retire_requests_ring(obj->ring);
	return 0;
}
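
With separate breadcrumbs the wait can be split by intent: read-only access only has to wait for the last GPU write, while a writer must drain every outstanding GPU access. A user-space model of just the target selection (toy types, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gem_object_model {
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
};

static uint32_t wait_target(const struct gem_object_model *o, bool readonly)
{
	return readonly ? o->last_write_seqno : o->last_read_seqno;
}

int main(void)
{
	/* The GPU wrote the buffer in request 10, then read it in request 20. */
	struct gem_object_model o = { .last_read_seqno = 20,
				      .last_write_seqno = 10 };

	printf("read-only access waits for seqno %u\n",
	       (unsigned)wait_target(&o, true));	/* 10: GPU reads don't block it */
	printf("write access waits for seqno %u\n",
	       (unsigned)wait_target(&o, false));	/* 20 */
	return 0;
}

Note that the hunk above also clears last_write_seqno and the GPU bits of the write domain by hand once the write has been waited out, because request retirement may not have run yet to do that bookkeeping.
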
	if (ret)
		return ret;

-	ret = i915_gem_check_olr(obj->ring,
-				 obj->last_rendering_seqno);
+	ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
	if (ret)
		return ret;
+
	i915_gem_retire_requests_ring(obj->ring);
}
		goto out;

	if (obj->active) {
-		seqno = obj->last_rendering_seqno;
+		seqno = obj->last_read_seqno;
		ring = obj->ring;
	}

	return 0;
	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj);
+		return i915_gem_object_wait_rendering(obj, false);

	idx = intel_ring_sync_index(from, to);

-	seqno = obj->last_rendering_seqno;
+	seqno = obj->last_read_seqno;
	if (seqno <= from->sync_seqno[idx])
		return 0;
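
Inter-ring synchronisation keys off last_read_seqno because the destination ring must order behind all prior access, not only writes; the per-ring sync_seqno[] cache then skips semaphores that would be redundant. Roughly (toy types, ring count assumed):

#include <stdbool.h>
#include <stdint.h>

#define NUM_RINGS 3	/* assumed count, for the sketch only */

struct ring_model {
	/* Last seqno of each other ring that this ring has already
	 * synchronised against. */
	uint32_t sync_seqno[NUM_RINGS - 1];
};

/* Mirrors the "seqno <= from->sync_seqno[idx]" early-out above. */
bool needs_semaphore(const struct ring_model *from, int idx,
		     uint32_t last_read_seqno)
{
	return last_read_seqno > from->sync_seqno[idx];
}
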
	if (ret)
		return ret;

-	if (obj->pending_gpu_write || write) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
+	ret = i915_gem_object_wait_rendering(obj, !write);
+	if (ret)
+		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);
	return ret;
}
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;
	if (ret)
		return ret;

-	if (write || obj->pending_gpu_write) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
+	ret = i915_gem_object_wait_rendering(obj, !write);
+	if (ret)
+		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);
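
Both domain setters above collapse their old conditional into one call, with the readonly flag simply inverted from the caller's write request. A self-contained sketch of that shared pattern (stub stand-ins, not the kernel functions):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for i915_gem_object_wait_rendering(); just reports which
 * flavour of wait was requested. */
static int wait_rendering_model(bool readonly)
{
	printf("wait for %s\n", readonly ? "last write" : "all access");
	return 0;
}

/* The shape shared by the GTT and CPU domain setters after this patch:
 * "caller wants write access" maps to "wait is not read-only". */
static int set_to_domain_model(bool write)
{
	return wait_rendering_model(!write);
}

int main(void)
{
	set_to_domain_model(false);	/* read-only: "wait for last write" */
	set_to_domain_model(true);	/* write: "wait for all access" */
	return 0;
}
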