list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- BUG_ON(!list_empty(&obj->gpu_write_list));
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
return obj->madv == I915_MADV_DONTNEED;
}
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
- uint32_t flush_domains)
-{
- struct drm_i915_gem_object *obj, *next;
-
- list_for_each_entry_safe(obj, next,
- &ring->gpu_write_list,
- gpu_write_list) {
- if (obj->base.write_domain & flush_domains) {
- uint32_t old_write_domain = obj->base.write_domain;
-
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring,
- i915_gem_next_request_seqno(ring));
-
- trace_i915_gem_object_change_domain(obj,
- obj->base.read_domains,
- old_write_domain);
- }
- }
-}
-
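For reference, the execbuffer hunk later in this patch shows what survives the removal of i915_gem_process_flushing_list(): the per-object write bookkeeping (obj->dirty, obj->last_write_seqno) stays, and only the extra gpu_write_list link is dropped. A minimal sketch of that surviving tracking, using only fields visible in this diff; the helper name sketch_track_gpu_write is hypothetical and not part of the kernel:

/* Hypothetical sketch, not part of this patch: the per-object state that
 * still records a pending GPU write once the per-ring gpu_write_list is
 * gone. Only fields that appear elsewhere in this diff are used.
 */
static void sketch_track_gpu_write(struct drm_i915_gem_object *obj, u32 seqno)
{
	if (obj->base.write_domain) {		/* this request writes the object */
		obj->dirty = 1;			/* backing pages will be modified */
		obj->last_write_seqno = seqno;	/* the write retires with this seqno */
	}
}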
static u32
i915_gem_get_seqno(struct drm_device *dev)
{
&dev_priv->mm.retire_work, HZ);
}
- WARN_ON(!list_empty(&ring->gpu_write_list));
-
return 0;
}
struct drm_i915_gem_object,
ring_list);
- list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
}
}
u32 seqno;
int ret;
- /* This function only exists to support waiting for existing rendering,
- * not for emitting required flushes.
- */
- BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
if (ret)
return ret;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- i915_gem_process_flushing_list(ring, flush_domains);
-
return 0;
}
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
- int ret;
-
- if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+ if (list_empty(&ring->active_list))
return 0;
- if (!list_empty(&ring->gpu_write_list)) {
- ret = i915_gem_flush_ring(ring,
- I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
- }
-
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
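Taken together, the hunk above leaves i915_ring_idle() with nothing to flush: without a gpu_write_list there are no pending GPU writes parked outside the active list, so an empty active list means the ring is already idle, and otherwise waiting for the next request seqno is enough. In effect the function now reads as follows (comments here are illustrative, not from the source):

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
	/* Nothing outstanding on this ring; no wait required. */
	if (list_empty(&ring->active_list))
		return 0;

	/* No separate write list to flush first: waiting for the most
	 * recent seqno is sufficient to drain the active list.
	 */
	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}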
if (ret)
return ret;
- /* Is the device fubar? */
- if (WARN_ON(!list_empty(&ring->gpu_write_list)))
- return -EBUSY;
-
ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
if (ret)
return ret;
INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
- INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
/* Avoid an unnecessary call to unbind on the first bind. */
obj->map_and_fenceable = true;
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
}
void
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
-
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = seqno;
- list_move_tail(&obj->gpu_write_list,
- &ring->gpu_write_list);
if (obj->pin_count) /* check for potential scanout */
intel_mark_busy(ring->dev, obj);
}