if (i915_vma_is_pinned(vma))
return false;
- if (WARN_ON(!list_empty(&vma->exec_list)))
- return false;
-
if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
return false;
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
ret = drm_mm_scan_remove_block(&scan, &vma->node);
BUG_ON(ret);
-
- INIT_LIST_HEAD(&vma->exec_list);
}
/* Can we unpin some objects such as idle hw contents,
if (drm_mm_scan_remove_block(&scan, &vma->node))
__i915_vma_pin(vma);
else
- list_del_init(&vma->exec_list);
+ list_del(&vma->exec_list);
}
/* Unbinding will emit any required flushes */
ret = 0;
- while (!list_empty(&eviction_list)) {
- vma = list_first_entry(&eviction_list,
- struct i915_vma,
- exec_list);
-
- list_del_init(&vma->exec_list);
+ list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
__i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
}
/* Overlap of objects in the same batch? */
- if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
+ if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
if (vma->exec_entry &&
vma->exec_entry->flags & EXEC_OBJECT_PINNED)
}
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
- list_del_init(&vma->exec_list);
__i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
eb->and = -eb->args->buffer_count;
}
- INIT_LIST_HEAD(&eb->vmas);
return 0;
}
+/*
+ * __eb_unreserve_vma() - drop the pin (and fence, if held) that was
+ * taken when @vma was reserved for this execbuf, as recorded in
+ * entry->flags.
+ *
+ * Note: does NOT clear the __EXEC_OBJECT_HAS_FENCE/HAS_PIN bits in
+ * entry->flags; use eb_unreserve_vma() when the entry must be left in
+ * a reusable state.
+ */
+static inline void
+__eb_unreserve_vma(struct i915_vma *vma,
+		   const struct drm_i915_gem_exec_object2 *entry)
+{
+	if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
+		i915_vma_unpin_fence(vma);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		__i915_vma_unpin(vma);
+}
+
+/*
+ * eb_unreserve_vma() - release the execbuf reservation on @vma and
+ * clear the fence/pin tracking bits so the exec_entry can be reserved
+ * again later (e.g. on the relocation retry path).
+ */
+static void
+eb_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+
+	__eb_unreserve_vma(vma, entry);
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
static void
eb_reset(struct i915_execbuffer *eb)
{
+	struct i915_vma *vma;
+
+	/*
+	 * Unreserve and drop the reference on every vma gathered so far,
+	 * detaching each from its exec_entry, before wiping the lookup
+	 * hash buckets so the lookup can be redone from scratch.
+	 */
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		eb_unreserve_vma(vma);
+		i915_vma_put(vma);
+		vma->exec_entry = NULL;
+	}
+
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
struct list_head objects;
int i, ret;
+ INIT_LIST_HEAD(&eb->vmas);
+
INIT_LIST_HEAD(&objects);
spin_lock(&eb->file->table_lock);
/* Grab a reference to the object and release the lock so we can lookup
}
}
-static void
-eb_unreserve_vma(struct i915_vma *vma)
-{
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!drm_mm_node_allocated(&vma->node))
- return;
-
- entry = vma->exec_entry;
-
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
- i915_vma_unpin_fence(vma);
-
- if (entry->flags & __EXEC_OBJECT_HAS_PIN)
- __i915_vma_unpin(vma);
-
- entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
static void eb_destroy(struct i915_execbuffer *eb)
{
- i915_gem_context_put(eb->ctx);
+ struct i915_vma *vma;
- while (!list_empty(&eb->vmas)) {
- struct i915_vma *vma;
+ list_for_each_entry(vma, &eb->vmas, exec_list) {
+ if (!vma->exec_entry)
+ continue;
- vma = list_first_entry(&eb->vmas,
- struct i915_vma,
- exec_list);
- list_del_init(&vma->exec_list);
- eb_unreserve_vma(vma);
+ __eb_unreserve_vma(vma, vma->exec_entry);
vma->exec_entry = NULL;
i915_vma_put(vma);
}
+
+ i915_gem_context_put(eb->ctx);
+
+ if (eb->buckets)
+ kfree(eb->buckets);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
int i, total, ret;
/* We may process another execbuffer during the unlock... */
- while (!list_empty(&eb->vmas)) {
- vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
- list_del_init(&vma->exec_list);
- eb_unreserve_vma(vma);
- i915_vma_put(vma);
- }
-
+ eb_reset(eb);
mutex_unlock(&dev->struct_mutex);
total = 0;
}
/* reacquire the objects */
- eb_reset(eb);
ret = eb_lookup_vmas(eb);
if (ret)
goto err;