struct i915_vma *vma;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- if (!atomic_read(&obj->bind_count))
+ if (list_empty(&obj->vma.list))
return;
mutex_lock(&i915->ggtt.vm.mutex);
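With bind_count gone, "does this object have any vma at all?" is answered from the object's vma list. Note that list_empty(&obj->vma.list) is a weaker test than bind_count != 0 (a vma can sit on the list without being bound), which is fine here: the converted callers only want a cheap early-out and still inspect each vma properly afterwards. The exact question, if a caller ever needed it, would be a walk under obj->vma.lock; a minimal sketch using the existing i915_vma_is_bound() helper (the function name is illustrative, not part of the patch):

	static bool object_has_bound_vma(struct drm_i915_gem_object *obj)
	{
		struct i915_vma *vma;
		bool bound = false;

		spin_lock(&obj->vma.lock);
		list_for_each_entry(vma, &obj->vma.list, obj_link) {
			/* bound into either the GGTT or a ppGTT */
			if (i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
				bound = true;
				break;
			}
		}
		spin_unlock(&obj->vma.lock);

		return bound;
	}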
}
obj->mmo.offsets = RB_ROOT;
- GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
- /** Count of VMA actually bound by this object */
- atomic_t bind_count;
-
struct {
/*
* Protects the pages and their use. Do not use directly, but
if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY;
- GEM_BUG_ON(atomic_read(&obj->bind_count));
-
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock(&obj->mm.lock);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
if (!i915_gem_object_is_shrinkable(obj))
return false;
- /*
- * Only report true if by unbinding the object and putting its pages
- * we can actually make forward progress towards freeing physical
- * pages.
- *
- * If the pages are pinned for any other reason than being bound
- * to the GPU, simply unbinding from the GPU is not going to succeed
- * in releasing our pin count on the pages themselves.
- */
- if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
- return false;
-
/*
* We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
flags = 0;
if (shrink & I915_SHRINK_ACTIVE)
flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
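+ /* without I915_SHRINK_BOUND we may only probe, never unbind */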
+ if (!(shrink & I915_SHRINK_BOUND))
+ flags = I915_GEM_OBJECT_UNBIND_TEST;
if (i915_gem_object_unbind(obj, flags) == 0)
__i915_gem_object_put_pages(obj);
i915_gem_object_is_framebuffer(obj))
continue;
- if (!(shrink & I915_SHRINK_BOUND) &&
- atomic_read(&obj->bind_count))
- continue;
-
if (!can_release_pages(obj))
continue;
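The skip deleted above is not lost: when I915_SHRINK_BOUND is clear, unsafe_drop_pages() now passes I915_GEM_OBJECT_UNBIND_TEST, and i915_gem_object_unbind() fails with -EBUSY on the first bound vma it meets, so bound objects are still left untouched. From the caller's point of view the old test becomes (illustrative sketch; the patch keeps this inside unsafe_drop_pages()):

	if (!(shrink & I915_SHRINK_BOUND) &&
	    i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_TEST))
		continue; /* still bound somewhere, leave its pages alone */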
if (err)
goto out_unmap;
- GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
- !atomic_read(&obj->bind_count));
-
err = check_present(addr, obj->base.size);
if (err) {
pr_err("%s: was not present\n", obj->mm.region->name);
pr_err("Failed to unbind object!\n");
goto out_unmap;
}
- GEM_BUG_ON(atomic_read(&obj->bind_count));
if (type != I915_MMAP_TYPE_GTT) {
__i915_gem_object_put_pages(obj);
struct file_stats {
struct i915_address_space *vm;
unsigned long count;
- u64 total, unbound;
+ u64 total;
u64 active, inactive;
u64 closed;
};
stats->count++;
stats->total += obj->base.size;
- if (!atomic_read(&obj->bind_count))
- stats->unbound += obj->base.size;
spin_lock(&obj->vma.lock);
if (!stats->vm) {
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
name, \
stats.count, \
stats.total, \
stats.active, \
stats.inactive, \
- stats.unbound, \
stats.closed); \
} while (0)
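With the unbound counter removed, a per-client line in the debugfs summary now reads, for example (illustrative values):

	client: 128 objects, 12582912 bytes (4194304 active, 8388608 inactive, 0 closed)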
unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
struct i915_vma *vma;
int ret;
- if (!atomic_read(&obj->bind_count))
+ if (list_empty(&obj->vma.list))
return 0;
/*
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
continue;
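+ /* found a bound vma: in TEST mode report -EBUSY instead of unbinding */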
+ if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
+ ret = -EBUSY;
+ break;
+ }
+
ret = -EAGAIN;
if (!i915_vm_tryopen(vm))
break;
return true;
}
-static void assert_bind_count(const struct drm_i915_gem_object *obj)
-{
- /*
- * Combine the assertion that the object is bound and that we have
- * pinned its pages. But we should never have bound the object
- * more than we have pinned its pages. (For complete accuracy, we
- * assume that no else is pinning the pages, but as a rough assertion
- * that we will not run into problems later, this will do!)
- */
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
-}
-
/**
* i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- atomic_inc(&obj->bind_count);
- assert_bind_count(obj);
- }
list_add_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
* it to be reaped by the shrinker.
*/
list_del(&vma->vm_link);
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- assert_bind_count(obj);
- atomic_dec(&obj->bind_count);
- }
}
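assert_bind_count() has no meaning once bind_count is gone, but the invariant it policed still holds by construction: every vma pins the object's pages while it is bound, so pages_pin_count cannot drop below the number of bound vmas. A simplified sketch of that pairing (function names illustrative; the real work lives in the vma bind/unbind paths):

	static int vma_bind_obj_pages(struct i915_vma *vma)
	{
		/* binding takes its own pin on the backing store */
		int err = i915_gem_object_pin_pages(vma->obj);

		if (err)
			return err;

		vma->pages = vma->obj->mm.pages;
		return 0;
	}

	static void vma_unbind_obj_pages(struct i915_vma *vma)
	{
		vma->pages = NULL;
		/* unbinding drops that pin again */
		i915_gem_object_unpin_pages(vma->obj);
	}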
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
- unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
+ unsigned long count;
count = 0;
do {
pr_debug("Filled GGTT with %lu pages [%llu total]\n",
count, ggtt->vm.total / PAGE_SIZE);
- bound = 0;
- unbound = 0;
- list_for_each_entry(obj, objects, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
-
- if (atomic_read(&obj->bind_count))
- bound++;
- else
- unbound++;
- }
- GEM_BUG_ON(bound + unbound != count);
-
- if (unbound) {
- pr_err("%s: Found %lu objects unbound, expected %u!\n",
- __func__, unbound, 0);
- return -EINVAL;
- }
-
- if (bound != count) {
- pr_err("%s: Found %lu objects bound, expected %lu!\n",
- __func__, bound, count);
- return -EINVAL;
- }
-
if (list_empty(&ggtt->vm.bound_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
{
struct drm_i915_gem_object *obj = vma->obj;
- atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
GEM_BUG_ON(vma->pages);