drm/i915/gem: Drop cached obj->bind_count
authorChris Wilson <chris@chris-wilson.co.uk>
Wed, 1 Apr 2020 22:39:24 +0000 (23:39 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Thu, 2 Apr 2020 00:17:39 +0000 (01:17 +0100)
We cached the number of vma bound to the object in order to speed up
shrinker decisions. This has been superseded by being more proactive in
removing objects we cannot shrink from the shrinker lists, and so we can
drop the clumsy attempt at atomically counting the bind count and
comparing it to the number of pinned mappings of the object. This will
only get clumsier with asynchronous binding and unbinding.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200401223924.16667-1-chris@chris-wilson.co.uk
12 files changed:
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c

index 0cc40e77bbd2fcb2bb85a01768993a57a08e9a88..af43e82f45c7059206d02355a608b779600afed6 100644 (file)
@@ -369,7 +369,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
        struct i915_vma *vma;
 
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-       if (!atomic_read(&obj->bind_count))
+       if (list_empty(&obj->vma.list))
                return;
 
        mutex_lock(&i915->ggtt.vm.mutex);
index 5da9f9e534b94981c65e81ef6ba1f4eb5f634852..3f01cdd1a39bc012ab6464a3676d61508f57563a 100644 (file)
@@ -206,7 +206,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                }
                obj->mmo.offsets = RB_ROOT;
 
-               GEM_BUG_ON(atomic_read(&obj->bind_count));
                GEM_BUG_ON(obj->userfault_count);
                GEM_BUG_ON(!list_empty(&obj->lut_list));
 
index a0b10bcd8d8acbd94e81f004a07428636d54bf7a..54ee658bb168b60865ebec9ae0e90976bfeb4c8f 100644 (file)
@@ -179,9 +179,6 @@ struct drm_i915_gem_object {
 #define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
 #define STRIDE_MASK (~TILING_MASK)
 
-       /** Count of VMA actually bound by this object */
-       atomic_t bind_count;
-
        struct {
                /*
                 * Protects the pages and their use. Do not use directly, but
index 24f4cadea1149fd3aa921cf8b30b743f3853c92d..5d855fcd5c0f0596555183af35a359bd9bc7dd08 100644 (file)
@@ -199,8 +199,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;
 
-       GEM_BUG_ON(atomic_read(&obj->bind_count));
-
        /* May be called by shrinker from within get_pages() (on another bo) */
        mutex_lock(&obj->mm.lock);
        if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
index 03e5eb4c99d11464988e2594801afb06d92454ad..5b65ce738b160d39252b73ea6e4eef56892a77d9 100644 (file)
@@ -26,18 +26,6 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
        if (!i915_gem_object_is_shrinkable(obj))
                return false;
 
-       /*
-        * Only report true if by unbinding the object and putting its pages
-        * we can actually make forward progress towards freeing physical
-        * pages.
-        *
-        * If the pages are pinned for any other reason than being bound
-        * to the GPU, simply unbinding from the GPU is not going to succeed
-        * in releasing our pin count on the pages themselves.
-        */
-       if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
-               return false;
-
        /*
         * We can only return physical pages to the system if we can either
         * discard the contents (because the user has marked them as being
@@ -54,6 +42,8 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
        flags = 0;
        if (shrink & I915_SHRINK_ACTIVE)
                flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+       if (!(shrink & I915_SHRINK_BOUND))
+               flags = I915_GEM_OBJECT_UNBIND_TEST;
 
        if (i915_gem_object_unbind(obj, flags) == 0)
                __i915_gem_object_put_pages(obj);
@@ -194,10 +184,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
                            i915_gem_object_is_framebuffer(obj))
                                continue;
 
-                       if (!(shrink & I915_SHRINK_BOUND) &&
-                           atomic_read(&obj->bind_count))
-                               continue;
-
                        if (!can_release_pages(obj))
                                continue;
 
index 43912e9b683dccb1a43dae9f5a31890baedd831e..ef7abcb3f4eeff8e87d143a91cd1485c4a882f01 100644 (file)
@@ -1156,9 +1156,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
        if (err)
                goto out_unmap;
 
-       GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
-                  !atomic_read(&obj->bind_count));
-
        err = check_present(addr, obj->base.size);
        if (err) {
                pr_err("%s: was not present\n", obj->mm.region->name);
@@ -1175,7 +1172,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
                pr_err("Failed to unbind object!\n");
                goto out_unmap;
        }
-       GEM_BUG_ON(atomic_read(&obj->bind_count));
 
        if (type != I915_MMAP_TYPE_GTT) {
                __i915_gem_object_put_pages(obj);
index e60a5750ea44250620694c89fe922b9c4c85b964..074707cd1fba32f3a467951246f55012017f289f 100644 (file)
@@ -217,7 +217,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 struct file_stats {
        struct i915_address_space *vm;
        unsigned long count;
-       u64 total, unbound;
+       u64 total;
        u64 active, inactive;
        u64 closed;
 };
@@ -233,8 +233,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 
        stats->count++;
        stats->total += obj->base.size;
-       if (!atomic_read(&obj->bind_count))
-               stats->unbound += obj->base.size;
 
        spin_lock(&obj->vma.lock);
        if (!stats->vm) {
@@ -284,13 +282,12 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 #define print_file_stats(m, name, stats) do { \
        if (stats.count) \
-               seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
+               seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
                           stats.active, \
                           stats.inactive, \
-                          stats.unbound, \
                           stats.closed); \
 } while (0)
 
index b09a1c929c94e9a6f3a9de46f78e8af69aea0111..82cc06d3e185b02cbdcf7171fc64ade68cdc6d9d 100644 (file)
@@ -1736,6 +1736,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
 #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
index b0836fc47ae6c5aa2620a86b2ba7570400236a1d..0cbcb9f54e7d25c7e862b326182c0031a78182b6 100644 (file)
@@ -118,7 +118,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
        struct i915_vma *vma;
        int ret;
 
-       if (!atomic_read(&obj->bind_count))
+       if (list_empty(&obj->vma.list))
                return 0;
 
        /*
@@ -141,6 +141,11 @@ try_again:
                if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
                        continue;
 
+               if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
+                       ret = -EBUSY;
+                       break;
+               }
+
                ret = -EAGAIN;
                if (!i915_vm_tryopen(vm))
                        break;
index b5f78b0acf5df5354b991c55a692da80f4f95440..4cdd883f9d662389dc9eeb9f7626b29070081b47 100644 (file)
@@ -608,18 +608,6 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
        return true;
 }
 
-static void assert_bind_count(const struct drm_i915_gem_object *obj)
-{
-       /*
-        * Combine the assertion that the object is bound and that we have
-        * pinned its pages. But we should never have bound the object
-        * more than we have pinned its pages. (For complete accuracy, we
-        * assume that no else is pinning the pages, but as a rough assertion
-        * that we will not run into problems later, this will do!)
-        */
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
-}
-
 /**
  * i915_vma_insert - finds a slot for the vma in its address space
  * @vma: the vma
@@ -738,12 +726,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
 
-       if (vma->obj) {
-               struct drm_i915_gem_object *obj = vma->obj;
-
-               atomic_inc(&obj->bind_count);
-               assert_bind_count(obj);
-       }
        list_add_tail(&vma->vm_link, &vma->vm->bound_list);
 
        return 0;
@@ -761,12 +743,6 @@ i915_vma_detach(struct i915_vma *vma)
         * it to be reaped by the shrinker.
         */
        list_del(&vma->vm_link);
-       if (vma->obj) {
-               struct drm_i915_gem_object *obj = vma->obj;
-
-               assert_bind_count(obj);
-               atomic_dec(&obj->bind_count);
-       }
 }
 
 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
index 06ef88510209faacaa18454e9e1bdd9aac70a0cd..028baae9631f273d40983bad29cb24848351ded5 100644 (file)
@@ -45,8 +45,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
 
 static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
 {
-       unsigned long unbound, bound, count;
        struct drm_i915_gem_object *obj;
+       unsigned long count;
 
        count = 0;
        do {
@@ -72,30 +72,6 @@ static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
        pr_debug("Filled GGTT with %lu pages [%llu total]\n",
                 count, ggtt->vm.total / PAGE_SIZE);
 
-       bound = 0;
-       unbound = 0;
-       list_for_each_entry(obj, objects, st_link) {
-               GEM_BUG_ON(!obj->mm.quirked);
-
-               if (atomic_read(&obj->bind_count))
-                       bound++;
-               else
-                       unbound++;
-       }
-       GEM_BUG_ON(bound + unbound != count);
-
-       if (unbound) {
-               pr_err("%s: Found %lu objects unbound, expected %u!\n",
-                      __func__, unbound, 0);
-               return -EINVAL;
-       }
-
-       if (bound != count) {
-               pr_err("%s: Found %lu objects bound, expected %lu!\n",
-                      __func__, bound, count);
-               return -EINVAL;
-       }
-
        if (list_empty(&ggtt->vm.bound_list)) {
                pr_err("No objects on the GGTT inactive list!\n");
                return -EINVAL;
index b342bef5e7c95613397d618ae87145375fcb7ec2..5d2a02fcf595795b6c6bf7686c174704ae75bce2 100644 (file)
@@ -1229,7 +1229,6 @@ static void track_vma_bind(struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
 
-       atomic_inc(&obj->bind_count); /* track for eviction later */
        __i915_gem_object_pin_pages(obj);
 
        GEM_BUG_ON(vma->pages);