drm/i915: Make shrink/unshrink be atomic
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 10 Sep 2019 21:22:04 +0000 (22:22 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Wed, 11 Sep 2019 07:14:23 +0000 (08:14 +0100)
Add an atomic counter and always take the spinlock around the pin/unpin
events, so that the list manipulation stays consistent while pin/unpin
run concurrently.
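
The trick is a lockless fast path with a locked slow path: only the
0 -> 1 and 1 -> 0 transitions of the counter need to touch the shrink
lists, so every other pin/unpin is a bare atomic op, and the boundary
transitions recheck the counter under the spinlock so concurrent callers
cannot both perform the list update. A minimal sketch of the pattern is
below; every name in it (pinned_obj, obj_pin, obj_unpin, obj_lock,
shrink_list) is a stand-in for illustration, not the i915 code itself.

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    /* Sketch only: all names are stand-ins, not the i915 functions. */
    struct pinned_obj {
            atomic_t pin;           /* plays the role of obj->mm.shrink_pin */
            struct list_head link;  /* plays the role of obj->mm.link */
    };

    static DEFINE_SPINLOCK(obj_lock);  /* cf. i915->mm.obj_lock */
    static LIST_HEAD(shrink_list);     /* cf. i915->mm.shrink_list */

    static void obj_pin(struct pinned_obj *o)
    {
            unsigned long flags;

            /* Fast path: any transition except 0 -> 1 needs no lock. */
            if (atomic_add_unless(&o->pin, 1, 0))
                    return;

            /*
             * Slow path: recheck under the lock. atomic_fetch_inc()
             * returns the old value, so only the winner of the 0 -> 1
             * race unlinks the object from the shrinker's list.
             */
            spin_lock_irqsave(&obj_lock, flags);
            if (!atomic_fetch_inc(&o->pin))
                    list_del_init(&o->link);
            spin_unlock_irqrestore(&obj_lock, flags);
    }

    static void obj_unpin(struct pinned_obj *o)
    {
            unsigned long flags;

            /* Fast path: any transition except 1 -> 0 needs no lock. */
            if (atomic_add_unless(&o->pin, -1, 1))
                    return;

            /*
             * Slow path: only the caller that drops the count to zero
             * makes the object visible to the shrinker again.
             */
            spin_lock_irqsave(&obj_lock, flags);
            if (atomic_dec_and_test(&o->pin))
                    list_add_tail(&o->link, &shrink_list);
            spin_unlock_irqrestore(&obj_lock, flags);
    }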

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190910212204.17190-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gt/intel_context.c

index da3e7cf..55c3ab5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -494,7 +494,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 
                spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
-               if (obj->mm.madv == I915_MADV_WILLNEED)
+               if (obj->mm.madv == I915_MADV_WILLNEED &&
+                   !atomic_read(&obj->mm.shrink_pin))
                        list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
 
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
index a558edf..d695f18 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -156,6 +156,7 @@ struct drm_i915_gem_object {
        struct {
                struct mutex lock; /* protects the pages and their use */
                atomic_t pages_pin_count;
+               atomic_t shrink_pin;
 
                struct sg_table *pages;
                void *mapping;
index 18f0ce0..2e941f0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -71,6 +71,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                        list = &i915->mm.shrink_list;
                list_add_tail(&obj->mm.link, list);
 
+               atomic_set(&obj->mm.shrink_pin, 0);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
 }
index 4e55cfc..d2c05d7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -516,46 +516,52 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
 
 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
 {
+       struct drm_i915_private *i915 = obj_to_i915(obj);
+       unsigned long flags;
+
        /*
         * We can only be called while the pages are pinned or when
         * the pages are released. If pinned, we should only be called
         * from a single caller under controlled conditions; and on release
         * only one caller may release us. Neither the two may cross.
         */
-       if (!list_empty(&obj->mm.link)) { /* pinned by caller */
-               struct drm_i915_private *i915 = obj_to_i915(obj);
-               unsigned long flags;
-
-               spin_lock_irqsave(&i915->mm.obj_lock, flags);
-               GEM_BUG_ON(list_empty(&obj->mm.link));
+       if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
+               return;
 
+       spin_lock_irqsave(&i915->mm.obj_lock, flags);
+       if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
+           !list_empty(&obj->mm.link)) {
                list_del_init(&obj->mm.link);
                i915->mm.shrink_count--;
                i915->mm.shrink_memory -= obj->base.size;
-
-               spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
+       spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 }
 
 static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
                                              struct list_head *head)
 {
+       struct drm_i915_private *i915 = obj_to_i915(obj);
+       unsigned long flags;
+
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-       GEM_BUG_ON(!list_empty(&obj->mm.link));
+       if (!i915_gem_object_is_shrinkable(obj))
+               return;
 
-       if (i915_gem_object_is_shrinkable(obj)) {
-               struct drm_i915_private *i915 = obj_to_i915(obj);
-               unsigned long flags;
+       if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
+               return;
 
-               spin_lock_irqsave(&i915->mm.obj_lock, flags);
-               GEM_BUG_ON(!kref_read(&obj->base.refcount));
+       spin_lock_irqsave(&i915->mm.obj_lock, flags);
+       GEM_BUG_ON(!kref_read(&obj->base.refcount));
+       if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
+               GEM_BUG_ON(!list_empty(&obj->mm.link));
 
                list_add_tail(&obj->mm.link, head);
                i915->mm.shrink_count++;
                i915->mm.shrink_memory += obj->base.size;
 
-               spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
+       spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 }
 
 void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
index f55691d..c049581 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -134,8 +134,8 @@ static int __context_pin_state(struct i915_vma *vma)
 
 static void __context_unpin_state(struct i915_vma *vma)
 {
-       __i915_vma_unpin(vma);
        i915_vma_make_shrinkable(vma);
+       __i915_vma_unpin(vma);
 }
 
 static void __intel_context_retire(struct i915_active *active)