drm/i915: Prevent use-after-free in invalidate_range_start callback
author Michał Winiarski <michal.winiarski@intel.com>
Tue, 3 Feb 2015 14:48:17 +0000 (15:48 +0100)
committer Jani Nikula <jani.nikula@intel.com>
Thu, 5 Feb 2015 14:31:30 +0000 (16:31 +0200)
It's possible for the invalidate_range_start mmu notifier callback to
race against userptr object release. If the gem object is released
before the callback takes the spinlock in invalidate_range_start, we
hit a NULL pointer dereference.
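
As an aside for readers, here is a minimal userspace sketch
(hypothetical code, not part of the driver) of the semantics that
kref_get_unless_zero() provides in the fix below: a reference is taken
only while the refcount is still non-zero, so a lookup that races with
the final release skips the dying object instead of resurrecting it.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct object {
            atomic_int refcount;    /* 0 means release is in progress */
    };

    /* Mirrors kref_get_unless_zero(): succeed only while refcount > 0. */
    static bool get_unless_zero(struct object *obj)
    {
            int old = atomic_load(&obj->refcount);

            while (old > 0) {
                    /* On failure, "old" is reloaded and rechecked. */
                    if (atomic_compare_exchange_weak(&obj->refcount,
                                                     &old, old + 1))
                            return true;
            }
            return false;   /* already zero: the release path owns it */
    }

    int main(void)
    {
            struct object live = { .refcount = 1 };
            struct object dying = { .refcount = 0 };

            printf("live:  %s\n", get_unless_zero(&live) ? "got ref" : "skipped");
            printf("dying: %s\n", get_unless_zero(&dying) ? "got ref" : "skipped");
            return 0;
    }

The unpatched code took the reference unconditionally with
drm_gem_object_reference(), which can resurrect an object whose
refcount has already hit zero, leading to the use-after-free and
double free described in the new code comment below.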

Testcase: igt/gem_userptr_blits/stress-mm-invalidate-close
Testcase: igt/gem_userptr_blits/stress-mm-invalidate-close-overlap
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: stable@vger.kernel.org
[Jani: added code comment suggested by Chris]
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
drivers/gpu/drm/i915/i915_gem_userptr.c

index d182058..1719078 100644
@@ -113,7 +113,10 @@ restart:
                        continue;
 
                obj = mo->obj;
-               drm_gem_object_reference(&obj->base);
+
+               if (!kref_get_unless_zero(&obj->base.refcount))
+                       continue;
+
                spin_unlock(&mn->lock);
 
                cancel_userptr(obj);
@@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                        it = interval_tree_iter_first(&mn->objects, start, end);
                if (it != NULL) {
                        obj = container_of(it, struct i915_mmu_object, it)->obj;
-                       drm_gem_object_reference(&obj->base);
+
+                       /* The mmu_object is released late when destroying the
+                        * GEM object so it is entirely possible to gain a
+                        * reference on an object in the process of being freed
+                        * since our serialisation is via the spinlock and not
+                        * the struct_mutex - and consequently use it after it
+                        * is freed and then double free it.
+                        */
+                       if (!kref_get_unless_zero(&obj->base.refcount)) {
+                               spin_unlock(&mn->lock);
+                               serial = 0;
+                               continue;
+                       }
+
                        serial = mn->serial;
                }
                spin_unlock(&mn->lock);
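
A note on the retry semantics, inferred from the surrounding function
(only partially visible in this hunk): serial caches mn->serial, which
is bumped whenever the interval tree is modified, and a stale cached
value makes the walk restart via interval_tree_iter_first(). Resetting
serial to 0 after skipping a dying object therefore forces the next
loop iteration to rescan the tree from the start rather than stepping
onwards from a node that may be freed once the spinlock is dropped.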