From a594525c82e0b8d677a7e5fd13c7c115d41e9722 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Fri, 28 Jan 2022 09:57:39 +0100
Subject: [PATCH] drm/i915: Allow dead vm to unbind vma's without lock.

i915_gem_vm_close may take the lock, and we currently have no better way
of handling this. At least for now, allow a path in which holding vm->mutex
is sufficient. This is the case, because the object destroy path will
forcefully take vm->mutex now.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220128085739.1464568-1-maarten.lankhorst@linux.intel.com
Reviewed-by: Thomas Hellstrom
---
 drivers/gpu/drm/i915/i915_vma.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 2a14a4e..22cdc55 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -39,6 +39,17 @@
 #include "i915_vma.h"
 #include "i915_vma_resource.h"
 
+static inline void assert_vma_held_evict(const struct i915_vma *vma)
+{
+	/*
+	 * We may be forced to unbind when the vm is dead, to clean it up.
+	 * This is the only exception to the requirement of the object lock
+	 * being held.
+	 */
+	if (atomic_read(&vma->vm->open))
+		assert_object_held_shared(vma->obj);
+}
+
 static struct kmem_cache *slab_vmas;
 
 static struct i915_vma *i915_vma_alloc(void)
@@ -1721,7 +1732,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
 	struct dma_fence *unbind_fence;
 
 	GEM_BUG_ON(i915_vma_is_pinned(vma));
-	assert_object_held_shared(vma->obj);
+	assert_vma_held_evict(vma);
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* Force a pagefault for domain tracking on next user access */
@@ -1788,7 +1799,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
 	int ret;
 
 	lockdep_assert_held(&vma->vm->mutex);
-	assert_object_held_shared(vma->obj);
+	assert_vma_held_evict(vma);
 
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
-- 
2.7.4