drm/i915: Require the vm mutex for i915_vma_bind()
author     Thomas Hellström <thomas.hellstrom@linux.intel.com>
           Tue, 21 Dec 2021 20:00:50 +0000 (21:00 +0100)
committer  Thomas Hellström <thomas.hellstrom@linux.intel.com>
           Wed, 22 Dec 2021 07:52:57 +0000 (08:52 +0100)
Protect updates of struct i915_vma flags and async binding / unbinding
with the vm::mutex. This means that i915_vma_bind() needs to assert
vm::mutex held. In order to make that possible, drop the caching of
kmap_atomic() maps around i915_vma_bind().
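
For illustration, the caller-side pattern this imposes on the execbuf
relocation path is roughly the following (a minimal sketch condensed
from the execbuffer.c hunk below; reloc_cache_unmap() and
reloc_cache_remap() are the helpers added by this patch, and the
variable names are taken from eb_relocate_entry()):

	struct i915_vma *vma = target->vma;

	/* Drop the cached atomic kmap/iomap before taking a sleeping lock. */
	reloc_cache_unmap(&eb->reloc_cache);
	mutex_lock(&vma->vm->mutex);
	/* i915_vma_bind() now asserts vma->vm->mutex is held. */
	err = i915_vma_bind(vma, vma->obj->cache_level, PIN_GLOBAL, NULL);
	mutex_unlock(&vma->vm->mutex);
	/* Re-establish the mapping of the object whose relocations we edit. */
	reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);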

An alternative would be to use kmap_local(), but since we block CPU
unplugging during sleeps inside kmap_local() sections, this may have
unwanted side-effects, particularly since we might wait for the GPU
while holding the vm mutex.

This change may theoretically increase execbuf CPU usage on snb, but
at least on non-highmem systems that increase should be very small.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211221200050.436316-5-thomas.hellstrom@linux.intel.com
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_vma.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1f025a2..e7f548a 100644
@@ -1098,6 +1098,47 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
        return &i915->ggtt;
 }
 
+static void reloc_cache_unmap(struct reloc_cache *cache)
+{
+       void *vaddr;
+
+       if (!cache->vaddr)
+               return;
+
+       vaddr = unmask_page(cache->vaddr);
+       if (cache->vaddr & KMAP)
+               kunmap_atomic(vaddr);
+       else
+               io_mapping_unmap_atomic((void __iomem *)vaddr);
+}
+
+static void reloc_cache_remap(struct reloc_cache *cache,
+                             struct drm_i915_gem_object *obj)
+{
+       void *vaddr;
+
+       if (!cache->vaddr)
+               return;
+
+       if (cache->vaddr & KMAP) {
+               struct page *page = i915_gem_object_get_page(obj, cache->page);
+
+               vaddr = kmap_atomic(page);
+               cache->vaddr = unmask_flags(cache->vaddr) |
+                       (unsigned long)vaddr;
+       } else {
+               struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+               unsigned long offset;
+
+               offset = cache->node.start;
+               if (!drm_mm_node_allocated(&cache->node))
+                       offset += cache->page << PAGE_SHIFT;
+
+               cache->vaddr = (unsigned long)
+                       io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+       }
+}
+
 static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
 {
        void *vaddr;
@@ -1362,10 +1403,17 @@ eb_relocate_entry(struct i915_execbuffer *eb,
                 * batchbuffers.
                 */
                if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-                   GRAPHICS_VER(eb->i915) == 6) {
+                   GRAPHICS_VER(eb->i915) == 6 &&
+                   !i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) {
+                       struct i915_vma *vma = target->vma;
+
+                       reloc_cache_unmap(&eb->reloc_cache);
+                       mutex_lock(&vma->vm->mutex);
                        err = i915_vma_bind(target->vma,
                                            target->vma->obj->cache_level,
                                            PIN_GLOBAL, NULL);
+                       mutex_unlock(&vma->vm->mutex);
+                       reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
                        if (err)
                                return err;
                }
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 3ff95e5..29a858c 100644
@@ -393,6 +393,7 @@ int i915_vma_bind(struct i915_vma *vma,
        u32 bind_flags;
        u32 vma_flags;
 
+       lockdep_assert_held(&vma->vm->mutex);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);