On some platforms we use the stop_machine() version of
gen8_ggtt_insert_page/gen8_ggtt_insert_entries to work around a
concurrent GGTT access bug, but this causes a circular locking
dependency warning:
 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&ggtt->error_mutex);
                               lock(dma_fence_map);
                               lock(&ggtt->error_mutex);
  lock(cpu_hotplug_lock);

Fix this by calling gen8_ggtt_insert_page/gen8_ggtt_insert_entries
directly at error capture, which is safe against concurrent GGTT access
because the reset path makes sure of that.
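
For context, the stop_machine()-based wrapper installed on these
platforms looks roughly like the simplified sketch below (the
insert_page argument struct and the __cb callback are shown for
illustration only). stop_machine() takes cpus_read_lock(), i.e.
cpu_hotplug_lock, which is how that lock ends up in the chain above
once error capture calls the wrapper under ggtt->error_mutex:

  /*
   * Simplified sketch, not the full upstream code: the __BKL wrapper
   * funnels the PTE write through stop_machine(), and stop_machine()
   * itself acquires cpus_read_lock() (cpu_hotplug_lock).
   */
  static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
                                            dma_addr_t addr,
                                            u64 offset,
                                            enum i915_cache_level level,
                                            u32 unused)
  {
          struct insert_page arg = { vm, addr, offset, level };

          stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
  }

Bypassing that wrapper at error capture keeps cpu_hotplug_lock out of
the picture; serialization against other GGTT updates is already
provided by the surrounding reset.
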
v2: Fix rebase conflict and add a comment.
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5595
Reviewed-by: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Signed-off-by: Ramalingam C <ramalingam.c@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220624110821.29190-1-nirmoy.das@intel.com
if (intel_vm_no_concurrent_access_wa(i915)) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+
+ /*
+ * Calling stop_machine() version of GGTT update function
+ * at error capture/reset path will raise lockdep warning.
+ * Allow calling gen8_ggtt_insert_* directly at reset path
+ * which is safe from parallel GGTT updates.
+ */
+ ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
+
ggtt->vm.bind_async_flags =
I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags);
+ void (*raw_insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*raw_insert_entries)(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
void (*cleanup)(struct i915_address_space *vm);
void (*foreach)(struct i915_address_space *vm,
if (i915_gem_object_is_lmem(obj))
pte_flags |= PTE_LM;
- ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+ if (ggtt->vm.raw_insert_entries)
+ ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+ else
+ ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
mutex_lock(&ggtt->error_mutex);
- ggtt->vm.insert_page(&ggtt->vm, dma, slot,
- I915_CACHE_NONE, 0);
+ if (ggtt->vm.raw_insert_page)
+ ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
+ else
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
mb();
s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);