Add a usage for kernel submissions. Waiting for those fences is mandatory for
dynamic DMA-bufs.
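As a hedged illustration only (not part of this patch), an importer that has
not pinned the buffer could wait for these kernel fences before touching the
memory; the wrapper name below is made up for the example:

#include <linux/dma-resv.h>
#include <linux/sched.h>

static int example_wait_for_kernel_fences(struct dma_resv *resv)
{
	long ret;

	/* Block until all DMA_RESV_USAGE_KERNEL fences have signaled. */
	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, true,
				    MAX_SCHEDULE_TIMEOUT);
	return ret < 0 ? ret : 0;
}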
As a precaution, this patch also changes all occurrences where fences are
added as part of memory management in TTM, VMWGFX and i915 to use the
new value, since it is now possible for drivers to ignore fences with the
WRITE usage.
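The converted call sites follow the pattern sketched below (illustration only;
the function name is made up): a kernel-internal clear or move job reserves a
fence slot and then publishes its fence with DMA_RESV_USAGE_KERNEL, so even
importers that skip WRITE fences still wait for it:

#include <linux/dma-resv.h>

static int example_publish_kernel_fence(struct dma_resv *resv,
					struct dma_fence *fence)
{
	int ret;

	dma_resv_assert_held(resv);

	/* Make sure a fence slot is available before publishing. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_KERNEL);
	return 0;
}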
v2: use "must" in documentation, fix whitespace
v3: separate out some driver changes and better document why some
changes should still be part of this patch.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-5-christian.koenig@amd.com
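The hunks below also extend the dma_resv_usage documentation with the ordering
KERNEL < WRITE < READ. As a hedged illustration of what that ordering means
for queries (example only, function name made up), iterating with READ also
returns WRITE and KERNEL fences:

#include <linux/dma-resv.h>

static void example_dump_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_assert_held(resv);

	/* READ also yields WRITE and KERNEL fences because of the ordering. */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence)
		pr_info("fence %llu:%llu\n", fence->context, fence->seqno);
}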
*/
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
- static const char *usage[] = { "write", "read" };
+ static const char *usage[] = { "kernel", "write", "read" };
struct dma_resv_iter cursor;
struct dma_fence *fence;
int r;
spin_lock_init(&fence_lock);
- for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ;
+ for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ;
++usage) {
r = subtests(tests, (void *)(unsigned long)usage);
if (r)
i915_fence_timeout(i915),
I915_FENCE_GFP);
dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
- DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_KERNEL);
dma_fence_work_commit(&clflush->base);
/*
* We must have successfully populated the pages (since we are
return ret;
}
- dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret)) {
return ret;
dma_resv_add_fence(&ghost_obj->base._resv, fence,
- DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_KERNEL);
/**
* If we're not moving to fixed memory, the TTM object
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
int ret = 0;
- dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
if (!evict)
ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
else if (!from->use_tt && pipeline)
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (!ret)
dma_resv_add_fence(bo->base.resv, &fence->base,
- DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_KERNEL);
else
/* Last resort fallback when we are OOM */
dma_fence_wait(&fence->base, false);
* This enum describes the different use cases for a dma_resv object and
* controls which fences are returned when queried.
*
- * An important fact is that there is the order WRITE<READ and when the
- * dma_resv object is asked for fences for one use case the fences for the
- * lower use case are returned as well.
+ * An important fact is that there is the order KERNEL<WRITE<READ, and when
+ * the dma_resv object is asked for fences of one use case, the fences of the
+ * lower use cases are returned as well.
+ *
+ * For example, when asking for WRITE fences, the KERNEL fences are returned
+ * as well. Similarly, when asking for READ fences, both WRITE and KERNEL
+ * fences are returned as well.
*/
enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+ * Drivers must *always* wait for those fences before accessing the
+ * resource protected by the dma_resv object. The only exception is when
+ * the resource is known to be locked down in place by pinning it
+ * previously.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
/**
* @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
*