Backmerging drm/drm-next to get the latest changes in the xlnx driver.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
DMA_RESV_USAGE_BOOKKEEP);
}
- if (fence && !p->immediate)
+ if (fence && !p->immediate) {
+ /*
+ * Most hw generations now have a separate queue for page table
+ * updates, but when the queue is shared with userspace we need
+ * the extra CPU round trip to correctly flush the TLB.
+ */
+ set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
swap(*fence, f);
+ }
dma_fence_put(f);
return 0;
-
-error:
- amdgpu_job_free(p->job);
- return r;
}
/**
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+ /**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words, we always insert a full CPU round trip before
+ * dependent jobs are pushed to the hw queue.
+ */
+ #define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
+
+enum dma_resv_usage;
+struct dma_resv;
struct drm_gem_object;
struct drm_gpu_scheduler;
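
For context, the scheduler core is expected to consult this flag when deciding
whether a dependent job may be pipelined behind the scheduled fence or must
instead wait for the finished fence (the full CPU round trip mentioned above).
The sketch below illustrates that check; it is loosely modeled on the dependency
handling in drm_sched_entity.c, and the helper name sched_fence_can_pipeline()
is an assumption for illustration, not an upstream function.

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static bool sched_fence_can_pipeline(struct drm_gpu_scheduler *sched,
				     struct dma_fence *fence)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

	/*
	 * Pipelining (waiting only for the dependency to be scheduled on the
	 * same scheduler) is allowed when the dependency is a scheduler fence
	 * of this scheduler and DRM_SCHED_FENCE_DONT_PIPELINE is not set on
	 * it. Otherwise the dependent job waits for the finished fence, which
	 * forces the CPU round trip.
	 */
	return s_fence && s_fence->sched == sched &&
	       !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
}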