u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
{
- struct drm_gpuva *va, *next, *prev = NULL;
+ struct drm_gpuva *va, *next;
u64 req_end = req_addr + req_range;
int ret;
ret = op_unmap_cb(ops, priv, va, merge);
if (ret)
return ret;
- goto next;
+ continue;
}
if (end > req_end) {
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
- goto next;
+ continue;
}
if (end > req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
if (ret)
return ret;
- goto next;
+ continue;
}
if (end > req_end) {
break;
}
}
-next:
- prev = va;
}
return op_map_cb(ops, priv,
		 req_addr, req_range,
		 req_obj, req_offset);
}
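The removal of the "next" label works because nothing is left to run at the bottom of each iteration once the "prev" tracking is gone. A standalone toy loop (hypothetical names, not the drm_gpuvm API) showing the same simplification:

#include <stddef.h>

struct range { unsigned long start, end; struct range *next; };

/* Each early exit used to 'goto next' so a trailing 'prev = r;' could run;
 * with that epilogue deleted, a plain 'continue' is equivalent. */
static int walk_overlaps(struct range *head, unsigned long addr,
			 unsigned long len, int (*cb)(struct range *))
{
	struct range *r;
	int ret;

	for (r = head; r; r = r->next) {
		if (r->end <= addr || r->start >= addr + len)
			continue;	/* was: goto next */

		ret = cb(r);
		if (ret)
			return ret;
	}

	return 0;
}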
void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
+nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
+ bool no_prefetch)
{
struct nvif_user *user = &chan->drm->client.device.user;
struct nouveau_bo *pb = chan->push.buffer;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
BUG_ON(chan->dma.ib_free < 1);
+ WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
- nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+ nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
+ (no_prefetch ? (1 << 31) : 0));
chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
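For reference, the two wr32 stores above form one 64-bit indirect-buffer (IB) entry. A userspace-compilable sketch of the same packing (ib_entry_pack() is a hypothetical helper; the bit positions mirror the code above):

#include <stdint.h>
#include <stdbool.h>

/* Word 0: push buffer address bits 31:0.
 * Word 1: address bits 39:32 in bits 7:0, length in bits 30:8,
 *         no-prefetch in bit 31. The 23-bit length field is why
 *         NV50_DMA_PUSH_MAX_LENGTH is 0x7fffff. */
static uint64_t ib_entry_pack(uint64_t offset, uint32_t length,
			      bool no_prefetch)
{
	uint32_t lo = (uint32_t)offset;
	uint32_t hi = (uint32_t)(offset >> 32) | (length << 8) |
		      (no_prefetch ? (1u << 31) : 0);

	return (uint64_t)hi << 32 | lo;
}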
#include "nouveau_chan.h"
int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
+void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
+ bool no_prefetch);
/*
 * There's a hw race condition where you can't jump to your PUT offset;
 * to avoid this we add an extra number of NOPs to pad out the buffer.
 */
#define NOUVEAU_DMA_SKIPS (128 / 4)
+/* Maximum push buffer size (the IB entry's length field is 23 bits wide). */
+#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff
+
/* Object handles - for stuff that doesn't use handle == oclass. */
enum {
NvDmaFB = 0x80000002,
if (chan->dma.ib_max) {
nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
- (chan->dma.cur - chan->dma.put) << 2);
+ (chan->dma.cur - chan->dma.put) << 2, false);
} else {
WRITE_PUT(chan->dma.cur);
}
}
for (i = 0; i < exec_job->push.count; i++) {
- nv50_dma_push(chan, exec_job->push.s[i].va,
- exec_job->push.s[i].va_len);
+ struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
+ bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
+
+ nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
}
ret = nouveau_fence_emit(fence, chan);
{
struct nouveau_exec_job *job;
struct nouveau_job_args args = {};
- int ret;
+ int i, ret;
+
+ for (i = 0; i < __args->push.count; i++) {
+ struct drm_nouveau_exec_push *p = &__args->push.s[i];
+
+ if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
+ NV_PRINTK(err, nouveau_cli(__args->file_priv),
+ "pushbuf size exceeds limit: 0x%x max 0x%x\n",
+ p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
+ return -EINVAL;
+ }
+ }
job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job)
	return -ENOMEM;
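With the ioctl now rejecting oversized pushes with -EINVAL instead of letting them reach the ring, userspace must keep each segment under the limit. A hypothetical splitting helper (illustrative only; real code must split on command boundaries, not at arbitrary byte offsets):

#include <stdint.h>

#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff

struct seg { uint64_t va; uint32_t len; };	/* illustrative, not UAPI */

/* Returns the number of segments written, or 0 if 'out' is too small. */
static unsigned int split_push(uint64_t va, uint64_t len,
			       struct seg *out, unsigned int max_segs)
{
	unsigned int n = 0;

	while (len) {
		uint32_t chunk = len > NV50_DMA_PUSH_MAX_LENGTH ?
				 NV50_DMA_PUSH_MAX_LENGTH : (uint32_t)len;

		if (n == max_segs)
			return 0;

		out[n].va = va;
		out[n].len = chunk;
		n++;
		va += chunk;
		len -= chunk;
	}

	return n;
}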
for (i = 0; i < req->nr_push; i++) {
struct nouveau_vma *vma = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
+ u64 addr = vma->addr + push[i].offset;
+ u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
+ bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
- nv50_dma_push(chan, vma->addr + push[i].offset,
- push[i].length);
+ nv50_dma_push(chan, addr, length, no_prefetch);
}
} else
if (drm->client.device.info.chipset >= 0x25) {
if (job->sync)
done_fence = dma_fence_get(job->done_fence);
+ /* If a sched job depends on a dma-fence from a job from the same GPU
+ * scheduler instance, but a different scheduler entity, the GPU
+ * scheduler only waits for the particular job to be scheduled,
+ * rather than for the job to fully complete. This is due to the GPU
+ * scheduler assuming that there is a scheduler instance per ring.
+ * However, the current implementation, in order to avoid arbitrary
+ * amounts of kthreads, has a single scheduler instance while scheduler
+ * entities represent rings.
+ *
+ * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE flag for all
+ * out-fences in order to force the scheduler to wait for full job
+ * completion for dependent jobs from different entities and the same
+ * scheduler instance.
+ *
+ * There is some work in progress [1] to address the issues of firmware
+ * schedulers; once it is in-tree the scheduler topology in Nouveau
+ * should be re-worked accordingly.
+ *
+ * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/
+ */
+ set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
+
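The same flag can be applied from the consuming side as well. A minimal sketch (assuming the drm_sched API of this era; 'dep' is a finished fence obtained elsewhere) that forces a dependency to wait for full completion rather than mere scheduling:

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/* Illustrative only: mark 'dep' so the scheduler's same-instance
 * optimization is bypassed, then record it as a job dependency.
 * drm_sched_job_add_dependency() consumes the reference taken here. */
static int add_full_wait_dependency(struct drm_sched_job *job,
				    struct dma_fence *dep)
{
	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &dep->flags);

	return drm_sched_job_add_dependency(job, dma_fence_get(dep));
}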
if (job->ops->armed_submit)
job->ops->armed_submit(job);
struct drm_gpuva *va = r->unmap->va;
struct uvmm_map_args remap_args = {
.kind = uvma_from_va(va)->kind,
+ .region = uvma_from_va(va)->region,
};
u64 ustart = va->va.addr;
u64 urange = va->va.range;
}
/**
- * drm_kunit_helper_context_alloc - Allocates an acquire context
+ * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context
* @test: The test context object
*
* Allocates and initializes a modeset acquire context.
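A typical call site under the new name, as a short sketch (the test body is hypothetical):

#include <kunit/test.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modeset_lock.h>

static void drm_test_example(struct kunit *test)
{
	struct drm_modeset_acquire_ctx *ctx;

	/* The helper ties cleanup to the test context, so no explicit
	 * drm_modeset_acquire_fini() is needed here. */
	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
}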
dma1 = tt->dma_address[0];
dma2 = tt->dma_address[tt->num_pages - 1];
- KUNIT_ASSERT_NOT_NULL(test, (void *)dma1);
- KUNIT_ASSERT_NOT_NULL(test, (void *)dma2);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);
ttm_pool_free(pool, tt);
ttm_tt_fini(tt);
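The double cast matters on 32-bit configurations where dma_addr_t is 64 bits wide and a direct pointer cast would warn. A standalone illustration of the pattern:

#include <stdint.h>

/* Stand-in for a 32-bit kernel built with 64-bit DMA addresses. */
typedef uint64_t dma_addr_t;

static void *dma_addr_as_ptr(dma_addr_t addr)
{
	/* (void *)addr alone triggers -Wint-to-pointer-cast when pointers
	 * are narrower than dma_addr_t; the uintptr_t hop makes the
	 * narrowing explicit and silences the warning. */
	return (void *)(uintptr_t)addr;
}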
__u32 pad;
__u64 offset;
__u64 length;
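+/* Bits 22:0 of @length hold the push buffer size (the hardware length
+ * field is 23 bits wide), so bit 23 is never a valid length bit and can
+ * carry a flag. */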
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};
struct drm_nouveau_gem_pushbuf {
/**
* @va_len: the length of the push buffer mapping
*/
- __u64 va_len;
+ __u32 va_len;
+ /**
+ * @flags: the flags for this push buffer mapping
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
};
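A hypothetical userspace helper filling one entry of the new struct (field names per the UAPI above; 'va' and 'len' come from the caller's push buffer mapping):

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <drm/nouveau_drm.h>

static void fill_exec_push(struct drm_nouveau_exec_push *push,
			   uint64_t va, uint32_t len, bool no_prefetch)
{
	memset(push, 0, sizeof(*push));
	push->va = va;
	push->va_len = len;	/* must not exceed 0x7fffff */
	push->flags = no_prefetch ? DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH : 0;
}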
/**