// SPDX-License-Identifier: MIT

#include <drm/drm_exec.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_chan.h"
#include "nouveau_sched.h"
#include "nouveau_uvmm.h"

/**
 * DOC: Overview
 *
 * Nouveau's VM_BIND / EXEC UAPI consists of three ioctls: DRM_NOUVEAU_VM_INIT,
 * DRM_NOUVEAU_VM_BIND and DRM_NOUVEAU_EXEC.
 *
 * In order to use the UAPI, a user client must first initialize the VA space
 * using the DRM_NOUVEAU_VM_INIT ioctl, specifying which region of the VA space
 * should be managed by the kernel and which by the UMD.
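 *
 * For illustration only, a minimal userspace sketch of this initialization
 * (assuming libdrm's drmIoctl() wrapper and the struct drm_nouveau_vm_init
 * layout from the nouveau UAPI header; the kernel-managed range below is
 * made up) could look like::
 *
 *	struct drm_nouveau_vm_init init = {
 *		.kernel_managed_addr = 0x0,
 *		.kernel_managed_size = 1ull << 30,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_INIT, &init);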
 *
 * The DRM_NOUVEAU_VM_BIND ioctl provides clients an interface to manage the
 * userspace-manageable portion of the VA space. It provides operations to map
 * and unmap memory. Mappings may be flagged as sparse. Sparse mappings are not
 * backed by a GEM object and the kernel will ignore GEM handles provided
 * alongside a sparse mapping.
 *
 * Userspace may request memory backed mappings either within or outside of the
 * bounds (but not crossing those bounds) of a previously mapped sparse
 * mapping. Subsequently requested memory backed mappings within a sparse
 * mapping will take precedence over the corresponding range of the sparse
 * mapping. If such memory backed mappings are unmapped the kernel will make
 * sure that the corresponding sparse mapping will take their place again.
 * Requests to unmap a sparse mapping that still contains memory backed mappings
 * will result in those memory backed mappings being unmapped first.
 *
 * Unmap requests are not bound to the range of existing mappings and can even
 * overlap the bounds of sparse mappings. For such a request the kernel will
 * make sure to unmap all memory backed mappings within the given range,
 * splitting up memory backed mappings which are only partially contained
 * within the given range. Unmap requests with the sparse flag set must match
 * the range of a previously mapped sparse mapping exactly though.
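 *
 * As a sketch of the mapping semantics above (struct, op and flag names as
 * defined in the nouveau UAPI header; the GEM handle, addresses and sizes are
 * made up), a sparse mapping with a memory backed mapping placed inside it
 * could be requested like::
 *
 *	struct drm_nouveau_vm_bind_op ops[] = {
 *		{
 *			.op = DRM_NOUVEAU_VM_BIND_OP_MAP,
 *			.flags = DRM_NOUVEAU_VM_BIND_SPARSE,
 *			.addr = 0x100000000ull,
 *			.range = 0x10000000,
 *		},
 *		{
 *			.op = DRM_NOUVEAU_VM_BIND_OP_MAP,
 *			.handle = bo_handle,
 *			.addr = 0x100000000ull,
 *			.bo_offset = 0,
 *			.range = 0x10000,
 *		},
 *	};
 *	struct drm_nouveau_vm_bind bind = {
 *		.op_count = 2,
 *		.op_ptr = (uintptr_t)ops,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);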
 *
 * While the kernel generally permits arbitrary sequences and ranges of memory
 * backed mappings being mapped and unmapped, either within a single or multiple
 * VM_BIND ioctl calls, there are some restrictions for sparse mappings.
 *
 * The kernel does not permit clients to:
 *   - unmap non-existent sparse mappings
 *   - unmap a sparse mapping and map a new sparse mapping overlapping the range
 *     of the previously unmapped sparse mapping within the same VM_BIND ioctl
 *   - unmap a sparse mapping and map new memory backed mappings overlapping the
 *     range of the previously unmapped sparse mapping within the same VM_BIND
 *     ioctl
 *
 * When using the VM_BIND ioctl to request the kernel to map memory to a given
 * virtual address in the GPU's VA space there is no guarantee that the actual
 * mappings are created in the GPU's MMU. If the given memory is swapped out
 * at the time the bind operation is executed the kernel will stash the mapping
 * details into its internal allocator and create the actual MMU mappings once
 * the memory is swapped back in. While this is transparent to userspace, it is
 * guaranteed that all the backing memory is swapped back in and all the memory
 * mappings, as requested by userspace previously, are actually mapped once the
 * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
 *
 * A VM_BIND job can be executed either synchronously or asynchronously. If
 * executed asynchronously, userspace may provide a list of syncobjs this job
 * will wait for and/or a list of syncobjs the kernel will signal once the
 * VM_BIND job finished execution. If executed synchronously the ioctl will
 * block until the bind job is finished. For synchronous jobs the kernel will
 * not permit any syncobjs submitted to the kernel.
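 *
 * As a sketch (again using the UAPI names and made-up handles; map_op is a
 * struct drm_nouveau_vm_bind_op as shown above), an asynchronous bind that
 * signals a syncobj once it finished execution could be requested like::
 *
 *	struct drm_nouveau_sync sig = {
 *		.flags = DRM_NOUVEAU_SYNC_SYNCOBJ,
 *		.handle = bind_done_syncobj,
 *	};
 *	struct drm_nouveau_vm_bind bind = {
 *		.flags = DRM_NOUVEAU_VM_BIND_RUN_ASYNC,
 *		.op_count = 1,
 *		.op_ptr = (uintptr_t)&map_op,
 *		.sig_count = 1,
 *		.sig_ptr = (uintptr_t)&sig,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);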
 *
 * To execute a push buffer the UAPI provides the DRM_NOUVEAU_EXEC ioctl. EXEC
 * jobs are always executed asynchronously, and, like VM_BIND jobs, provide
 * the option to synchronize them with syncobjs.
 *
 * Besides that, EXEC jobs can be scheduled for a specified channel to execute on.
 *
 * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs have
 * an up-to-date view of the VA space. However, the actual mappings might still
 * be pending. Hence, EXEC jobs require the fences of the corresponding VM_BIND
 * jobs they depend on to be attached to them.
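 *
 * To tie the above together, a sketch of an EXEC submission that waits for the
 * asynchronous VM_BIND job from the previous example via its syncobj (push
 * buffer VA, length and channel id are made up; the field names match the
 * nouveau UAPI header) could look like::
 *
 *	struct drm_nouveau_exec_push push = {
 *		.va = 0x100000000ull,
 *		.va_len = 0x1000,
 *	};
 *	struct drm_nouveau_sync wait = {
 *		.flags = DRM_NOUVEAU_SYNC_SYNCOBJ,
 *		.handle = bind_done_syncobj,
 *	};
 *	struct drm_nouveau_exec exec = {
 *		.channel = channel_id,
 *		.push_count = 1,
 *		.push_ptr = (uintptr_t)&push,
 *		.wait_count = 1,
 *		.wait_ptr = (uintptr_t)&wait,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);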
 */

static int
nouveau_exec_job_submit(struct nouveau_job *job)
{
	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
	struct nouveau_cli *cli = job->cli;
	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
	struct drm_exec *exec = &job->exec;
	struct drm_gem_object *obj;
	unsigned long index;
	int ret;

	/* Create a new fence, but do not emit yet. */
	ret = nouveau_fence_create(&exec_job->fence, exec_job->chan);
	if (ret)
		return ret;

	/* Lock and prepare all GEM objects currently mapped in this VM. */
	nouveau_uvmm_lock(uvmm);
	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
			    DRM_EXEC_IGNORE_DUPLICATES);
	drm_exec_until_all_locked(exec) {
		struct drm_gpuva *va;

		drm_gpuva_for_each_va(va, &uvmm->umgr) {
			if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
				continue;

			ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
			drm_exec_retry_on_contention(exec);
			if (ret)
				goto err_uvmm_unlock;
		}
	}
	nouveau_uvmm_unlock(uvmm);

	/* Make sure the backing memory of all locked BOs is resident. */
	drm_exec_for_each_locked_object(exec, index, obj) {
		struct nouveau_bo *nvbo = nouveau_gem_object(obj);

		ret = nouveau_bo_validate(nvbo, true, false);
		if (ret)
			goto err_exec_fini;
	}

	return 0;

err_uvmm_unlock:
	nouveau_uvmm_unlock(uvmm);
err_exec_fini:
	drm_exec_fini(exec);
	return ret;
}

static void
nouveau_exec_job_armed_submit(struct nouveau_job *job)
{
	struct drm_exec *exec = &job->exec;
	struct drm_gem_object *obj;
	unsigned long index;

	drm_exec_for_each_locked_object(exec, index, obj)
		dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);

	drm_exec_fini(exec);
}

static struct dma_fence *
nouveau_exec_job_run(struct nouveau_job *job)
{
	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
	struct nouveau_channel *chan = exec_job->chan;
	struct nouveau_fence *fence = exec_job->fence;
	int i, ret;

	ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
	if (ret) {
		NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
		return ERR_PTR(ret);
	}

	for (i = 0; i < exec_job->push.count; i++) {
		struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
		bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;

		nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
	}

	ret = nouveau_fence_emit(fence);
	if (ret) {
		nouveau_fence_unref(&exec_job->fence);
		NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
		return ERR_PTR(ret);
	}

	/* The fence was emitted successfully, set the job's fence pointer to
	 * NULL in order to avoid freeing it up when the job is cleaned up.
	 */
	exec_job->fence = NULL;

	return &fence->base;
}

static void
nouveau_exec_job_free(struct nouveau_job *job)
{
	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);

	nouveau_job_free(job);

	kfree(exec_job->fence);
	kfree(exec_job->push.s);
	kfree(exec_job);
}

static enum drm_gpu_sched_stat
nouveau_exec_job_timeout(struct nouveau_job *job)
{
	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
	struct nouveau_channel *chan = exec_job->chan;

	if (unlikely(!atomic_read(&chan->killed)))
		nouveau_channel_kill(chan);

	NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
		  chan->chid);

	nouveau_sched_entity_fini(job->entity);

	return DRM_GPU_SCHED_STAT_ENODEV;
}

static struct nouveau_job_ops nouveau_exec_job_ops = {
	.submit = nouveau_exec_job_submit,
	.armed_submit = nouveau_exec_job_armed_submit,
	.run = nouveau_exec_job_run,
	.free = nouveau_exec_job_free,
	.timeout = nouveau_exec_job_timeout,
};

int
nouveau_exec_job_init(struct nouveau_exec_job **pjob,
		      struct nouveau_exec_job_args *__args)
{
	struct nouveau_exec_job *job;
	struct nouveau_job_args args = {};
	int i, ret;

	for (i = 0; i < __args->push.count; i++) {
		struct drm_nouveau_exec_push *p = &__args->push.s[i];

		if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
			NV_PRINTK(err, nouveau_cli(__args->file_priv),
				  "pushbuf size exceeds limit: 0x%x max 0x%x\n",
				  p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
			return -EINVAL;
		}
	}

	job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	job->push.count = __args->push.count;
	if (__args->push.count) {
		job->push.s = kmemdup(__args->push.s,
				      sizeof(*__args->push.s) *
				      __args->push.count,
				      GFP_KERNEL);
		if (!job->push.s) {
			ret = -ENOMEM;
			goto err_free_job;
		}
	}

	job->chan = __args->chan;

	args.sched_entity = __args->sched_entity;
	args.file_priv = __args->file_priv;

	args.in_sync.count = __args->in_sync.count;
	args.in_sync.s = __args->in_sync.s;

	args.out_sync.count = __args->out_sync.count;
	args.out_sync.s = __args->out_sync.s;

	args.ops = &nouveau_exec_job_ops;
	args.resv_usage = DMA_RESV_USAGE_WRITE;

	ret = nouveau_job_init(&job->base, &args);
	if (ret)
		goto err_free_pushs;

	return 0;

err_free_pushs:
	kfree(job->push.s);
err_free_job:
	kfree(job);
	*pjob = NULL;
	return ret;
}

static int
nouveau_exec(struct nouveau_exec_job_args *args)
{
	struct nouveau_exec_job *job;
	int ret;

	ret = nouveau_exec_job_init(&job, args);
	if (ret)
		return ret;

	ret = nouveau_job_submit(&job->base);
	if (ret)
		goto err_job_fini;

	return 0;

err_job_fini:
	nouveau_job_fini(&job->base);
	return ret;
}

static int
nouveau_exec_ucopy(struct nouveau_exec_job_args *args,
		   struct drm_nouveau_exec *req)
{
	struct drm_nouveau_sync **s;
	u32 inc = req->wait_count;
	u64 ins = req->wait_ptr;
	u32 outc = req->sig_count;
	u64 outs = req->sig_ptr;
	u32 pushc = req->push_count;
	u64 pushs = req->push_ptr;
	int ret;

	if (pushc) {
		args->push.count = pushc;
		args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s));
		if (IS_ERR(args->push.s))
			return PTR_ERR(args->push.s);
	}

	if (inc) {
		s = &args->in_sync.s;

		args->in_sync.count = inc;
		*s = u_memcpya(ins, inc, sizeof(**s));
		if (IS_ERR(*s)) {
			ret = PTR_ERR(*s);
			goto err_free_pushs;
		}
	}

	if (outc) {
		s = &args->out_sync.s;

		args->out_sync.count = outc;
		*s = u_memcpya(outs, outc, sizeof(**s));
		if (IS_ERR(*s)) {
			ret = PTR_ERR(*s);
			goto err_free_ins;
		}
	}

	return 0;

err_free_ins:
	u_free(args->in_sync.s);
err_free_pushs:
	u_free(args->push.s);
	return ret;
}

static void
nouveau_exec_ufree(struct nouveau_exec_job_args *args)
{
	u_free(args->push.s);
	u_free(args->in_sync.s);
	u_free(args->out_sync.s);
}

int
nouveau_exec_ioctl_exec(struct drm_device *dev,
			void *data,
			struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *chan16;
	struct nouveau_channel *chan = NULL;
	struct nouveau_exec_job_args args = {};
	struct drm_nouveau_exec *req = data;
	int ret = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* abi16 locks already */
	if (unlikely(!nouveau_cli_uvmm(cli)))
		return nouveau_abi16_put(abi16, -ENOSYS);

	/* Look up the channel the push buffers should be submitted to. */
	list_for_each_entry(chan16, &abi16->channels, head) {
		if (chan16->chan->chid == req->channel) {
			chan = chan16->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	if (!chan->dma.ib_max)
		return nouveau_abi16_put(abi16, -ENOSYS);

	if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			  req->push_count, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	ret = nouveau_exec_ucopy(&args, req);
	if (ret)
		goto out;

	args.sched_entity = &chan16->sched_entity;
	args.file_priv = file_priv;
	args.chan = chan;

	ret = nouveau_exec(&args);
	nouveau_exec_ufree(&args);
out:
	return nouveau_abi16_put(abi16, ret);
}