/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}
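/* Snapshot of the V3D state captured when a GPU hang is detected.
 * user_state holds the register dump handed back to userspace through
 * the GET_HANG_STATE ioctl, and bo[] keeps references on the BOs that
 * were part of the hung jobs so they can still be inspected afterwards.
 */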
struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};
static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put(state->bo[i]);

	kfree(state);
}
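/* Implements DRM_IOCTL_VC4_GET_HANG_STATE.  Userspace typically calls it
 * twice: once with a too-small array to learn the required bo_count, and
 * again to receive GEM handles for the hung BOs plus the register dump,
 * at which point the kernel drops its copy of the snapshot.
 */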
static int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}
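/* Called from the reset path to capture the state of the hung bin/render
 * jobs (their BOs and the V3D registers) under the job lock, before the
 * GPU gets reset and the jobs are torn down.
 */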
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}
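/* Full GPU reset: power the V3D block off and on again through runtime PM,
 * reset the interrupt and overflow-memory state, and re-arm the hangcheck
 * timer for whatever job gets kicked off next.
 */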
static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}
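/* Work item scheduled by the hangcheck timer when no progress is seen:
 * saves the hang state for later dumping and then resets the GPU.  It runs
 * from process context because the reset path can sleep.
 */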
static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(&vc4->base);

	vc4_reset(&vc4->base);
}
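/* Timer callback armed for ~100ms after a job is kicked off.  If neither
 * control list has advanced since the last check, the GPU is assumed to be
 * hung and the reset work is scheduled; otherwise the timer is re-armed.
 */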
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = &vc4->base;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}
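/* Kicks off a control list on one of the two V3D control list threads
 * (thread 0 is the binner, thread 1 the renderer) by programming its
 * current and end addresses.
 */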
static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}
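/* Blocks until the given seqno has been signalled as finished by the V3D
 * interrupt handler, or until timeout_ns elapses (~0ull means wait forever).
 * Returns 0 on success, -ETIME on timeout, or -ERESTARTSYS when interrupted
 * by a signal.
 */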
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}
static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}
static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}
/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}
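/* Kicks off the render control list of the first queued render job, if any.
 * Like vc4_submit_next_bin_job(), this expects the caller to hold the
 * job_lock.
 */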
void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}
void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}
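/* Stamps every BO referenced by the job with the job's seqno and attaches the
 * job's fence to their reservation objects (shared for the BOs the job reads,
 * exclusive for the BOs the RCL writes), so that other users of those buffers
 * can wait for the rendering to land.
 */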
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
	}
}
static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = &exec->bo[i]->base;

		dma_resv_unlock(bo->resv);
	}

	ww_acquire_fini(acquire_ctx);
}
/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct drm_gem_object *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = &exec->bo[contended_lock]->base;
		ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = &exec->bo[i]->base;

		ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = &exec->bo[j]->base;
				dma_resv_unlock(bo->resv);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = &exec->bo[contended_lock]->base;
				dma_resv_unlock(bo->resv);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = &exec->bo[i]->base;

		ret = dma_resv_reserve_shared(bo->resv, 1);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}
/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx,
		 struct drm_syncobj *out_sync)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no bin job was executing and if the render job (if any) has the
	 * same perfmon as our job attached to it (or if both jobs don't have
	 * perfmon activated), then kick ours off.  Otherwise, it'll get
	 * started when the previous job's flush/render done interrupt occurs.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}
/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}
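/* Copies in the binner command list, shader records and uniforms from
 * userspace, validates them, and relocates them into a freshly allocated
 * BO that the binner can execute from.
 */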
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	if (exec->found_tile_binning_mode_config_packet) {
		ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
		if (ret)
			goto fail;
	}

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}
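/* Final cleanup for a job: signal its fence if the IRQ handler didn't get
 * to, drop the BO references and usecnts, release the bin BO and perfmon
 * references, and free the exec struct.  Called from the job-done work and
 * from the error paths of the submit ioctl.
 */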
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference on the binner BO if needed. */
	if (exec->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	vc4_v3d_pm_put(vc4);

	kfree(exec);
}
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(&vc4->base, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}
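/* Seqno callbacks let other code in the driver schedule a function to run
 * (from a workqueue) once a given seqno has been reached; if the seqno has
 * already passed when the callback is queued, the work is scheduled
 * immediately.
 */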
static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}
/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}
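/* Shared helper for the WAIT_SEQNO and WAIT_BO ioctls: waits interruptibly
 * and, if the wait was cut short, updates the user-visible timeout so the
 * ioctl can be restarted with the time that remains.
 */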
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}
static int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_wait_seqno *args = data;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}
static int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put(gem_obj);
	return ret;
}
/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
static int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct drm_syncobj *out_sync = NULL;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence *in_fence;
	int ret = 0;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
		return -ENODEV;
	}

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}
	exec->dev = vc4;

	ret = vc4_v3d_pm_get(vc4);
	if (ret) {
		kfree(exec);
		return ret;
	}

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (args->in_sync) {
		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
					     0, 0, &in_fence);
		if (ret)
			goto fail;

		/* When the fence (or fence array) is exclusively from our
		 * context we can skip the wait since jobs are executed in
		 * order of their submission through this ioctl and this can
		 * only have fences from a prior job.
		 */
		if (!dma_fence_match_context(in_fence,
					     vc4->dma_fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret) {
				dma_fence_put(in_fence);
				goto fail;
			}
		}

		dma_fence_put(in_fence);
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->out_sync) {
		out_sync = drm_syncobj_find(file_priv, args->out_sync);
		if (!out_sync) {
			ret = -EINVAL;
			goto fail;
		}

		/* We replace the fence in out_sync in vc4_queue_submit since
		 * the render job could execute immediately after that call.
		 * If it finishes before our ioctl processing resumes the
		 * render job fence could already have been freed.
		 */
	}

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

	/* The syncobj isn't part of the exec data and we need to free our
	 * reference even if job submission failed.
	 */
	if (out_sync)
		drm_syncobj_put(out_sync);

	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(&vc4->base, exec);

	return ret;
}
static void vc4_gem_destroy(struct drm_device *dev, void *unused);
int vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);

	return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}
static void vc4_gem_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}
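/* DRM_IOCTL_VC4_GEM_MADVISE: lets userspace mark a BO as purgeable
 * (DONTNEED) or as needed again (WILLNEED), and reports back whether the
 * backing storage was still there.
 */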
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put(gem_obj);

	return ret;
}