/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}
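
/* Snapshot of the GPU state at the time a hang was detected, kept
 * around until userspace collects it through the
 * DRM_IOCTL_VC4_GET_HANG_STATE ioctl below.
 */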
struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};
static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put(state->bo[i]);

	/* Free the BO pointer array along with the hang state itself. */
	kfree(state->bo);
	kfree(state);
}
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);
		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}
static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}
static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(&vc4->base);

	vc4_reset(&vc4->base);
}
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = &vc4->base;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}
static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}
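
/* Seqnos increase monotonically: vc4->emit_seqno is bumped at submit
 * time and vc4->finished_seqno follows it as jobs retire, so a single
 * comparison against finished_seqno below is all it takes to know
 * whether a given job has completed.
 */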
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}
static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}
static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}
/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		trace_vc4_submit_cl(dev, false, exec->seqno, exec->ct0ca,
				    exec->ct0ea);
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}
void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	trace_vc4_submit_cl(dev, true, exec->seqno, exec->ct1ca, exec->ct1ea);
	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}
void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}
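
/* If the render queue wasn't empty above, the hardware is still busy
 * with an earlier RCL, and the next render job is kicked off from the
 * render-done interrupt path instead.
 */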
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
	}
}
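
/* Note the asymmetry above: BOs the job merely reads get a shared
 * (read) fence, while BOs written by the RCL get the exclusive
 * (write) fence, matching the usual dma-resv read/write semantics.
 */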
static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = &exec->bo[i]->base;

		dma_resv_unlock(bo->resv);
	}

	ww_acquire_fini(acquire_ctx);
}
/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct drm_gem_object *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = &exec->bo[contended_lock]->base;
		ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = &exec->bo[i]->base;

		ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = &exec->bo[j]->base;
				dma_resv_unlock(bo->resv);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = &exec->bo[contended_lock]->base;

				dma_resv_unlock(bo->resv);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = &exec->bo[i]->base;

		ret = dma_resv_reserve_shared(bo->resv, 1);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}
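
/* The contended_lock/retry dance above is the standard ww_mutex
 * backoff pattern: on -EDEADLK, drop every reservation we hold, sleep
 * on the contended one with dma_resv_lock_slow_interruptible(), and
 * then restart the whole acquisition pass.
 */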
/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx,
		 struct drm_syncobj *out_sync)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no bin job was executing and if the render job (if any) has the
	 * same perfmon as our job attached to it (or if both jobs don't have
	 * perfmon activated), then kick ours off.  Otherwise, it'll get
	 * started when the previous job's flush/render done interrupt occurs.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}
/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	if (exec->found_tile_binning_mode_config_packet) {
		ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
		if (ret)
			goto fail;
	}

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}
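
/* Resulting layout of exec->exec_bo, as set up above: the binner
 * control list at offset 0, the validated shader records at
 * shader_rec_offset, then the uniforms at uniforms_offset, so a
 * single BO backs everything the binner fetches.
 */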
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference on the binner BO if needed. */
	if (exec->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	vc4_v3d_pm_put(vc4);

	kfree(exec);
}
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(&vc4->base, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}
static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}
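
/* Sketch of a (hypothetical) vc4_queue_seqno_cb() user: embed the cb
 * in your own state, and the callback fires from a workqueue once the
 * GPU retires the seqno, e.g.:
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		struct my_state *s = container_of(cb, struct my_state, cb);
 *
 *		complete(&s->done);
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &s->cb, bo->seqno, my_flip_done);
 *
 * Because it runs from a work item, the callback is allowed to sleep.
 */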
/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
		else
			*timeout_ns = 0;
	}

	return ret;
}
int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}
int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put(gem_obj);
	return ret;
}
/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct drm_syncobj *out_sync = NULL;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence *in_fence;
	int ret = 0;

	trace_vc4_submit_cl_ioctl(dev, args->bin_cl_size,
				  args->shader_rec_size,
				  args->bo_handle_count);

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
		return -ENODEV;
	}

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	ret = vc4_v3d_pm_get(vc4);
	if (ret) {
		kfree(exec);
		return ret;
	}

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (args->in_sync) {
		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
					     0, 0, &in_fence);
		if (ret)
			goto fail;

		/* When the fence (or fence array) is exclusively from our
		 * context we can skip the wait since jobs are executed in
		 * order of their submission through this ioctl and this can
		 * only have fences from a prior job.
		 */
		if (!dma_fence_match_context(in_fence,
					     vc4->dma_fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret) {
				dma_fence_put(in_fence);
				goto fail;
			}
		}

		dma_fence_put(in_fence);
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->out_sync) {
		out_sync = drm_syncobj_find(file_priv, args->out_sync);
		if (!out_sync) {
			ret = -EINVAL;
			goto fail;
		}

		/* We replace the fence in out_sync in vc4_queue_submit since
		 * the render job could execute immediately after that call.
		 * If it finishes before our ioctl processing resumes the
		 * render job fence could already have been freed.
		 */
	}

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

	/* The syncobj isn't part of the exec data and we need to free our
	 * reference even if job submission failed.
	 */
	if (out_sync)
		drm_syncobj_put(out_sync);

	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(&vc4->base, exec);

	return ret;
}
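
/* Roughly, a userspace client (e.g. Mesa's vc4 driver) fills a
 * struct drm_vc4_submit_cl with its binner CL, shader records,
 * uniforms, and BO handle array, calls
 * ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &args), and can then wait on the
 * returned args->seqno via DRM_IOCTL_VC4_WAIT_SEQNO.
 */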
static void vc4_gem_destroy(struct drm_device *dev, void *unused);
int vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);
	init_waitqueue_head(&vc4->job_wait_queue);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);

	return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}
static void vc4_gem_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put(gem_obj);

	return ret;
}