// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>
#include <uapi/drm/v3d_drm.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static void
v3d_clock_down_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, clk_down_work.work);
	int ret;

	ret = clk_set_min_rate(v3d->clk, v3d->clk_down_rate);
	v3d->clk_up = false;
	WARN_ON_ONCE(ret != 0);
}

static void
v3d_clock_up_get(struct v3d_dev *v3d)
{
	mutex_lock(&v3d->clk_lock);
	if (v3d->clk_refcount++ == 0) {
		cancel_delayed_work_sync(&v3d->clk_down_work);
		if (!v3d->clk_up) {
			int ret;

			ret = clk_set_min_rate(v3d->clk, v3d->clk_up_rate);
			WARN_ON_ONCE(ret != 0);
			v3d->clk_up = true;
		}
	}
	mutex_unlock(&v3d->clk_lock);
}

static void
v3d_clock_up_put(struct v3d_dev *v3d)
{
	mutex_lock(&v3d->clk_lock);
	if (--v3d->clk_refcount == 0) {
		schedule_delayed_work(&v3d->clk_down_work,
				      msecs_to_jiffies(100));
	}
	mutex_unlock(&v3d->clk_lock);
}

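/*
 * Illustrative sketch (comment only, not driver code): each job holds
 * one clock reference for its lifetime, so the clock floor is only
 * lowered once the GPU has been idle for the 100ms debounce above:
 *
 *	v3d_clock_up_get(v3d);	// taken in v3d_job_init()
 *	... job executes ...
 *	v3d_clock_up_put(v3d);	// dropped in v3d_job_free()
 */
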
static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type.  If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	if (v3d->ver < 40)
		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

static void
v3d_reset_by_bridge(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}
}

static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	if (v3d->reset)
		reset_control_reset(v3d->reset);
	else
		v3d_reset_by_bridge(v3d);

	v3d_init_hw_state(v3d);
}

void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
	DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
		      V3D_CORE_READ(0, V3D_ERR_STAT));
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	v3d_perfmon_stop(v3d, v3d->active_perfmon, false);

	trace_v3d_reset_end(dev);
}

static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2C cache.  This was the L2 cache for
 * uniforms and instructions on V3D 3.2.
 */
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
	if (v3d->ver > 32)
		return;

	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
	 * need to wait for completion before dispatching the job --
	 * L2T accesses will be stalled until the flush has completed.
	 * However, we do need to make sure we don't try to trigger a
	 * new flush while the L2_CLEAN queue is trying to
	 * synchronously clean after a job.
	 */
	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	mutex_unlock(&v3d->cache_clean_lock);
}

/* Cleans texture L1 and L2 cachelines (writing back dirty data).
 *
 * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
 * executed, we need to make sure that the clean is done before
 * signaling job completion.  So, we synchronously wait before
 * returning, and we make sure that L2 invalidates don't happen in the
 * meantime to confuse our are-we-done checks.
 */
void
v3d_clean_caches(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;
	int core = 0;

	trace_v3d_cache_clean_begin(dev);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_TMUWCF), 100)) {
		DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
	}

	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));

	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T clean\n");
	}

	mutex_unlock(&v3d->cache_clean_lock);

	trace_v3d_cache_clean_end(dev);
}

/* Invalidates the slice caches.  These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	/* Invalidate the caches from the outside in.  That way if
	 * another CL's concurrent use of nearby memory were to pull
	 * an invalidated cacheline back in, we wouldn't leave stale
	 * data in the inner cache.
	 */
	v3d_flush_l3(v3d);
	v3d_invalidate_l2c(v3d, 0);
	v3d_flush_l2t(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
}

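/*
 * Note: the scheduler's per-queue run_job hooks (v3d_sched.c) are
 * expected to call v3d_invalidate_caches() before kicking off each
 * job, so every job starts with clean read caches.
 */
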
/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on the render job's unref_list).  They're entirely
 * private to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = drm_gem_fence_array_add_implicit(&job->deps,
						       job->bo[i], true);
		if (ret) {
			drm_gem_unlock_reservations(job->bo, job->bo_count,
						    acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/**
 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
 * @bo_handles: GEM handles
 * @bo_count: Number of GEM handles passed in
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_job_free() time.
 */
static int
v3d_lookup_bos(struct drm_device *dev,
	       struct drm_file *file_priv,
	       struct v3d_job *job,
	       u64 bo_handles,
	       u32 bo_count)
{
	u32 *handles;
	int ret = 0;
	int i;

	job->bo_count = bo_count;

	if (!job->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	job->bo = kvmalloc_array(job->bo_count,
				 sizeof(struct drm_gem_cma_object *),
				 GFP_KERNEL | __GFP_ZERO);
	if (!job->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)bo_handles,
			   job->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < job->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->bo[i] = bo;
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

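/*
 * Illustrative userspace sketch (hypothetical handles): bo_handles is
 * a user pointer to a packed u32 array, carried through the ioctl args
 * as a u64:
 *
 *	uint32_t handles[] = { cl_bo, tile_alloc_bo, tile_state_bo };
 *	args.bo_handles = (uintptr_t)handles;
 *	args.bo_handle_count = 3;
 */
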
static void
v3d_job_free(struct kref *ref)
{
	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
	unsigned long index;
	struct dma_fence *fence;
	struct v3d_dev *v3d = job->v3d;
	int i;

	for (i = 0; i < job->bo_count; i++) {
		if (job->bo[i])
			drm_gem_object_put(job->bo[i]);
	}
	kvfree(job->bo);

	xa_for_each(&job->deps, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->deps);

	dma_fence_put(job->irq_fence);
	dma_fence_put(job->done_fence);

	v3d_clock_up_put(v3d);

	if (job->perfmon)
		v3d_perfmon_put(job->perfmon);

	kfree(job);
}

static void
v3d_render_job_free(struct kref *ref)
{
	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
						  base.refcount);
	struct v3d_bo *bo, *save;

	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
		drm_gem_object_put(&bo->base.base);
	}

	v3d_job_free(ref);
}

void v3d_job_put(struct v3d_job *job)
{
	kref_put(&job->refcount, job->free);
}

int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
				    true, timeout_jiffies);

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffie/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	return ret;
}

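/*
 * Illustrative userspace sketch (hypothetical fd/handle): wait up to
 * one second for a BO to idle; on -EAGAIN the updated timeout_ns lets
 * a restarted call wait only for the remaining time:
 *
 *	struct drm_v3d_wait_bo wait = {
 *		.handle = bo_handle,
 *		.timeout_ns = 1000000000ull,
 *	};
 *	ret = ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
 */
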
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
	     struct v3d_job *job, void (*free)(struct kref *ref),
	     u32 in_sync)
{
	struct dma_fence *in_fence = NULL;
	int ret;

	job->v3d = v3d;
	job->free = free;

	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

	ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
	if (ret == -EINVAL)
		goto fail;

	ret = drm_gem_fence_array_add(&job->deps, in_fence);
	if (ret)
		goto fail;

	v3d_clock_up_get(v3d);
	kref_init(&job->refcount);

	return 0;
fail:
	xa_destroy(&job->deps);
	return ret;
}

static int
v3d_push_job(struct v3d_file_priv *v3d_priv,
	     struct v3d_job *job, enum v3d_queue queue)
{
	int ret;

	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 v3d_priv);
	if (ret)
		return ret;

	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	/* put by scheduler job completion */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);

	return 0;
}

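/*
 * After a successful v3d_push_job() the job holds two references: the
 * submitter's kref from v3d_job_init(), dropped by the ioctl with
 * v3d_job_put() once fences are attached, and the scheduler's
 * reference taken above, dropped when the scheduler completes the job.
 */
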
static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
					 struct v3d_job *job,
					 struct ww_acquire_ctx *acquire_ctx,
					 u32 out_sync,
					 struct dma_fence *done_fence)
{
	struct drm_syncobj *sync_out;
	int i;

	for (i = 0; i < job->bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		dma_resv_add_excl_fence(job->bo[i]->resv,
					job->done_fence);
	}

	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

	/* Update the return sync object for the job */
	sync_out = drm_syncobj_find(file_priv, out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out, done_fence);
		drm_syncobj_put(sync_out);
	}
}

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_bin_job *bin = NULL;
	struct v3d_render_job *render;
	struct v3d_job *clean_job = NULL;
	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->pad != 0)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		DRM_INFO("invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	render = kcalloc(1, sizeof(*render), GFP_KERNEL);
	if (!render)
		return -ENOMEM;

	render->start = args->rcl_start;
	render->end = args->rcl_end;
	INIT_LIST_HEAD(&render->unref_list);

	ret = v3d_job_init(v3d, file_priv, &render->base,
			   v3d_render_job_free, args->in_sync_rcl);
	if (ret) {
		kfree(render);
		return ret;
	}

	if (args->bcl_start != args->bcl_end) {
		bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
		if (!bin) {
			v3d_job_put(&render->base);
			return -ENOMEM;
		}

		ret = v3d_job_init(v3d, file_priv, &bin->base,
				   v3d_job_free, args->in_sync_bcl);
		if (ret) {
			v3d_job_put(&render->base);
			kfree(bin);
			return ret;
		}

		bin->start = args->bcl_start;
		bin->end = args->bcl_end;
		bin->qma = args->qma;
		bin->qms = args->qms;
		bin->qts = args->qts;
		bin->render = render;
	}

	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
		if (!clean_job) {
			ret = -ENOMEM;
			goto fail;
		}

		ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
		if (ret) {
			kfree(clean_job);
			clean_job = NULL;
			goto fail;
		}

		last_job = clean_job;
	} else {
		last_job = &render->base;
	}

	ret = v3d_lookup_bos(dev, file_priv, last_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		render->base.perfmon = v3d_perfmon_find(v3d_priv,
							args->perfmon_id);

		if (!render->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}

	mutex_lock(&v3d->sched_lock);
	if (bin) {
		bin->base.perfmon = render->base.perfmon;
		v3d_perfmon_get(bin->base.perfmon);
		ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
		if (ret)
			goto fail_unreserve;

		ret = drm_gem_fence_array_add(&render->base.deps,
					      dma_fence_get(bin->base.done_fence));
		if (ret)
			goto fail_unreserve;
	}

	ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
	if (ret)
		goto fail_unreserve;

	if (clean_job) {
		struct dma_fence *render_fence =
			dma_fence_get(render->base.done_fence);
		ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
		if (ret)
			goto fail_unreserve;
		clean_job->perfmon = render->base.perfmon;
		v3d_perfmon_get(clean_job->perfmon);
		ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
		if (ret)
			goto fail_unreserve;
	}

	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 last_job,
						 &acquire_ctx,
						 args->out_sync,
						 last_job->done_fence);

	if (bin)
		v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	if (clean_job)
		v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(last_job->bo,
				    last_job->bo_count, &acquire_ctx);
fail:
	if (bin)
		v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	if (clean_job)
		v3d_job_put(clean_job);

	return ret;
}

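/*
 * Illustrative userspace sketch (hypothetical addresses/handles): a
 * minimal bin+render submission; syncobj handles of 0 mean "none":
 *
 *	struct drm_v3d_submit_cl submit = {
 *		.bcl_start = bcl_start, .bcl_end = bcl_end,
 *		.rcl_start = rcl_start, .rcl_end = rcl_end,
 *		.qma = tile_alloc_addr, .qms = tile_alloc_size,
 *		.qts = tile_state_addr,
 *		.bo_handles = (uintptr_t)handles,
 *		.bo_handle_count = handle_count,
 *	};
 *	ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);
 */
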
/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_tfu_job *job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync);
	if (ret) {
		kfree(job);
		return ret;
	}

	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
			       sizeof(*job->base.bo), GFP_KERNEL);
	if (!job->base.bo) {
		v3d_job_put(&job->base);
		return -ENOMEM;
	}

	job->args = *args;

	spin_lock(&file_priv->table_lock);
	for (job->base.bo_count = 0;
	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
	     job->base.bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[job->base.bo_count])
			break;

		bo = idr_find(&file_priv->object_idr,
			      args->bo_handles[job->base.bo_count]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  job->base.bo_count,
				  args->bo_handles[job->base.bo_count]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->base.bo[job->base.bo_count] = bo;
	}
	spin_unlock(&file_priv->table_lock);

	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
	if (ret)
		goto fail_unreserve;
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &job->base, &acquire_ctx,
						 args->out_sync,
						 job->base.done_fence);

	v3d_job_put(&job->base);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
				    &acquire_ctx);
fail:
	v3d_job_put(&job->base);

	return ret;
}

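/*
 * Illustrative sketch (hypothetical handles; in Mesa's usage the
 * destination BO comes first): the TFU args carry a fixed array of up
 * to four BO handles, and an unused trailing slot of zero is what
 * terminates the lookup loop above:
 *
 *	struct drm_v3d_submit_tfu tfu = {
 *		.icfg = icfg, .iia = iia,	(raw register values)
 *		.bo_handles = { dst_bo, src_bo, 0, 0 },
 *	};
 *	ioctl(fd, DRM_IOCTL_V3D_SUBMIT_TFU, &tfu);
 */
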
/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader dispatch) job to
 * the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_csd *args = data;
	struct v3d_csd_job *job;
	struct v3d_job *clean_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

	if (!v3d_has_csd(v3d)) {
		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
		return -EINVAL;
	}

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync);
	if (ret) {
		kfree(job);
		return ret;
	}

	clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
	if (!clean_job) {
		v3d_job_put(&job->base);
		return -ENOMEM;
	}

	ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
	if (ret) {
		v3d_job_put(&job->base);
		kfree(clean_job);
		return ret;
	}

	job->args = *args;

	ret = v3d_lookup_bos(dev, file_priv, clean_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		job->base.perfmon = v3d_perfmon_find(v3d_priv,
						     args->perfmon_id);
		if (!job->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}

	mutex_lock(&v3d->sched_lock);
	ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
	if (ret)
		goto fail_unreserve;

	ret = drm_gem_fence_array_add(&clean_job->deps,
				      dma_fence_get(job->base.done_fence));
	if (ret)
		goto fail_unreserve;

	ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
	if (ret)
		goto fail_unreserve;
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 clean_job,
						 &acquire_ctx,
						 args->out_sync,
						 clean_job->done_fence);

	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &acquire_ctx);
fail:
	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return ret;
}

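/*
 * Illustrative userspace sketch (hypothetical values): a compute
 * dispatch passes the CSD config registers through unvalidated (the
 * trace above logs cfg[5] and cfg[6], the shader and uniforms
 * addresses); only the BO list needs looking up, since the CSD
 * accesses memory through the MMU:
 *
 *	struct drm_v3d_submit_csd csd = {
 *		.cfg = { cfg0, cfg1, cfg2, cfg3, cfg4,
 *			 shader_addr, unifs_addr },
 *		.bo_handles = (uintptr_t)handles,
 *		.bo_handle_count = handle_count,
 *	};
 *	ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CSD, &csd);
 */
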
int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);
	mutex_init(&v3d->sched_lock);
	mutex_init(&v3d->cache_clean_lock);

	mutex_init(&v3d->clk_lock);
	INIT_DELAYED_WORK(&v3d->clk_down_work, v3d_clock_down_work);

	/* Kick the clock so the firmware knows we are using the
	 * firmware clock interface.
	 */
	v3d_clock_up_get(v3d);
	v3d_clock_up_put(v3d);

	/* Note: We don't allocate address 0.  Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 *
	 * The 4MB of page table entries (one u32 each) map 1M pages,
	 * i.e. a 4GB GPU virtual address space.
	 */
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->drm.dev,
			"Failed to allocate page tables. Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
				  v3d->pt_paddr);
		return ret;
	}

	return 0;
}

void
v3d_gem_destroy(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);

	v3d_sched_fini(v3d);

	/* Waiting for jobs to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
	WARN_ON(v3d->render_job);

	drm_mm_takedown(&v3d->mm);

	dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
			  v3d->pt_paddr);
}