// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock_types.h>
#include <linux/workqueue.h>

#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>

#include "uapi/drm/v3d_drm.h"

struct clk;
struct platform_device;
struct reset_control;

#define GMP_GRANULARITY (128 * 1024)

/* Enum for each of the V3D queues. */
enum v3d_queue {
	V3D_BIN,
	V3D_RENDER,
	V3D_TFU,
	V3D_CSD,
	V3D_CACHE_CLEAN,
};

#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
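
/*
 * Example (a hypothetical caller, not code from this header): the enum
 * doubles as an index into the per-queue state array in struct v3d_dev,
 * so the render queue's scheduler is reached as:
 *
 *	struct drm_gpu_scheduler *sched = &v3d->queue[V3D_RENDER].sched;
 */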

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct v3d_dev {
	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version
	 * and revision.
	 */
	int ver;

	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;

	struct clk *clk;
	struct delayed_work clk_down_work;
	unsigned long clk_up_rate, clk_down_rate;
	struct mutex clk_lock;

	struct reset_control *reset;

	/* Virtual and DMA addresses of the single shared page table. */
	volatile u32 *pt;
	dma_addr_t pt_paddr;

	/* Virtual and DMA addresses of the MMU's scratch page. When
	 * a read or write is invalid in the MMU, it will be
	 * redirected here.
	 */
	void *mmu_scratch;
	dma_addr_t mmu_scratch_paddr;

	/* Virtual address bits from V3D to the MMU. */
	int va_width;

	/* Number of V3D cores. */
	u32 cores;

	/* Allocator managing the address space. All units are in
	 * number of pages.
	 */
	struct drm_mm mm;
	spinlock_t mm_lock;

	struct work_struct overflow_mem_work;

	struct v3d_bin_job *bin_job;
	struct v3d_render_job *render_job;
	struct v3d_tfu_job *tfu_job;
	struct v3d_csd_job *csd_job;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Spinlock used to synchronize the overflow memory
	 * management against bin job submission.
	 */
	spinlock_t job_lock;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and
	 * reset at once.
	 */
	struct mutex reset_lock;

	/* Lock taken when creating and pushing the GPU scheduler
	 * jobs, to keep the sched-fence seqnos in order.
	 */
	struct mutex sched_lock;

	/* Lock taken during a cache clean and when initiating an L2
	 * flush, to keep L2 flushes from interfering with the
	 * synchronous L2 cleans.
	 */
	struct mutex cache_clean_lock;

	struct {
		u32 num_allocated;
		u32 pages_allocated;
	} bo_stats;
};

static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
	return container_of(dev, struct v3d_dev, drm);
}

static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
	return v3d->ver >= 41;
}
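
/*
 * Example (a hypothetical ioctl path, sketched here for illustration):
 * callers upcast the DRM device and gate CSD-only paths on hardware
 * support:
 *
 *	struct v3d_dev *v3d = to_v3d_dev(dev);
 *
 *	if (!v3d_has_csd(v3d))
 *		return -EINVAL;
 */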

#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)

struct drm_v3d_file_private {
	/* ... */
};

/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
	struct v3d_dev *v3d;

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
	struct drm_v3d_file_private priv;
};

struct v3d_bo {
	struct drm_gem_shmem_object base;

	struct drm_mm_node node;

	/* List entry for the BO's position in
	 * v3d_render_job->unref_list.
	 */
	struct list_head unref_head;
};
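
/* The plain cast below is only valid because `base` is the first member
 * of struct v3d_bo (and struct drm_gem_object is in turn the first member
 * of struct drm_gem_shmem_object), so both pointers share an address.
 */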
static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
{
	return (struct v3d_bo *)bo;
}

struct v3d_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	u64 seqno;
	enum v3d_queue queue;
};

static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
{
	return (struct v3d_fence *)fence;
}

#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
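
/*
 * Example: each accessor expects a `v3d` local to be in scope at the call
 * site. With hypothetical register offsets standing in for the real
 * definitions from v3d_regs.h:
 *
 *	u32 val = V3D_READ(SOME_HUB_REG);
 *	V3D_CORE_WRITE(0, SOME_CORE_REG, val);
 */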

struct v3d_job {
	struct drm_sched_job base;

	struct kref refcount;

	struct v3d_dev *v3d;

	/* This is the array of BOs that were looked up at the start
	 * of submission.
	 */
	struct drm_gem_object **bo;
	u32 bo_count;

	/* Array of struct dma_fence * to block on before submitting this job.
	 */
	struct xarray deps;
	unsigned long last_dep;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *irq_fence;

	/* scheduler fence for when the job is considered complete and
	 * the BO reservations can be released.
	 */
	struct dma_fence *done_fence;

	/* Callback for the freeing of the job on refcount going to 0. */
	void (*free)(struct kref *ref);
};
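
/*
 * Example (a sketch of the lifetime rule implied above): once a
 * submission path has handed the job to the scheduler, it drops its own
 * reference:
 *
 *	v3d_job_put(job);
 *
 * When the refcount reaches zero, kref_put() invokes the job's ->free
 * callback to release the BO references and the job itself.
 */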

struct v3d_bin_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* Corresponding render job, for attaching our overflow memory. */
	struct v3d_render_job *render;

	/* Submitted tile memory allocation start/size, tile state. */
	u32 qma, qms, qts;
};

struct v3d_render_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;
};

struct v3d_tfu_job {
	struct v3d_job base;

	struct drm_v3d_submit_tfu args;
};

struct v3d_csd_job {
	struct v3d_job base;

	u32 timedout_batches;

	struct drm_v3d_submit_csd args;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
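
/*
 * Example (the register/bit names are hypothetical stand-ins): poll a
 * status bit for up to 100 ms, with the poll interval backing off from
 * 10 us toward 1 ms. Per the note above, the condition is re-checked one
 * last time after the deadline expires:
 *
 *	int ret = wait_for(V3D_READ(SOME_STATUS_REG) & SOME_READY_BIT, 100);
 *	if (ret == -ETIMEDOUT)
 *		DRM_ERROR("timed out waiting for ready bit\n");
 */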

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
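
/*
 * Example (a hypothetical caller): convert a user-supplied timeout in
 * nanoseconds into a jiffies budget for a wait, rounding up so that even
 * a very short timeout still waits at least one tick:
 *
 *	unsigned long timeout_jiffies = nsecs_to_jiffies_timeout(timeout_ns);
 */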

/* v3d_bo.c */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
int v3d_drm_gem_prime_fd_to_handle(struct drm_device *dev,
				   struct drm_file *file_priv, int prime_fd,
				   uint32_t *handle);

/* v3d_debugfs.c */
void v3d_debugfs_init(struct drm_minor *minor);

/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);

/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);

/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

/* v3d_mmu.c */
int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
		       u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

/* v3d_sched.c */
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);