// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock_types.h>
#include <linux/workqueue.h>

#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>

#include "uapi/drm/v3d_drm.h"

struct platform_device;

#define GMP_GRANULARITY (128 * 1024)

#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)

v3d_queue_to_string(enum v3d_queue queue)
	case V3D_BIN: return "v3d_bin";
	case V3D_RENDER: return "v3d_render";
	case V3D_TFU: return "v3d_tfu";
	case V3D_CSD: return "v3d_csd";
	case V3D_CACHE_CLEAN: return "v3d_cache_clean";

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

struct v3d_queue_pid_stats {
	struct list_head list;

	/* Time in jiffies at which to purge the stats of this process. Every
	 * time a process sends a new job to the queue, this timeout is pushed
	 * back by V3D_QUEUE_STATS_TIMEOUT, as long as the queue's
	 * gpu_pid_stats_timeout has not been reached.
	 */
	unsigned long timeout_purge;

struct v3d_queue_stats {

	/* Time in jiffies at which collection of per-process GPU stats stops.
	 * It is extended by every access to the gpu_pid_usage debugfs
	 * interface. If that interface is not used, stats are not collected.
	 */
	unsigned long gpu_pid_stats_timeout;

	struct list_head pid_stats_list;

/* Per-process stats (v3d_queue_pid_stats) are recorded if the gpu_pid_usage
 * debugfs interface has been accessed within the last
 * V3D_QUEUE_STATS_TIMEOUT (70s).
 *
 * The same timeout is used to purge the per-process stats of processes that
 * have not sent any job in that period.
 */
#define V3D_QUEUE_STATS_TIMEOUT (70 * HZ)
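
/* Illustrative sketch only, not the driver's actual accounting path: when a
 * job from a given process is accounted, its per-process entry would be
 * refreshed and stale entries dropped roughly like this (variable names are
 * placeholders):
 *
 *	pid_stats->timeout_purge = jiffies + V3D_QUEUE_STATS_TIMEOUT;
 *	list_for_each_entry_safe(entry, tmp, &queue_stats->pid_stats_list, list)
 *		if (time_after(jiffies, entry->timeout_purge))
 *			list_del(&entry->list);
 */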
/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
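
/* Illustrative userspace sketch (not part of this header): a perfmon is
 * typically created with DRM_IOCTL_V3D_PERFMON_CREATE, attached to a CL
 * submission through the perfmon_id field, and read back with
 * DRM_IOCTL_V3D_PERFMON_GET_VALUES; see uapi/drm/v3d_drm.h for the exact
 * structures. The counter id below is a placeholder.
 *
 *	struct drm_v3d_perfmon_create create = { .ncounters = 1 };
 *	create.counters[0] = some_counter_id;
 *	drmIoctl(fd, DRM_IOCTL_V3D_PERFMON_CREATE, &create);
 *
 *	submit.perfmon_id = create.id;
 *	drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);
 *
 *	struct drm_v3d_perfmon_get_values get = {
 *		.id = create.id,
 *		.values_ptr = (uintptr_t)values,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_V3D_PERFMON_GET_VALUES, &get);
 */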
	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero the perfmon is destroyed.

	/* Protects perfmon stop, as it can be invoked from multiple places. */

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_V3D_MAX_PERF_COUNTERS).

	/* Events counted by the HW perf counters. */
	u8 counters[DRM_V3D_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the
	 * HW perf counter values every time the perfmon is attached
	 * to a GPU job. This way, perfmon users don't have to
	 * retrieve the results after each job if they want to track
	 * events covering several submissions. Note that counter
	 * values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.

	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version

	bool single_irq_line;

	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;

	struct delayed_work clk_down_work;
	unsigned long clk_up_rate, clk_down_rate;
	struct mutex clk_lock;

	struct reset_control *reset;

	/* Virtual and DMA addresses of the single shared page table. */

	/* Virtual and DMA addresses of the MMU's scratch page. When
	 * a read or write is invalid in the MMU, it will be

	dma_addr_t mmu_scratch_paddr;
	/* virtual address bits from V3D to the MMU. */

	/* Number of V3D cores. */

	/* Allocator managing the address space. All units are in

	struct work_struct overflow_mem_work;

	struct v3d_bin_job *bin_job;
	struct v3d_render_job *render_job;
	struct v3d_tfu_job *tfu_job;
	struct v3d_csd_job *csd_job;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Spinlock used to synchronize the overflow memory
	 * management against bin job submission.

	/* Used to track the active perfmon if any. */
	struct v3d_perfmon *active_perfmon;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and

	struct mutex reset_lock;

	/* Lock taken when creating and pushing the GPU scheduler
	 * jobs, to keep the sched-fence seqnos in order.

	struct mutex sched_lock;

	/* Lock taken during a cache clean and when initiating an L2
	 * flush, to keep L2 flushes from interfering with the
	 * synchronous L2 cleans.

	struct mutex cache_clean_lock;

	struct v3d_queue_stats gpu_queue_stats[V3D_MAX_QUEUES];

static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
	return container_of(dev, struct v3d_dev, drm);

v3d_has_csd(struct v3d_dev *v3d)
	return v3d->ver >= 41;

#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)

struct drm_v3d_file_private {

/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
	struct drm_v3d_file_private priv;

	struct drm_gem_shmem_object base;

	struct drm_mm_node node;

	/* List entry for the BO's position in
	 * v3d_render_job->unref_list
	 */
	struct list_head unref_head;

static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
	return (struct v3d_bo *)bo;

	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	enum v3d_queue queue;

static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
	return (struct v3d_fence *)fence;

#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
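
/* Example (illustrative): with a v3d_dev pointer named v3d in scope, as the
 * macros above require, reading a core register and acknowledging hub
 * interrupts would look roughly like this (register names from v3d_regs.h):
 *
 *	u32 ident0 = V3D_CORE_READ(0, V3D_CTL_IDENT0);
 *	V3D_WRITE(V3D_HUB_INT_CLR, V3D_READ(V3D_HUB_INT_STS));
 */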
	struct drm_sched_job base;

	struct kref refcount;

	/* This is the array of BOs that were looked up at the start

	struct drm_gem_object **bo;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *irq_fence;

	/* scheduler fence for when the job is considered complete and
	 * the BO reservations can be released.
	 */
	struct dma_fence *done_fence;

	/* Pointer to a performance monitor object if the user requested it,

	struct v3d_perfmon *perfmon;

	/* PID of the process that submitted the job; it can be used for
	 * collecting per-process GPU usage stats.
	 */

	/* Callback for the freeing of the job on refcount going to 0. */
	void (*free)(struct kref *ref);

	/* GPU virtual addresses of the start/end of the CL job. */

	u32 timedout_ctca, timedout_ctra;

	/* Corresponding render job, for attaching our overflow memory. */
	struct v3d_render_job *render;

	/* Submitted tile memory allocation start/size, tile state. */

struct v3d_render_job {

	/* GPU virtual addresses of the start/end of the CL job. */

	u32 timedout_ctca, timedout_ctra;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;

	struct drm_v3d_submit_tfu args;

	u32 timedout_batches;

	struct drm_v3d_submit_csd args;

struct v3d_submit_outsync {
	struct drm_syncobj *syncobj;

struct v3d_submit_ext {

	struct v3d_submit_outsync *out_syncs;

 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	const bool expired__ = ktime_after(ktime_get_raw(), end__); \
	/* Guarantee COND check prior to timeout */ \
	ret__ = -ETIMEDOUT; \
	usleep_range(wait__, wait__ * 2); \
	if (wait__ < (Wmax)) \

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
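
/* Example (illustrative, with hypothetical register and bit names): poll a
 * status bit for up to 2 ms, sleeping between 10 us and 1 ms between reads,
 * and check the return value for -ETIMEDOUT:
 *
 *	int ret = wait_for(V3D_READ(V3D_SOME_STATUS) & V3D_SOME_DONE_BIT, 2);
 *	if (ret == -ETIMEDOUT)
 *		DRM_ERROR("timed out waiting for status bit\n");
 */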
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
	/* nsecs_to_jiffies64() does not guard against overflow */
	if ((NSEC_PER_SEC % HZ) != 0 &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
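
/* Example (illustrative): a nanosecond timeout coming from userspace, e.g.
 * a wait ioctl argument (the field name here is a placeholder), can be
 * converted into a jiffies-based timeout for the wait helpers:
 *
 *	unsigned long timeout = nsecs_to_jiffies_timeout(args->timeout_ns);
 */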
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
	struct dma_buf_attachment *attach,
	struct sg_table *sgt);
int v3d_drm_gem_prime_fd_to_handle(struct drm_device *dev,
	struct drm_file *file_priv, int prime_fd,

void v3d_debugfs_init(struct drm_minor *minor);

extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);

int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
void v3d_job_cleanup(struct v3d_job *job);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);

int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
void v3d_sched_stats_update(struct v3d_queue_stats *queue_stats);

void v3d_perfmon_get(struct v3d_perfmon *perfmon);
void v3d_perfmon_put(struct v3d_perfmon *perfmon);
void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon);
void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon,
struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id);
void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv);
void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv);
int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);