/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_

#include <linux/delay.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include "uapi/drm/vc4_drm.h"

struct drm_gem_object;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this enum.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[];
};
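
/*
 * Lifecycle sketch (illustrative only, not a literal code path in this
 * driver): a submit_cl job that names a perfmon roughly does the following,
 * using the helpers declared at the bottom of this header.
 *
 *	exec->perfmon = vc4_perfmon_find(vc4file, args->perfmonid);
 *	vc4_perfmon_start(vc4, exec->perfmon);       // HW counters on as the job starts
 *	vc4_perfmon_stop(vc4, exec->perfmon, true);  // accumulate into counters[] when done
 *	vc4_perfmon_put(exec->perfmon);              // release once the job is freed
 */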

struct vc4_dev {
	struct drm_device base;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;
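
	/*
	 * Cache reuse sketch (illustrative; bucket_index() is a stand-in for
	 * the size-to-bucket mapping, not a real helper): an allocation
	 * indexes straight into size_list by page count, so cache hits are O(1).
	 *
	 *	list = &vc4->bo_cache.size_list[bucket_index(size)];
	 *	if (!list_empty(list))
	 *		bo = list_first_entry(list, struct vc4_bo, size_head);
	 */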

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error happened after an atomic commit.
	 * This is particularly useful to detect when a specific modeset is too
	 * demanding in terms of memory or HVS bandwidth which is hard to guess
	 * at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;
	int power_refcount;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj hvs_channels;
	struct drm_private_obj load_tracker;

	/* List of vc4_debugfs_info_entry for adding to debugfs once
	 * the minor is available (after drm_dev_register()).
	 */
	struct list_head debugfs_list;

	/* Mutex for binner bo allocation. */
	struct mutex bin_bo_lock;
	/* Reference count for our binner bo. */
	struct kref bin_bo_kref;
};
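
/*
 * Binner BO refcounting sketch (illustrative, not a literal code path): jobs
 * and the overflow handler share bin_bo through bin_bo_kref instead of owning
 * it, using the vc4_v3d helpers declared later in this header.
 *
 *	bool used;
 *	ret = vc4_v3d_bin_bo_get(vc4, &used);	// create or reference under bin_bo_lock
 *	...binning work allocates its chunks out of bin_bo...
 *	vc4_v3d_bin_bo_put(vc4);		// last put frees the binner BO
 */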

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return container_of(dev, struct vc4_dev, base);
}

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};
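
/*
 * Usecnt/purging sketch (illustrative, not a literal code path): anything that
 * keeps a BO busy pins it with a usecnt reference; only idle BOs that
 * userspace marked as don't-need through the madvise ioctl sit in the
 * purgeable pool.
 *
 *	vc4_bo_inc_usecnt(bo);			// e.g. while scanout or the GPU uses it
 *	...
 *	vc4_bo_dec_usecnt(bo);			// may return the BO to the purgeable pool
 *	vc4_bo_add_to_purgeable_pool(bo);	// madv says don't-need and usecnt == 0
 *	vc4_bo_remove_from_purgeable_pool(bo);	// before the BO gets used again
 */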

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
}

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return container_of(fence, struct vc4_fence, base);
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	struct clk *core_clk;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;

	struct debugfs_regset32 regset;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return container_of(plane, struct vc4_plane, base);
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * color ramp updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * buffer.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in HVS
	 * cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return container_of(state, struct vc4_plane_state, base);
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI0,
	VC4_ENCODER_TYPE_HDMI1,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;

	void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);

	void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	const char *debugfs_name;

	/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
	unsigned int hvs_available_channels;

	/* Which output of the HVS this pixelvalve sources from. */
	int hvs_output;
};

struct vc4_pv_data {
	struct vc4_crtc_data base;

	/* Depth of the PixelValve FIFO in bytes */
	unsigned int fifo_depth;

	/* Number of pixels output per clock period */
	u8 pixels_per_clock;

	enum vc4_encoder_type encoder_types[4];
};

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;

	/**
	 * @feeds_txp: True if the CRTC feeds our writeback controller.
	 */
	bool feeds_txp;

	/**
	 * @irq_lock: Spinlock protecting the resources shared between
	 * the atomic code and our vblank handler.
	 */
	spinlock_t irq_lock;

	/**
	 * @current_dlist: Start offset of the display list currently
	 * set in the HVS for that CRTC. Protected by @irq_lock, and
	 * copied in vc4_hvs_update_dlist() for the CRTC interrupt
	 * handler to have access to that value.
	 */
	unsigned int current_dlist;

	/**
	 * @current_hvs_channel: HVS channel currently assigned to the
	 * CRTC. Protected by @irq_lock, and copied in
	 * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
	 * access to that value.
	 */
	unsigned int current_hvs_channel;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return container_of(crtc, struct vc4_crtc, base);
}

static inline const struct vc4_crtc_data *
vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc)
{
	return crtc->data;
}

static inline const struct vc4_pv_data *
vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
{
	const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc);

	return container_of(data, struct vc4_pv_data, base);
}

struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
					 struct drm_crtc_state *state);

struct vc4_crtc_state {
	struct drm_crtc_state base;
	/* Dlist area for this CRTC configuration. */
	struct drm_mm_node mm;
	bool txp_armed;
	unsigned int assigned_channel;

	struct {
		unsigned int left;
		unsigned int right;
		unsigned int top;
		unsigned int bottom;
	} margins;

	unsigned long hvs_load;

	/* Transitional state below, only valid during atomic commits */
	bool update_muxing;
};

#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

static inline struct vc4_crtc_state *
to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
{
	return container_of(crtc_state, struct vc4_crtc_state, base);
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, hvs->regs + offset)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
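
/*
 * Illustrative use of VC4_REG32() (a sketch; the table below is an example,
 * not one that exists in this header): the macro expands to a struct
 * debugfs_reg32 initializer, so register dumps can be declared as plain
 * arrays and hooked into a debugfs_regset32.
 *
 *	static const struct debugfs_reg32 example_regs[] = {
 *		VC4_REG32(SCALER_DISPCTRL),
 *		VC4_REG32(SCALER_DISPSTAT),
 *	};
 *	hvs->regset.base = hvs->regs;
 *	hvs->regset.regs = example_regs;
 *	hvs->regset.nregs = ARRAY_SIZE(example_regs);
 */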

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader record.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;

	/* Whether the exec has taken a reference to the binner BO, which should
	 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
	 */
	bool bin_bo_used;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct vc4_dev *dev;

	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;

	bool bin_bo_used;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};
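
/*
 * Validation-time sketch (illustrative only, not the validator's actual
 * loop): parameters left at the ~0 marker are simply skipped.
 *
 *	for (i = 0; i < ARRAY_SIZE(sample->p_offset); i++) {
 *		if (sample->p_offset[i] == ~0)
 *			continue;	// not provided; hardware treats it as 0
 *		// otherwise relocate the offset and bounds-check it against the BO
 *	}
 */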

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
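
/*
 * Usage sketch (illustrative; EXAMPLE_REG and EXAMPLE_READY_BIT are
 * placeholders, not registers defined by this driver): poll a condition for
 * up to 1000 ms, backing off between reads, with one final check after the
 * timeout expires.
 *
 *	int ret = wait_for(readl(base + EXAMPLE_REG) & EXAMPLE_READY_BIT, 1000);
 *	if (ret == -ETIMEDOUT)
 *		return ret;
 */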

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_bo_dumb_create(struct drm_file *file_priv,
		       struct drm_device *dev,
		       struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
		  const struct drm_crtc_funcs *crtc_funcs,
		  const struct drm_crtc_helper_funcs *crtc_helper_funcs);
void vc4_crtc_destroy(struct drm_crtc *crtc);
int vc4_page_flip(struct drm_crtc *crtc,
		  struct drm_framebuffer *fb,
		  struct drm_pending_vblank_event *event,
		  uint32_t flags,
		  struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_send_vblank(struct drm_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
			  unsigned int *left, unsigned int *right,
			  unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_file(struct drm_device *drm,
			  const char *filename,
			  int (*show)(struct seq_file*, void*),
			  void *data);
void vc4_debugfs_add_regset32(struct drm_device *drm,
			      const char *filename,
			      struct debugfs_regset32 *regset);
#else
static inline void vc4_debugfs_add_file(struct drm_device *drm,
					const char *filename,
					int (*show)(struct seq_file*, void*),
					void *data)
{
}

static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
					    const char *filename,
					    struct debugfs_regset32 *regset)
{
}
#endif

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
void vc4_irq_enable(struct drm_device *dev);
void vc4_irq_disable(struct drm_device *dev);
int vc4_irq_install(struct drm_device *dev, int irq);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type,
				 uint32_t possible_crtcs);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);

#endif /* _VC4_DRV_H_ */