/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */

#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_

#include <linux/delay.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include "uapi/drm/vc4_drm.h"

struct drm_device;
struct drm_gem_object;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this enum.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	struct vc4_dev *dev;

	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[];
};
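
/*
 * Illustrative userspace flow for the perfmon lifetime described above.
 * The ioctl structs come from uapi/drm/vc4_drm.h; the event chosen here
 * is only an example and not verified against a specific uapi revision:
 *
 *	struct drm_vc4_perfmon_create create = { .ncounters = 1 };
 *	struct drm_vc4_perfmon_get_values get = {};
 *	uint64_t value;
 *
 *	create.events[0] = VC4_PERFCNT_FEP_VALID_PRIMS_RENDER;
 *	ioctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create);
 *	// ...attach create.id to one or more submit_cl requests...
 *	get.id = create.id;
 *	get.values_ptr = (uintptr_t)&value;
 *	ioctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);
 */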

struct vc4_dev {
	struct drm_device base;

	struct rpi_firmware *firmware;

	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_fkms *fkms;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error happened after an atomic commit.
	 * This is particularly useful to detect when a specific modeset is too
	 * demanding in terms of memory or HVS bandwidth, which is hard to guess
	 * at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj hvs_channels;
	struct drm_private_obj load_tracker;

	/* List of vc4_debugfs_info_entry for adding to debugfs once
	 * the minor is available (after drm_dev_register()).
	 */
	struct list_head debugfs_list;

	/* Mutex for binner bo allocation. */
	struct mutex bin_bo_lock;
	/* Reference count for our binner bo. */
	struct kref bin_bo_kref;
};
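
/*
 * Sketch of the O(1) size-bucketed lookup the bo_cache comments above
 * describe. The bucket index shown here is illustrative; see vc4_bo.c
 * for the real list selection logic:
 *
 *	struct list_head *size_list =
 *		&vc4->bo_cache.size_list[size >> PAGE_SHIFT];
 *
 *	if (!list_empty(size_list))
 *		bo = list_first_entry(size_list, struct vc4_bo, size_head);
 */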

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return container_of(dev, struct vc4_dev, base);
}

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
}
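
/*
 * Sketch: code that must keep a purgeable BO resident brackets the access
 * with the usecnt helpers declared later in this header; while the
 * reference is held, the BO cannot be purged:
 *
 *	struct vc4_bo *bo = to_vc4_bo(gem_obj);
 *	int ret = vc4_bo_inc_usecnt(bo);
 *
 *	if (ret)
 *		return ret;
 *	// ...access the BO's backing memory...
 *	vc4_bo_dec_usecnt(bo);
 */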

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return container_of(fence, struct vc4_fence, base);
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};
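
/*
 * Registration sketch for a seqno callback, using vc4_queue_seqno_cb()
 * declared later in this header. The callback runs from a workqueue once
 * the given seqno has been reached (async_flip_complete is a hypothetical
 * name):
 *
 *	static void async_flip_complete(struct vc4_seqno_cb *cb)
 *	{
 *		// ...finish the page flip...
 *	}
 *	...
 *	vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
 *			   async_flip_complete);
 */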

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	struct clk *core_clk;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;

	struct debugfs_regset32 regset;

	/*
	 * Even if HDMI0 on the RPi4 can output modes requiring a pixel
	 * rate higher than 297MHz, it needs some adjustments in the
	 * config.txt file to be able to do so and thus won't always be
	 * available.
	 */
	bool vc5_hdmi_enable_scrambling;

	/*
	 * 4096x2160@60 requires a core overclock to work, so register
	 * whether that is sufficient.
	 */
	bool vc5_hdmi_enable_4096by2160;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return container_of(plane, struct vc4_plane, base);
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * panning updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB in u16.16 format */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * buffer.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in HVS
	 * cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return container_of(state, struct vc4_plane_state, base);
}
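
/*
 * Lifecycle sketch for the dlist fields above: atomic_check computes the
 * software copy (dlist/dlist_count), and the flush path streams it into
 * HVS dlist memory through vc4_plane_write_dlist(), declared later in
 * this header (dlist_next is a hypothetical cursor):
 *
 *	// at vc4_crtc_atomic_flush() time, for each plane on the CRTC:
 *	dlist_next += vc4_plane_write_dlist(plane, dlist_next);
 */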

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI0,
	VC4_ENCODER_TYPE_HDMI1,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;

	void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);

	void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	const char *debugfs_name;

	/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
	unsigned int hvs_available_channels;

	/* Which output of the HVS this pixelvalve sources from. */
	int hvs_output;
};

struct vc4_pv_data {
	struct vc4_crtc_data base;

	/* Depth of the PixelValve FIFO in bytes */
	unsigned int fifo_depth;

	/* Number of pixels output per clock period */
	u8 pixels_per_clock;

	enum vc4_encoder_type encoder_types[4];
};

struct vc5_gamma_entry {
	u32 x_c_terms;
	u32 grad_term;
};

#define VC5_HVS_SET_GAMMA_ENTRY(x, c, g) (struct vc5_gamma_entry){	\
	.x_c_terms = VC4_SET_FIELD((x), SCALER5_DSPGAMMA_OFF_X) |	\
		     VC4_SET_FIELD((c), SCALER5_DSPGAMMA_OFF_C),	\
	.grad_term = (g)						\
}

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	union {
		struct { /* VC4 gamma LUT */
			u8 lut_r[256];
			u8 lut_g[256];
			u8 lut_b[256];
		};
		struct { /* VC5 gamma PWL entries */
			struct vc5_gamma_entry pwl_r[SCALER5_DSPGAMMA_NUM_POINTS];
			struct vc5_gamma_entry pwl_g[SCALER5_DSPGAMMA_NUM_POINTS];
			struct vc5_gamma_entry pwl_b[SCALER5_DSPGAMMA_NUM_POINTS];
			struct vc5_gamma_entry pwl_a[SCALER5_DSPGAMMA_NUM_POINTS];
		};
	};

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;

	/**
	 * @feeds_txp: True if the CRTC feeds our writeback controller.
	 */
	bool feeds_txp;

	/**
	 * @irq_lock: Spinlock protecting the resources shared between
	 * the atomic code and our vblank handler.
	 */
	spinlock_t irq_lock;

	/**
	 * @current_dlist: Start offset of the display list currently
	 * set in the HVS for that CRTC. Protected by @irq_lock, and
	 * copied in vc4_hvs_update_dlist() for the CRTC interrupt
	 * handler to have access to that value.
	 */
	unsigned int current_dlist;

	/**
	 * @current_hvs_channel: HVS channel currently assigned to the
	 * CRTC. Protected by @irq_lock, and copied in
	 * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
	 * access to that value.
	 */
	unsigned int current_hvs_channel;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return container_of(crtc, struct vc4_crtc, base);
}

static inline const struct vc4_crtc_data *
vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc)
{
	return crtc->data;
}

static inline const struct vc4_pv_data *
vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
{
	const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc);

	return container_of(data, struct vc4_pv_data, base);
}

struct drm_connector *vc4_get_crtc_connector(struct drm_crtc *crtc,
					     struct drm_crtc_state *state);

struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
					 struct drm_crtc_state *state);

struct vc4_crtc_state {
	struct drm_crtc_state base;
	/* Dlist area for this CRTC configuration. */
	struct drm_mm_node mm;
	bool txp_armed;
	unsigned int assigned_channel;

	struct drm_connector_tv_margins margins;

	unsigned long hvs_load;

	/* Transitional state below, only valid during atomic commits */
	bool update_muxing;
};

#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

static inline struct vc4_crtc_state *
to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
{
	return container_of(crtc_state, struct vc4_crtc_state, base);
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, hvs->regs + offset)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
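
/*
 * VC4_REG32() builds entries for the debugfs_regset32 tables used all
 * over this driver. A typical (illustrative) table, with register names
 * from vc4_regs.h:
 *
 *	static const struct debugfs_reg32 crtc_regs[] = {
 *		VC4_REG32(PV_CONTROL),
 *		VC4_REG32(PV_V_CONTROL),
 *		VC4_REG32(PV_VSYNCD_EVEN),
 *	};
 */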

struct vc4_exec_info {
	struct vc4_dev *dev;

	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. The paddr (p) gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;

	/* Whether the exec has taken a reference to the binner BO, which should
	 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
	 */
	bool bin_bo_used;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct vc4_dev *dev;

	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;

	bool bin_bo_used;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
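
/*
 * Typical use of the helpers above, following the job_lock rule
 * documented in struct vc4_dev (a sketch, not a verbatim copy of
 * vc4_irq.c or vc4_gem.c):
 *
 *	unsigned long irqflags;
 *	struct vc4_exec_info *exec;
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	// ...inspect exec->ct0ca/ct0ea or queue the next job...
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */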

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */	\
	int ret__;							\
	might_sleep();							\
	for (;;) {							\
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP;							\
		/* Guarantee COND check prior to timeout */		\
		barrier();						\
		if (COND) {						\
			ret__ = 0;					\
			break;						\
		}							\
		if (expired__) {					\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		usleep_range(wait__, wait__ * 2);			\
		if (wait__ < (Wmax))					\
			wait__ <<= 1;					\
	}								\
	ret__;								\
})

#define _wait_for(COND, US, Wmin, Wmax)	__wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS)	_wait_for((COND), (MS) * 1000, 10, 1000)
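
/*
 * Usage sketch for wait_for(). REG_STATUS and STATUS_IDLE are
 * hypothetical names; the timeout is in milliseconds and the result is
 * 0 on success or -ETIMEDOUT:
 *
 *	ret = wait_for(readl(base + REG_STATUS) & STATUS_IDLE, 10);
 *	if (ret)
 *		dev_err(dev, "timed out waiting for idle\n");
 */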

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_bo_dumb_create(struct drm_file *file_priv,
		       struct drm_device *dev,
		       struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
int vc4_bo_debugfs_init(struct drm_minor *minor);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
		  const struct drm_crtc_funcs *crtc_funcs,
		  const struct drm_crtc_helper_funcs *crtc_helper_funcs);
int vc4_page_flip(struct drm_crtc *crtc,
		  struct drm_framebuffer *fb,
		  struct drm_pending_vblank_event *event,
		  uint32_t flags,
		  struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
int vc4_crtc_late_register(struct drm_crtc *crtc);
void vc4_crtc_send_vblank(struct drm_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
			  unsigned int *left, unsigned int *right,
			  unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int vc4_debugfs_add_file(struct drm_minor *minor,
			 const char *filename,
			 int (*show)(struct seq_file*, void*),
			 void *data);
int vc4_debugfs_add_regset32(struct drm_minor *minor,
			     const char *filename,
			     struct debugfs_regset32 *regset);
#else
static inline int vc4_debugfs_add_file(struct drm_minor *minor,
				       const char *filename,
				       int (*show)(struct seq_file*, void*),
				       void *data)
{
	return 0;
}

static inline int vc4_debugfs_add_regset32(struct drm_minor *minor,
					   const char *filename,
					   struct debugfs_regset32 *regset)
{
	return 0;
}
#endif

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_firmware_kms.c */
extern struct platform_driver vc4_firmware_kms_driver;

/* vc4_gem.c */
int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
void vc4_irq_enable(struct drm_device *dev);
void vc4_irq_disable(struct drm_device *dev);
int vc4_irq_install(struct drm_device *dev, int irq);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);
int vc4_hvs_debugfs_init(struct drm_minor *minor);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type,
				 uint32_t possible_crtcs);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
int vc4_v3d_debugfs_init(struct drm_minor *minor);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);

#endif /* _VC4_DRV_H_ */