1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2015 Broadcom
9 * This is the general code for implementing KMS mode setting that
10 * doesn't clearly associate with any of the other objects (plane,
11 * crtc, HDMI encoder).
14 #include <linux/clk.h>
16 #include <drm/drm_atomic.h>
17 #include <drm/drm_atomic_helper.h>
18 #include <drm/drm_crtc.h>
19 #include <drm/drm_gem_framebuffer_helper.h>
20 #include <drm/drm_plane_helper.h>
21 #include <drm/drm_probe_helper.h>
22 #include <drm/drm_vblank.h>
/* Number of HVS FIFOs (channels) tracked in the global HVS state. */
27 #define HVS_NUM_CHANNELS 3
/*
 * Global CTM (color transform matrix) state, kept as a DRM private
 * object since the HW supports only one active CTM at a time.
 */
29 struct vc4_ctm_state {
30 struct drm_private_state base;
/* Matrix currently committed by userland, or NULL when disabled. */
31 struct drm_color_ctm *ctm;
/* Downcast a DRM private state pointer to our CTM state wrapper. */
35 static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
37 return container_of(priv, struct vc4_ctm_state, base);
/*
 * Global HVS state (DRM private object): the core clock rate required
 * by the new display state plus per-FIFO bookkeeping.
 */
40 struct vc4_hvs_state {
41 struct drm_private_state base;
/* Core clock rate (Hz) needed to scan out the committed state. */
42 unsigned long core_clock_rate;
/* Load (in cycles) the CRTC assigned to this FIFO contributes. */
46 unsigned long fifo_load;
/* Commit still in flight on this FIFO; waited on in commit_tail. */
47 struct drm_crtc_commit *pending_commit;
48 } fifo_state[HVS_NUM_CHANNELS];
/* Downcast a DRM private state pointer to the global HVS state. */
51 static struct vc4_hvs_state *
52 to_vc4_hvs_state(struct drm_private_state *priv)
54 return container_of(priv, struct vc4_hvs_state, base);
/*
 * Global load-tracker state (DRM private object) accumulating the
 * memory-bus and HVS load of all active planes.
 */
57 struct vc4_load_tracker_state {
58 struct drm_private_state base;
/* Downcast a DRM private state pointer to the load-tracker state. */
63 static struct vc4_load_tracker_state *
64 to_vc4_load_tracker_state(struct drm_private_state *priv)
66 return container_of(priv, struct vc4_load_tracker_state, base);
/*
 * Pull the CTM private object into @state, taking the dedicated
 * ctm_state_lock first so concurrent commits serialize on the single
 * hardware CTM. Returns an ERR_PTR on lock or allocation failure.
 */
69 static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
70 struct drm_private_obj *manager)
72 struct drm_device *dev = state->dev;
73 struct vc4_dev *vc4 = to_vc4_dev(dev);
74 struct drm_private_state *priv_state;
/* Serialize CTM updates across commits under the acquire context. */
77 ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
81 priv_state = drm_atomic_get_private_obj_state(state, manager);
82 if (IS_ERR(priv_state))
83 return ERR_CAST(priv_state);
85 return to_vc4_ctm_state(priv_state);
/*
 * .atomic_duplicate_state for the CTM private object: shallow-copy the
 * current state and re-initialize the DRM private state base.
 */
88 static struct drm_private_state *
89 vc4_ctm_duplicate_state(struct drm_private_obj *obj)
91 struct vc4_ctm_state *state;
93 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
97 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
/* .atomic_destroy_state for the CTM private object. */
102 static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
103 struct drm_private_state *state)
105 struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);
/* Private-object vtable for the CTM manager. */
110 static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
111 .atomic_duplicate_state = vc4_ctm_duplicate_state,
112 .atomic_destroy_state = vc4_ctm_destroy_state,
/* drmm action: tear down the CTM private object on device release. */
115 static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
117 struct vc4_dev *vc4 = to_vc4_dev(dev);
119 drm_atomic_private_obj_fini(&vc4->ctm_manager);
/*
 * Allocate the initial (disabled) CTM state, register the private
 * object, and hook cleanup into the managed-release machinery.
 * Returns 0 on success or a negative errno.
 */
122 static int vc4_ctm_obj_init(struct vc4_dev *vc4)
124 struct vc4_ctm_state *ctm_state;
126 drm_modeset_lock_init(&vc4->ctm_state_lock);
128 ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
132 drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
133 &vc4_ctm_state_funcs);
135 return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
138 /* Converts a DRM S31.32 value to the HW S0.9 format. */
139 static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
/* Sign bit of the S31.32 input becomes the S0.9 sign bit (bit 9). */
144 r = in & BIT_ULL(63) ? BIT(9) : 0;
/* Any non-zero integer bit means |value| >= 1.0: saturate. */
146 if ((in & GENMASK_ULL(62, 32)) > 0) {
150 /* Otherwise take the 9 most important fractional bits. */
151 r |= (in >> 23) & GENMASK(8, 0);
/*
 * Program the committed CTM into the HVS OLED coefficient registers.
 * Each 3x3 matrix entry is converted from S31.32 to the HW S0.9
 * format; SCALER_OLEDOFFS then routes the CTM to the selected FIFO
 * (a fifo value of 0 leaves the CTM disabled).
 */
158 vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
160 struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
161 struct drm_color_ctm *ctm = ctm_state->ctm;
/* The firmware owns the HVS registers in firmware-KMS mode. */
163 if (vc4->firmware_kms)
166 if (ctm_state->fifo) {
167 HVS_WRITE(SCALER_OLEDCOEF2,
168 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
169 SCALER_OLEDCOEF2_R_TO_R) |
170 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
171 SCALER_OLEDCOEF2_R_TO_G) |
172 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
173 SCALER_OLEDCOEF2_R_TO_B));
174 HVS_WRITE(SCALER_OLEDCOEF1,
175 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
176 SCALER_OLEDCOEF1_G_TO_R) |
177 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
178 SCALER_OLEDCOEF1_G_TO_G) |
179 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
180 SCALER_OLEDCOEF1_G_TO_B));
181 HVS_WRITE(SCALER_OLEDCOEF0,
182 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
183 SCALER_OLEDCOEF0_B_TO_R) |
184 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
185 SCALER_OLEDCOEF0_B_TO_G) |
186 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
187 SCALER_OLEDCOEF0_B_TO_B));
/* Route the CTM to the chosen FIFO (0 == disabled). */
190 HVS_WRITE(SCALER_OLEDOFFS,
191 VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
/*
 * Fetch the *new* HVS global state already pulled into @state.
 * Returns NULL if the private object is not part of this state.
 */
194 static struct vc4_hvs_state *
195 vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
197 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
198 struct drm_private_state *priv_state;
200 priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
201 if (IS_ERR(priv_state))
202 return ERR_CAST(priv_state);
204 return to_vc4_hvs_state(priv_state);
/*
 * Fetch the *old* HVS global state already pulled into @state.
 * Returns NULL if the private object is not part of this state.
 */
207 static struct vc4_hvs_state *
208 vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
210 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
211 struct drm_private_state *priv_state;
213 priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
214 if (IS_ERR(priv_state))
215 return ERR_CAST(priv_state);
217 return to_vc4_hvs_state(priv_state);
/*
 * Pull the HVS channels private object into @state (acquiring its
 * lock as needed) and return the new state, or an ERR_PTR.
 */
220 static struct vc4_hvs_state *
221 vc4_hvs_get_global_state(struct drm_atomic_state *state)
223 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
224 struct drm_private_state *priv_state;
226 priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
227 if (IS_ERR(priv_state))
228 return ERR_CAST(priv_state);
230 return to_vc4_hvs_state(priv_state);
/*
 * Program the FIFO-to-pixelvalve muxing on the original VC4 HVS.
 * Only the DSP3 mux is configurable there: it is either fed by FIFO2
 * or disabled when the transposer (TXP) owns FIFO2.
 */
233 static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
234 struct drm_atomic_state *state)
236 struct drm_crtc_state *crtc_state;
237 struct drm_crtc *crtc;
240 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
241 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
242 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
/* Only active CRTCs assigned to channel 2 are relevant here. */
246 if (!crtc_state->active)
249 if (vc4_state->assigned_channel != 2)
253 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
255 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
257 * DSP3 is connected to FIFO2 unless the transposer is
258 * enabled. In this case, FIFO 2 is directly accessed by the
259 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
262 if (vc4_crtc->feeds_txp)
263 dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
265 dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
/* Read-modify-write just the DSP3 mux field. */
267 dispctrl = HVS_READ(SCALER_DISPCTRL) &
268 ~SCALER_DISPCTRL_DSP3_MUX_MASK;
269 HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
/*
 * Program the FIFO-to-output muxing on the BCM2711 (VC5) HVS.
 * For each CRTC whose muxing was flagged for update by the atomic
 * check, write the assigned channel into the output's mux field in
 * the relevant scaler register (DSP2/DSP3/DSP4/DSP5).
 */
273 static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
274 struct drm_atomic_state *state)
276 struct drm_crtc_state *crtc_state;
277 struct drm_crtc *crtc;
282 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
283 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
284 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
/* Skip CRTCs whose channel assignment did not change. */
286 if (!vc4_state->update_muxing)
289 switch (vc4_crtc->data->hvs_output) {
/* DSP2: a two-way mux, selected by whether channel 2 feeds it. */
291 mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
292 reg = HVS_READ(SCALER_DISPECTRL);
293 HVS_WRITE(SCALER_DISPECTRL,
294 (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
295 VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
299 if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
302 mux = vc4_state->assigned_channel;
304 reg = HVS_READ(SCALER_DISPCTRL);
305 HVS_WRITE(SCALER_DISPCTRL,
306 (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
307 VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
311 if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
314 mux = vc4_state->assigned_channel;
316 reg = HVS_READ(SCALER_DISPEOLN);
317 HVS_WRITE(SCALER_DISPEOLN,
318 (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
319 VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
324 if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
327 mux = vc4_state->assigned_channel;
329 reg = HVS_READ(SCALER_DISPDITHER);
330 HVS_WRITE(SCALER_DISPDITHER,
331 (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
332 VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
/*
 * Custom atomic commit tail. Beyond the standard helper sequence
 * (disables -> planes -> enables -> hw_done -> flip_done -> cleanup)
 * it: masks HVS underrun IRQs for affected channels, waits for any
 * commit still pending on a FIFO we are touching, programs the CTM
 * and the FIFO muxing, and on BCM2711 manages core clock rate
 * requests around the commit.
 */
341 static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
343 struct drm_device *dev = state->dev;
344 struct vc4_dev *vc4 = to_vc4_dev(dev);
345 struct vc4_hvs *hvs = vc4->hvs;
346 struct drm_crtc_state *new_crtc_state;
347 struct vc4_hvs_state *new_hvs_state;
348 struct drm_crtc *crtc;
349 struct vc4_hvs_state *old_hvs_state;
350 unsigned int channel;
351 struct clk_request *core_req;
354 old_hvs_state = vc4_hvs_get_old_global_state(state);
355 if (WARN_ON(!old_hvs_state))
358 new_hvs_state = vc4_hvs_get_new_global_state(state);
359 if (WARN_ON(!new_hvs_state))
/* Mask underrun reporting on every channel touched by this commit. */
362 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
363 struct vc4_crtc_state *vc4_crtc_state;
365 if (!new_crtc_state->commit || vc4->firmware_kms)
368 vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
369 vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
/* Wait out any commit still in flight on an in-use FIFO. */
372 for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
373 struct drm_crtc_commit *commit;
376 if (!old_hvs_state->fifo_state[channel].in_use)
379 commit = old_hvs_state->fifo_state[channel].pending_commit;
383 ret = drm_crtc_commit_wait(commit);
385 drm_err(dev, "Timed out waiting for commit\n");
/* Drop the reference taken in vc4_atomic_commit_setup(). */
387 drm_crtc_commit_put(commit);
388 old_hvs_state->fifo_state[channel].pending_commit = NULL;
/*
 * BCM2711 only: raise a temporary core clock request high enough
 * for the transition, then drop the previous steady-state request.
 * NOTE(review): clk_request_start/clk_request_done appear to be a
 * downstream clk API — confirm semantics against that tree.
 */
391 if (vc4->hvs && vc4->hvs->hvs5) {
392 unsigned long core_rate = max_t(unsigned long,
394 new_hvs_state->core_clock_rate);
396 core_req = clk_request_start(hvs->core_clk, core_rate);
398 * And remove the previous one based on the HVS
399 * requirements if any.
401 clk_request_done(hvs->core_req);
404 drm_atomic_helper_commit_modeset_disables(dev, state);
406 vc4_ctm_commit(vc4, state);
/* Firmware-KMS leaves the HVS muxing to the VideoCore firmware. */
408 if (!vc4->firmware_kms) {
409 if (vc4->hvs && vc4->hvs->hvs5)
410 vc5_hvs_pv_muxing_commit(vc4, state);
412 vc4_hvs_pv_muxing_commit(vc4, state);
415 drm_atomic_helper_commit_planes(dev, state, 0);
417 drm_atomic_helper_commit_modeset_enables(dev, state);
419 drm_atomic_helper_fake_vblank(state);
421 drm_atomic_helper_commit_hw_done(state);
423 drm_atomic_helper_wait_for_flip_done(dev, state);
425 drm_atomic_helper_cleanup_planes(dev, state);
/* Settle on the steady-state core clock rate for the new state. */
427 if (vc4->hvs && vc4->hvs->hvs5) {
428 drm_dbg(dev, "Running the core clock at %lu Hz\n",
429 new_hvs_state->core_clock_rate);
432 * Request a clock rate based on the current HVS
435 hvs->core_req = clk_request_start(hvs->core_clk,
436 new_hvs_state->core_clock_rate);
438 /* And drop the temporary request */
439 clk_request_done(core_req);
/*
 * .atomic_commit_setup hook: record, for every FIFO used by this
 * commit, a reference to the CRTC commit so a later commit touching
 * the same FIFO can wait for it (see vc4_atomic_commit_tail()).
 */
443 static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
445 struct drm_device *dev = state->dev;
446 struct vc4_dev *vc4 = to_vc4_dev(dev);
447 struct drm_crtc_state *crtc_state;
448 struct vc4_hvs_state *hvs_state;
449 struct drm_crtc *crtc;
452 /* We know for sure we don't want an async update here. Set
453 * state->legacy_cursor_update to false to prevent
454 * drm_atomic_helper_setup_commit() from auto-completing
457 if (!vc4->firmware_kms)
458 state->legacy_cursor_update = false;
460 hvs_state = vc4_hvs_get_new_global_state(state);
461 if (WARN_ON(IS_ERR(hvs_state)))
462 return PTR_ERR(hvs_state);
464 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
465 struct vc4_crtc_state *vc4_crtc_state =
466 to_vc4_crtc_state(crtc_state);
467 unsigned int channel =
468 vc4_crtc_state->assigned_channel;
470 if (channel == VC4_HVS_CHANNEL_DISABLED)
473 if (!hvs_state->fifo_state[channel].in_use)
/* Reference is dropped in vc4_atomic_commit_tail(). */
476 hvs_state->fifo_state[channel].pending_commit =
477 drm_crtc_commit_get(crtc_state->commit);
/*
 * .fb_create hook: when userland passed no explicit modifier, derive
 * it from the BO's tiling state (set via vc4_set_tiling_ioctl) before
 * handing off to the generic GEM framebuffer creation helper.
 */
483 static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
484 struct drm_file *file_priv,
485 const struct drm_mode_fb_cmd2 *mode_cmd)
487 struct drm_mode_fb_cmd2 mode_cmd_local;
489 /* If the user didn't specify a modifier, use the
490 * vc4_set_tiling_ioctl() state for the BO.
492 if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
493 struct drm_gem_object *gem_obj;
496 gem_obj = drm_gem_object_lookup(file_priv,
497 mode_cmd->handles[0]);
499 DRM_DEBUG("Failed to look up GEM BO %d\n",
500 mode_cmd->handles[0]);
501 return ERR_PTR(-ENOENT);
503 bo = to_vc4_bo(gem_obj);
/* Work on a local copy so the caller's cmd stays untouched. */
505 mode_cmd_local = *mode_cmd;
508 mode_cmd_local.modifier[0] =
509 DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
511 mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
514 drm_gem_object_put(gem_obj);
516 mode_cmd = &mode_cmd_local;
519 return drm_gem_fb_create(dev, file_priv, mode_cmd);
522 /* Our CTM has some peculiar limitations: we can only enable it for one CRTC
523 * at a time and the HW only supports S0.9 scalars. To account for the latter,
524 * we don't allow userland to set a CTM that we have no hope of approximating.
527 vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
529 struct vc4_dev *vc4 = to_vc4_dev(dev);
530 struct vc4_ctm_state *ctm_state = NULL;
531 struct drm_crtc *crtc;
532 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
533 struct drm_color_ctm *ctm;
/* First pass: pull in the CTM state if any CRTC is dropping its CTM. */
536 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
537 /* CTM is being disabled. */
538 if (!new_crtc_state->ctm && old_crtc_state->ctm) {
539 ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
540 if (IS_ERR(ctm_state))
541 return PTR_ERR(ctm_state);
/* Second pass: validate any new or changed CTM. */
546 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
547 if (new_crtc_state->ctm == old_crtc_state->ctm)
551 ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
552 if (IS_ERR(ctm_state))
553 return PTR_ERR(ctm_state);
556 /* CTM is being enabled or the matrix changed. */
557 if (new_crtc_state->ctm) {
558 struct vc4_crtc_state *vc4_crtc_state =
559 to_vc4_crtc_state(new_crtc_state);
561 /* fifo is 1-based since 0 disables CTM. */
562 int fifo = vc4_crtc_state->assigned_channel + 1;
564 /* Check userland isn't trying to turn on CTM for more
565 * than one CRTC at a time.
567 if (ctm_state->fifo && ctm_state->fifo != fifo) {
568 DRM_DEBUG_DRIVER("Too many CTM configured\n");
572 /* Check we can approximate the specified CTM.
573 * We disallow scalars |c| > 1.0 since the HW has
576 ctm = new_crtc_state->ctm->data;
577 for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
578 u64 val = ctm->matrix[i];
/* Magnitude above 1.0 in S31.32 cannot be represented in S0.9. */
581 if (val > BIT_ULL(32))
585 ctm_state->fifo = fifo;
586 ctm_state->ctm = ctm;
/*
 * Update the global load tracker with the delta of every plane in
 * this commit, then (unless the tracker is disabled) reject states
 * whose memory-bus or HVS cycle load exceeds safe margins.
 */
593 static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
595 struct drm_plane_state *old_plane_state, *new_plane_state;
596 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
597 struct vc4_load_tracker_state *load_state;
598 struct drm_private_state *priv_state;
599 struct drm_plane *plane;
602 priv_state = drm_atomic_get_private_obj_state(state,
604 if (IS_ERR(priv_state))
605 return PTR_ERR(priv_state);
607 load_state = to_vc4_load_tracker_state(priv_state);
608 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
609 new_plane_state, i) {
610 struct vc4_plane_state *vc4_plane_state;
/* Retire the old plane's contribution... */
612 if (old_plane_state->fb && old_plane_state->crtc) {
613 vc4_plane_state = to_vc4_plane_state(old_plane_state);
614 load_state->membus_load -= vc4_plane_state->membus_load;
615 load_state->hvs_load -= vc4_plane_state->hvs_load;
/* ...and add the new plane's contribution. */
618 if (new_plane_state->fb && new_plane_state->crtc) {
619 vc4_plane_state = to_vc4_plane_state(new_plane_state);
620 load_state->membus_load += vc4_plane_state->membus_load;
621 load_state->hvs_load += vc4_plane_state->hvs_load;
625 /* Don't check the load when the tracker is disabled. */
626 if (!vc4->load_tracker_enabled)
629 /* The absolute limit is 2Gbyte/sec, but let's take a margin to let
630 * the system work when other blocks are accessing the memory.
632 if (load_state->membus_load > SZ_1G + SZ_512M)
635 /* HVS clock is supposed to run @ 250Mhz, let's take a margin and
636 * consider the maximum number of cycles is 240M.
638 if (load_state->hvs_load > 240000000ULL)
/*
 * .atomic_duplicate_state for the load tracker: shallow-copy the
 * current state so the accumulated loads carry over.
 */
644 static struct drm_private_state *
645 vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
647 struct vc4_load_tracker_state *state;
649 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
653 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
/* .atomic_destroy_state for the load tracker private object. */
658 static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
659 struct drm_private_state *state)
661 struct vc4_load_tracker_state *load_state;
663 load_state = to_vc4_load_tracker_state(state);
/* Private-object vtable for the load tracker. */
667 static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
668 .atomic_duplicate_state = vc4_load_tracker_duplicate_state,
669 .atomic_destroy_state = vc4_load_tracker_destroy_state,
/* drmm action: tear down the load-tracker private object. */
672 static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
674 struct vc4_dev *vc4 = to_vc4_dev(dev);
676 drm_atomic_private_obj_fini(&vc4->load_tracker);
/*
 * Allocate the initial (zero-load) tracker state and register the
 * private object with managed cleanup. Returns 0 or negative errno.
 */
679 static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
681 struct vc4_load_tracker_state *load_state;
683 load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
687 drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
689 &vc4_load_tracker_state_funcs);
691 return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
/*
 * .atomic_duplicate_state for the HVS channels object. Copies the
 * per-FIFO in_use/fifo_load flags and the core clock rate, but NOT
 * pending_commit: commit references must not be duplicated.
 */
694 static struct drm_private_state *
695 vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
697 struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
698 struct vc4_hvs_state *state;
701 state = kzalloc(sizeof(*state), GFP_KERNEL);
705 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
707 for (i = 0; i < HVS_NUM_CHANNELS; i++) {
708 state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
709 state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
712 state->core_clock_rate = old_state->core_clock_rate;
/*
 * .atomic_destroy_state for the HVS channels object: drop any commit
 * references still held per-FIFO before freeing.
 */
717 static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
718 struct drm_private_state *state)
720 struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
723 for (i = 0; i < HVS_NUM_CHANNELS; i++) {
724 if (!hvs_state->fifo_state[i].pending_commit)
727 drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
/* Private-object vtable for the HVS channels object. */
733 static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
734 .atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
735 .atomic_destroy_state = vc4_hvs_channels_destroy_state,
/* drmm action: tear down the HVS channels private object. */
738 static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
740 struct vc4_dev *vc4 = to_vc4_dev(dev);
742 drm_atomic_private_obj_fini(&vc4->hvs_channels);
/*
 * Allocate the initial (all-FIFOs-free) HVS channels state and
 * register the private object with managed cleanup.
 */
745 static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
747 struct vc4_hvs_state *state;
749 state = kzalloc(sizeof(*state), GFP_KERNEL);
753 drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
755 &vc4_hvs_state_funcs);
757 return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
761 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
762 * the TXP (and therefore all the CRTCs found on that platform).
764 * The naive (and our initial) implementation would just iterate over
765 * all the active CRTCs, try to find a suitable FIFO, and then remove it
766 * from the pool of available FIFOs. However, there are a few corner
767 * cases that need to be considered:
769 * - When running in a dual-display setup (so with two CRTCs involved),
770 * we can update the state of a single CRTC (for example by changing
771 * its mode using xrandr under X11) without affecting the other. In
772 * this case, the other CRTC wouldn't be in the state at all, so we
773 * need to consider all the running CRTCs in the DRM device to assign
774 * a FIFO, not just the one in the state.
776 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
777 * enabled CRTCs to pull their CRTC state into the global state, since
778 * a page flip would start considering their vblank to complete. Since
779 * we don't have a guarantee that they are actually active, that
780 * vblank might never happen, and shouldn't even be considered if we
781 * want to do a page flip on a single CRTC. That can be tested by
782 * doing a modetest -v first on HDMI1 and then on HDMI0.
784 * - Since we need the pixelvalve to be disabled and enabled back when
785 * the FIFO is changed, we should keep the FIFO assigned for as long
786 * as the CRTC is enabled, only considering it free again once that
787 * CRTC has been disabled. This can be tested by booting X11 on a
788 * single display, and changing the resolution down and then back up.
/*
 * Assign an HVS FIFO (channel) to every CRTC whose enable state
 * changed, in ascending CRTC index order, picking the first free
 * channel compatible with that CRTC. See the long comment above for
 * why this greedy scheme is sufficient for the supported layouts.
 */
790 static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
791 struct drm_atomic_state *state)
793 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
794 struct vc4_hvs_state *hvs_new_state;
795 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
796 struct drm_crtc *crtc;
797 unsigned int unassigned_channels = 0;
800 hvs_new_state = vc4_hvs_get_global_state(state);
801 if (IS_ERR(hvs_new_state))
802 return PTR_ERR(hvs_new_state);
/* Build a bitmask of channels not claimed by any running CRTC. */
804 for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
805 if (!hvs_new_state->fifo_state[i].in_use)
806 unassigned_channels |= BIT(i);
808 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
809 struct vc4_crtc_state *old_vc4_crtc_state =
810 to_vc4_crtc_state(old_crtc_state);
811 struct vc4_crtc_state *new_vc4_crtc_state =
812 to_vc4_crtc_state(new_crtc_state);
813 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
814 unsigned int matching_channels;
815 unsigned int channel;
/* The firmware owns channel assignment in firmware-KMS mode. */
817 if (vc4->firmware_kms)
820 /* Nothing to do here, let's skip it */
821 if (old_crtc_state->enable == new_crtc_state->enable)
824 /* Muxing will need to be modified, mark it as such */
825 new_vc4_crtc_state->update_muxing = true;
827 /* If we're disabling our CRTC, we put back our channel */
828 if (!new_crtc_state->enable) {
829 channel = old_vc4_crtc_state->assigned_channel;
830 hvs_new_state->fifo_state[channel].in_use = false;
831 new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
836 * The problem we have to solve here is that we have
837 * up to 7 encoders, connected to up to 6 CRTCs.
839 * Those CRTCs, depending on the instance, can be
840 * routed to 1, 2 or 3 HVS FIFOs, and we need to
841 * change the muxing between FIFOs and outputs in
842 * the HVS accordingly.
844 * It would be pretty hard to come up with an
845 * algorithm that would generically solve
846 * this. However, the current routing trees we support
847 * allow us to simplify a bit the problem.
849 * Indeed, with the current supported layouts, if we
850 * try to assign in the ascending crtc index order the
851 * FIFOs, we can't fall into the situation where an
852 * earlier CRTC that had multiple routes is assigned
853 * one that was the only option for a later CRTC.
855 * If the layout changes and doesn't give us that in
856 * the future, we will need to have something smarter,
857 * but it works so far.
859 matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
860 if (!matching_channels)
/* Claim the lowest-numbered compatible free channel. */
863 channel = ffs(matching_channels) - 1;
864 new_vc4_crtc_state->assigned_channel = channel;
865 unassigned_channels &= ~BIT(channel);
866 hvs_new_state->fifo_state[channel].in_use = true;
/*
 * Compute the core clock rate the new state needs: the maximum of the
 * aggregate COB (FIFO) load of all in-use channels and the scaled HVS
 * pixel load, stored in the global HVS state for commit_tail to apply.
 */
873 vc4_core_clock_atomic_check(struct drm_atomic_state *state)
875 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
876 struct drm_private_state *priv_state;
877 struct vc4_hvs_state *hvs_new_state;
878 struct vc4_load_tracker_state *load_state;
879 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
880 struct drm_crtc *crtc;
881 unsigned int num_outputs;
882 unsigned long pixel_rate;
883 unsigned long cob_rate;
886 priv_state = drm_atomic_get_private_obj_state(state,
888 if (IS_ERR(priv_state))
889 return PTR_ERR(priv_state);
891 load_state = to_vc4_load_tracker_state(priv_state);
893 hvs_new_state = vc4_hvs_get_global_state(state);
/* Refresh the per-FIFO load for every CRTC in this commit. */
897 for_each_oldnew_crtc_in_state(state, crtc,
901 if (old_crtc_state->active) {
902 struct vc4_crtc_state *old_vc4_state =
903 to_vc4_crtc_state(old_crtc_state);
904 unsigned int channel = old_vc4_state->assigned_channel;
906 hvs_new_state->fifo_state[channel].fifo_load = 0;
909 if (new_crtc_state->active) {
910 struct vc4_crtc_state *new_vc4_state =
911 to_vc4_crtc_state(new_crtc_state);
912 unsigned int channel = new_vc4_state->assigned_channel;
914 hvs_new_state->fifo_state[channel].fifo_load =
915 new_vc4_state->hvs_load;
/* Sum the load of every in-use channel. */
921 for (i = 0; i < HVS_NUM_CHANNELS; i++) {
922 if (!hvs_new_state->fifo_state[i].in_use)
926 cob_rate += hvs_new_state->fifo_state[i].fifo_load;
/* Scale the raw HVS load: 40% with multiple outputs, 60% otherwise. */
929 pixel_rate = load_state->hvs_load;
930 if (num_outputs > 1) {
931 pixel_rate = (pixel_rate * 40) / 100;
933 pixel_rate = (pixel_rate * 60) / 100;
936 hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
/*
 * Top-level .atomic_check: FIFO/channel assignment, CTM validation,
 * the generic DRM helpers, load tracking, then core clock rate
 * computation — in that order, failing fast on the first error.
 */
943 vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
947 ret = vc4_pv_muxing_atomic_check(dev, state);
951 ret = vc4_ctm_atomic_check(dev, state);
955 ret = drm_atomic_helper_check(dev, state);
959 ret = vc4_load_tracker_atomic_check(state);
963 return vc4_core_clock_atomic_check(state);
/* Mode-config helper hooks: custom commit setup and tail. */
966 static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
967 .atomic_commit_setup = vc4_atomic_commit_setup,
968 .atomic_commit_tail = vc4_atomic_commit_tail,
/* Mode-config entry points: custom check/fb_create, stock commit. */
971 static const struct drm_mode_config_funcs vc4_mode_funcs = {
972 .atomic_check = vc4_atomic_check,
973 .atomic_commit = drm_atomic_helper_commit,
974 .fb_create = vc4_fb_create,
977 int vc4_kms_load(struct drm_device *dev)
979 struct vc4_dev *vc4 = to_vc4_dev(dev);
980 bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
985 * The limits enforced by the load tracker aren't relevant for
986 * the BCM2711, but the load tracker computations are used for
987 * the core clock rate calculation.
990 /* Start with the load tracker enabled. Can be
991 * disabled through the debugfs load_tracker file.
993 vc4->load_tracker_enabled = true;
996 /* Set support for vblank irq fast disable, before drm_vblank_init() */
997 dev->vblank_disable_immediate = true;
999 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
1001 dev_err(dev->dev, "failed to initialize vblank\n");
1006 dev->mode_config.max_width = 7680;
1007 dev->mode_config.max_height = 7680;
1009 dev->mode_config.max_width = 2048;
1010 dev->mode_config.max_height = 2048;
1013 dev->mode_config.funcs = &vc4_mode_funcs;
1014 dev->mode_config.helper_private = &vc4_mode_config_helpers;
1015 dev->mode_config.preferred_depth = 24;
1016 dev->mode_config.async_page_flip = true;
1017 if (vc4->firmware_kms)
1018 dev->mode_config.normalize_zpos = true;
1020 ret = vc4_ctm_obj_init(vc4);
1024 ret = vc4_load_tracker_obj_init(vc4);
1028 ret = vc4_hvs_channels_obj_init(vc4);
1032 drm_mode_config_reset(dev);
1034 drm_kms_helper_poll_init(dev);