// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_drv.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = dev->dev_private;
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};
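/*
 * Illustrative note (not from the original source): in the DRM S31.32
 * fixed-point format the low 32 bits are the fraction, so 0.25 is
 * 0x0000000040000000.  Shifting right by 23 keeps the top 9 fraction
 * bits: 0x40000000 >> 23 = 0x80 = 128, and 128/512 = 0.25 in the HW's
 * S0.9 format.
 */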
/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (vc4->firmware_kms)
		return;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
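/*
 * Added note: the muxing values below start from the "nothing assigned"
 * defaults (0 for DSP2, 3 for DSP3/4/5); each active CRTC then overrides
 * the entry for the HVS output it drives with its assigned FIFO, and a
 * register is only rewritten when its mux field actually changes,
 * presumably to avoid disturbing outputs that are still scanning out.
 */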
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char dsp2_mux = 0;
	unsigned char dsp3_mux = 3;
	unsigned char dsp4_mux = 3;
	unsigned char dsp5_mux = 3;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

		if (!crtc_state->active)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
			break;

		case 3:
			dsp3_mux = vc4_state->assigned_channel;
			break;

		case 4:
			dsp4_mux = vc4_state->assigned_channel;
			break;

		case 5:
			dsp5_mux = vc4_state->assigned_channel;
			break;

		default:
			break;
		}
	}

	reg = HVS_READ(SCALER_DISPECTRL);
	if (FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg) != dsp2_mux)
		HVS_WRITE(SCALER_DISPECTRL,
			  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
			  VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));

	reg = HVS_READ(SCALER_DISPCTRL);
	if (FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg) != dsp3_mux)
		HVS_WRITE(SCALER_DISPCTRL,
			  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
			  VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));

	reg = HVS_READ(SCALER_DISPEOLN);
	if (FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg) != dsp4_mux)
		HVS_WRITE(SCALER_DISPEOLN,
			  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
			  VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));

	reg = HVS_READ(SCALER_DISPDITHER);
	if (FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg) != dsp5_mux)
		HVS_WRITE(SCALER_DISPDITHER,
			  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
			  VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
}
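/*
 * Added note: this runs either directly (blocking commits) or from the
 * commit_work worker (nonblocking commits).  It masks HVS underrun
 * interrupts for every channel touched by this commit, raises the HVS
 * core clock floor on HVS5 hardware before applying the update, and
 * finally releases the async_modeset semaphore taken in
 * vc4_atomic_commit().
 */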
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc *vc4_crtc;
	int i;

	for (i = 0; vc4->hvs && i < dev->mode_config.num_crtc; i++) {
		struct __drm_crtcs_state *_state = &state->crtcs[i];
		struct vc4_crtc_state *vc4_crtc_state;

		if (!_state->ptr || !_state->commit)
			continue;

		vc4_crtc = to_vc4_crtc(_state->ptr);
		vc4_crtc_state = to_vc4_crtc_state(_state->state);
		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
	}

	if (vc4->hvs && vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 500000000);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	if (!vc4->firmware_kms) {
		vc4_ctm_commit(vc4, state);
		vc4_hvs_pv_muxing_commit(vc4, state);
	}

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}
static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}
/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. For now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	if (!vc4->firmware_kms)
		state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
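			/* Illustrative example (added): after clearing the
			 * sign bit, a coefficient of 1.5 is 0x180000000 in
			 * S31.32, which is greater than BIT_ULL(32) (1.0)
			 * and gets rejected, while exactly 1.0
			 * (0x100000000) still passes.
			 */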
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}
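/*
 * Added note: the load tracker keeps a single running total of memory-bus
 * and HVS load across all planes in a private state object; each atomic
 * check subtracts the contribution of the old plane states and adds the
 * new ones before comparing against the limits below.
 */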
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	if (!vc4->load_tracker_available)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250Mhz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}
static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};
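/*
 * Added note: the HVS has NUM_CHANNELS (3) FIFOs that have to be shared by
 * up to NUM_OUTPUTS (6) outputs.  vc4_atomic_check() below assigns FIFOs
 * greedily in ascending CRTC index order, restricted by each CRTC's
 * hvs_available_channels mask; as the comment in the function explains,
 * this only works because the currently supported routing trees never let
 * an earlier CRTC grab the single FIFO a later CRTC depends on.
 */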
#define NUM_OUTPUTS  6
#define NUM_CHANNELS 3

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		bool is_assigned = false;
		unsigned int channel;

		if (!crtc_state->active || vc4->firmware_kms)
			continue;

		/*
		 * The problem we have to solve here is that we have
		 * up to 7 encoders, connected to up to 6 CRTCs.
		 *
		 * Those CRTCs, depending on the instance, can be
		 * routed to 1, 2 or 3 HVS FIFOs, and we need to
		 * change the muxing between FIFOs and outputs in
		 * the HVS accordingly.
		 *
		 * It would be pretty hard to come up with an
		 * algorithm that would generically solve
		 * this. However, the current routing trees we support
		 * allow us to simplify the problem a bit.
		 *
		 * Indeed, with the current supported layouts, if we
		 * try to assign the FIFOs in ascending crtc index
		 * order, we can't fall into the situation where an
		 * earlier CRTC that had multiple routes is assigned
		 * one that was the only option for a later CRTC.
		 *
		 * If the layout changes and doesn't give us that in
		 * the future, we will need to have something smarter,
		 * but it works so far.
		 */
		for_each_set_bit(channel, &unassigned_channels,
				 sizeof(unassigned_channels)) {

			if (!(BIT(channel) & vc4_crtc->data->hvs_available_channels))
				continue;

			vc4_crtc_state->assigned_channel = channel;
			unassigned_channels &= ~BIT(channel);
			is_assigned = true;
			break;
		}

		if (!is_assigned)
			return -EINVAL;
	}

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}
static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};
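/*
 * Added note: vc4_kms_load() wires the mode-setting core together: it
 * enables the load tracker on pre-BCM2711 hardware, initialises vblank
 * handling, sets the mode_config limits and hooks, and registers the CTM
 * and load-tracker private state objects used by the checks above.
 */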
int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	struct vc4_load_tracker_state *load_state;
	int ret;

	if (!of_device_is_compatible(dev->dev->of_node, "brcm,bcm2711-vc5")) {
		vc4->load_tracker_available = true;

		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	dev->irq_enabled = true;
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (!drm_core_check_feature(dev, DRIVER_RENDER)) {
		/* No V3D as part of vc4. Assume this is Pi4. */
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;
	dev->mode_config.normalize_zpos = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	if (vc4->load_tracker_available) {
		load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
		if (!load_state) {
			drm_atomic_private_obj_fini(&vc4->ctm_manager);
			return -ENOMEM;
		}

		drm_atomic_private_obj_init(dev, &vc4->load_tracker,
					    &load_state->base,
					    &vc4_load_tracker_state_funcs);
	}

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}