X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fvc4%2Fvc4_kms.c;h=a552221e9e05175b797044c3c06a061bf19e3608;hb=e805316d5d44b1f1f080fd8ae8a34b69329d940c;hp=78d4fb0499e39ac79af6f615c9ef95da2c0a4931;hpb=7897c04ad09f815aea1f2dbb05825887d4494a74;p=platform%2Fkernel%2Flinux-rpi.git diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 78d4fb0..a552221 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -11,6 +11,10 @@ * crtc, HDMI encoder). */ +#include +#include +#include + #include #include #include @@ -18,6 +22,7 @@ #include #include #include +#include #include "vc4_drv.h" #include "vc4_regs.h" @@ -116,6 +121,9 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state) struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state); struct drm_color_ctm *ctm = ctm_state->ctm; + if (vc4->firmware_kms) + return; + if (ctm_state->fifo) { HVS_WRITE(SCALER_OLEDCOEF2, VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]), @@ -144,22 +152,149 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state) VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO)); } +static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned int i; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); + u32 dispctrl; + u32 dsp3_mux; + + if (!crtc_state->active) + continue; + + if (vc4_state->assigned_channel != 2) + continue; + + /* + * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to + * FIFO X'. + * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. + * + * DSP3 is connected to FIFO2 unless the transposer is + * enabled. In this case, FIFO 2 is directly accessed by the + * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 + * route. + */ + if (vc4_state->feed_txp) + dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); + else + dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); + + dispctrl = HVS_READ(SCALER_DISPCTRL) & + ~SCALER_DISPCTRL_DSP3_MUX_MASK; + HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); + } +} + +static struct drm_crtc_state * +drm_atomic_get_new_or_current_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + struct drm_crtc_state *crtc_state; + + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + if (crtc_state) + return crtc_state; + + return crtc->state; +} + +#define for_each_new_or_current_crtc_state(__state, crtc, crtc_state) \ + list_for_each_entry(crtc, &__state->dev->mode_config.crtc_list, head) \ + for_each_if(crtc_state = drm_atomic_get_new_or_current_crtc_state(__state, crtc)) + +static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned char dsp2_mux = 0; + unsigned char dsp3_mux = 3; + unsigned char dsp4_mux = 3; + unsigned char dsp5_mux = 3; + u32 reg; + + for_each_new_or_current_crtc_state(state, crtc, crtc_state) { + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_state; + + if (!crtc_state->active) + continue; + + vc4_state = to_vc4_crtc_state(crtc_state); + switch (vc4_crtc->data->hvs_output) { + case 2: + dsp2_mux = (vc4_state->assigned_channel == 2) ? 
0 : 1; + break; + + case 3: + dsp3_mux = vc4_state->assigned_channel; + break; + + case 4: + dsp4_mux = vc4_state->assigned_channel; + break; + + case 5: + dsp5_mux = vc4_state->assigned_channel; + break; + + default: + break; + } + } + + reg = HVS_READ(SCALER_DISPECTRL); + HVS_WRITE(SCALER_DISPECTRL, + (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) | + VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX)); + + reg = HVS_READ(SCALER_DISPCTRL); + HVS_WRITE(SCALER_DISPCTRL, + (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) | + VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX)); + + reg = HVS_READ(SCALER_DISPEOLN); + HVS_WRITE(SCALER_DISPEOLN, + (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) | + VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX)); + + reg = HVS_READ(SCALER_DISPDITHER); + HVS_WRITE(SCALER_DISPDITHER, + (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) | + VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX)); +} + + static void vc4_atomic_complete_commit(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_hvs *hvs = vc4->hvs; struct vc4_crtc *vc4_crtc; int i; - for (i = 0; i < dev->mode_config.num_crtc; i++) { - if (!state->crtcs[i].ptr || !state->crtcs[i].commit) + for (i = 0; vc4->hvs && i < dev->mode_config.num_crtc; i++) { + struct __drm_crtcs_state *_state = &state->crtcs[i]; + struct vc4_crtc_state *vc4_crtc_state; + + if (!_state->ptr || !_state->commit) continue; - vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr); - vc4_hvs_mask_underrun(dev, vc4_crtc->channel); + vc4_crtc = to_vc4_crtc(_state->ptr); + vc4_crtc_state = to_vc4_crtc_state(_state->state); + vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); } + if (vc4->hvs->hvs5) + clk_set_min_rate(hvs->core_clk, 500000000); + drm_atomic_helper_wait_for_fences(dev, state, false); drm_atomic_helper_wait_for_dependencies(state); @@ -168,6 +303,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state) vc4_ctm_commit(vc4, state); + if (vc4->hvs->hvs5) + vc5_hvs_pv_muxing_commit(vc4, state); + else + vc4_hvs_pv_muxing_commit(vc4, state); + drm_atomic_helper_commit_planes(dev, state, 0); drm_atomic_helper_commit_modeset_enables(dev, state); @@ -240,7 +380,8 @@ static int vc4_atomic_commit(struct drm_device *dev, * drm_atomic_helper_setup_commit() from auto-completing * commit->flip_done. */ - state->legacy_cursor_update = false; + if (!vc4->firmware_kms) + state->legacy_cursor_update = false; ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) return ret; @@ -374,8 +515,11 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) /* CTM is being enabled or the matrix changed. */ if (new_crtc_state->ctm) { + struct vc4_crtc_state *vc4_crtc_state = + to_vc4_crtc_state(new_crtc_state); + /* fifo is 1-based since 0 disables CTM. */ - int fifo = to_vc4_crtc(crtc)->channel + 1; + int fifo = vc4_crtc_state->assigned_channel + 1; /* Check userland isn't trying to turn on CTM for more * than one CRTC at a time. 
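
The pixelvalve muxing functions above all follow the same read-modify-write pattern: read the scaler register, clear the mux field using its mask, then OR in the new FIFO index via VC4_SET_FIELD(). Below is a minimal stand-alone sketch of that pattern; the DSP_MUX_SHIFT/DSP_MUX_MASK values and the route_dsp_to_fifo() helper are made up for illustration and do not correspond to the real SCALER_DISP* register layout.

/*
 * Illustrative sketch only (not part of the patch): the read-modify-write
 * field update used by vc4_hvs_pv_muxing_commit()/vc5_hvs_pv_muxing_commit().
 * The shift/mask below are hypothetical, not the real register layout.
 */
#include <stdint.h>
#include <stdio.h>

#define DSP_MUX_SHIFT 18                               /* hypothetical field position */
#define DSP_MUX_MASK  (0x3u << DSP_MUX_SHIFT)          /* hypothetical field mask */
#define DSP_MUX_SET_FIELD(val) (((uint32_t)(val) << DSP_MUX_SHIFT) & DSP_MUX_MASK)

static uint32_t route_dsp_to_fifo(uint32_t dispctrl, unsigned int fifo)
{
	/* Drop the old routing, keep every other bit, insert the new FIFO. */
	return (dispctrl & ~DSP_MUX_MASK) | DSP_MUX_SET_FIELD(fifo);
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	reg = route_dsp_to_fifo(reg, 2);
	printf("dispctrl = 0x%08x\n", reg);
	return 0;
}
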
@@ -415,6 +559,9 @@ static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state) struct drm_plane *plane; int i; + if (!vc4->load_tracker_available) + return 0; + priv_state = drm_atomic_get_private_obj_state(state, &vc4->load_tracker); if (IS_ERR(priv_state)) @@ -485,11 +632,133 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = { .atomic_destroy_state = vc4_load_tracker_destroy_state, }; +#define NUM_OUTPUTS 6 +#define NUM_CHANNELS 3 + +/* + * The BCM2711 HVS has up to 7 output connected to the pixelvalves and + * the TXP (and therefore all the CRTCs found on that platform). + * + * The naive (and our initial) implementation would just iterate over + * all the active CRTCs, try to find a suitable FIFO, and then remove it + * from the available FIFOs pool. However, there's a few corner cases + * that need to be considered: + * + * - When running in a dual-display setup (so with two CRTCs involved), + * we can update the state of a single CRTC (for example by changing + * its mode using xrandr under X11) without affecting the other. In + * this case, the other CRTC wouldn't be in the state at all, so we + * need to consider all the running CRTCs in the DRM device to assign + * a FIFO, not just the one in the state. + * + * - To fix the above, we can't use drm_atomic_get_crtc_state on all + * enabled CRTCs to pull their CRTC state into the global state, since + * a page flip would start considering their vblank to complete. Since + * we don't have a guarantee that they are actually active, that + * vblank might never happen, and shouldn't even be considered if we + * want to do a page flip on a single CRTC. That can be tested by + * doing a modetest -v first on HDMI1 and then on HDMI0. + * + * - Since we need the pixelvalve to be disabled and enabled back when + * the FIFO is changed, we should keep the FIFO assigned for as long + * as the CRTC is enabled, only considering it free again once that + * CRTC has been disabled. This can be tested by booting X11 on a + * single display, and changing the resolution down and then back up. + */ +static int vc4_pv_muxing_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0); + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned int i; + + /* + * Since the HVS FIFOs are shared across all the pixelvalves and + * the TXP (and thus all the CRTCs), we need to pull the current + * state of all the enabled CRTCs so that an update to a single + * CRTC still keeps the previous FIFOs enabled and assigned to + * the same CRTCs, instead of evaluating only the CRTC being + * modified. 
+ */ + for_each_new_or_current_crtc_state(state, crtc, crtc_state) { + struct vc4_crtc_state *vc4_crtc_state; + if (!crtc_state->enable) + continue; + + vc4_crtc_state = to_vc4_crtc_state(crtc_state); + unassigned_channels &= ~BIT(vc4_crtc_state->assigned_channel); + } + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct vc4_crtc_state *new_vc4_crtc_state = + to_vc4_crtc_state(new_crtc_state); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + bool is_assigned = false; + unsigned int channel; + + if (old_crtc_state->enable && !new_crtc_state->enable) + new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; + + if (!new_crtc_state->enable) + continue; + + if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) + continue; + + /* + * The problem we have to solve here is that we have + * up to 7 encoders, connected to up to 6 CRTCs. + * + * Those CRTCs, depending on the instance, can be + * routed to 1, 2 or 3 HVS FIFOs, and we need to set + * the change the muxing between FIFOs and outputs in + * the HVS accordingly. + * + * It would be pretty hard to come up with an + * algorithm that would generically solve + * this. However, the current routing trees we support + * allow us to simplify a bit the problem. + * + * Indeed, with the current supported layouts, if we + * try to assign in the ascending crtc index order the + * FIFOs, we can't fall into the situation where an + * earlier CRTC that had multiple routes is assigned + * one that was the only option for a later CRTC. + * + * If the layout changes and doesn't give us that in + * the future, we will need to have something smarter, + * but it works so far. + */ + for_each_set_bit(channel, &unassigned_channels, + sizeof(unassigned_channels)) { + + if (!(BIT(channel) & vc4_crtc->data->hvs_available_channels)) + continue; + + new_vc4_crtc_state->assigned_channel = channel; + unassigned_channels &= ~BIT(channel); + is_assigned = true; + break; + } + + if (!is_assigned) + return -EINVAL; + } + + return 0; +} + static int vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { int ret; + ret = vc4_pv_muxing_atomic_check(dev, state); + if (ret) + return ret; + ret = vc4_ctm_atomic_check(dev, state); if (ret < 0) return ret; @@ -514,10 +783,14 @@ int vc4_kms_load(struct drm_device *dev) struct vc4_load_tracker_state *load_state; int ret; - /* Start with the load tracker enabled. Can be disabled through the - * debugfs load_tracker file. - */ - vc4->load_tracker_enabled = true; + if (!of_device_is_compatible(dev->dev->of_node, "brcm,bcm2711-vc5")) { + vc4->load_tracker_available = true; + + /* Start with the load tracker enabled. Can be + * disabled through the debugfs load_tracker file. + */ + vc4->load_tracker_enabled = true; + } sema_init(&vc4->async_modeset, 1); @@ -531,12 +804,19 @@ int vc4_kms_load(struct drm_device *dev) return ret; } - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; + if (!drm_core_check_feature(dev, DRIVER_RENDER)) { + /* No V3D as part of vc4. Assume this is Pi4. 
*/ + dev->mode_config.max_width = 7680; + dev->mode_config.max_height = 7680; + } else { + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + } dev->mode_config.funcs = &vc4_mode_funcs; dev->mode_config.preferred_depth = 24; dev->mode_config.async_page_flip = true; dev->mode_config.allow_fb_modifiers = true; + dev->mode_config.normalize_zpos = true; drm_modeset_lock_init(&vc4->ctm_state_lock); @@ -547,14 +827,17 @@ int vc4_kms_load(struct drm_device *dev) drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base, &vc4_ctm_state_funcs); - load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); - if (!load_state) { - drm_atomic_private_obj_fini(&vc4->ctm_manager); - return -ENOMEM; - } + if (vc4->load_tracker_available) { + load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); + if (!load_state) { + drm_atomic_private_obj_fini(&vc4->ctm_manager); + return -ENOMEM; + } - drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base, - &vc4_load_tracker_state_funcs); + drm_atomic_private_obj_init(dev, &vc4->load_tracker, + &load_state->base, + &vc4_load_tracker_state_funcs); + } drm_mode_config_reset(dev);
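
The comment block in vc4_pv_muxing_atomic_check() describes a greedy assignment: CRTCs that are already enabled keep the FIFO they hold, and the remaining enabled CRTCs are walked in ascending index order, each taking the lowest still-free HVS channel allowed by its hvs_available_channels mask. The stand-alone user-space sketch below only illustrates that assignment order; the struct fake_crtc type, the assign_channels() helper, and the example channel masks are hypothetical and are not the driver's actual data structures.

/*
 * Stand-alone sketch (illustration only, not part of the patch) of the
 * greedy FIFO assignment performed by vc4_pv_muxing_atomic_check():
 * walk CRTCs in ascending index order and give each one the lowest
 * unassigned channel from its mask of usable HVS FIFOs.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_CHANNELS 3

struct fake_crtc {
	const char *name;
	unsigned int available_channels;	/* bitmask of usable HVS FIFOs */
	int assigned_channel;			/* -1 == not assigned yet */
};

static bool assign_channels(struct fake_crtc *crtcs, unsigned int n)
{
	unsigned int unassigned = (1u << NUM_CHANNELS) - 1;
	unsigned int i, channel;

	for (i = 0; i < n; i++) {
		for (channel = 0; channel < NUM_CHANNELS; channel++) {
			if (!(unassigned & (1u << channel)))
				continue;
			if (!(crtcs[i].available_channels & (1u << channel)))
				continue;
			crtcs[i].assigned_channel = channel;
			unassigned &= ~(1u << channel);
			break;
		}
		if (crtcs[i].assigned_channel < 0)
			return false;	/* the driver would return -EINVAL here */
	}
	return true;
}

int main(void)
{
	/* Hypothetical routing: one CRTC can use FIFO 0 or 2, the other only FIFO 1. */
	struct fake_crtc crtcs[] = {
		{ "crtc0", (1u << 0) | (1u << 2), -1 },
		{ "crtc1", (1u << 1), -1 },
	};
	unsigned int i;

	if (!assign_channels(crtcs, 2))
		return 1;

	for (i = 0; i < 2; i++)
		printf("%s -> FIFO %d\n", crtcs[i].name, crtcs[i].assigned_channel);
	return 0;
}

Walking the CRTCs in ascending index order is what lets the patch avoid backtracking: with the routing trees currently supported, a CRTC with several possible channels is never handed the only channel a later CRTC could have used.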