drm/dp_mst: Fix fractional DSC bpp handling
[platform/kernel/linux-starfive.git] drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 868946d..861b5e4 100644
@@ -1692,8 +1692,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
                         dce_version_to_string(adev->dm.dc->ctx->dce_version));
        } else {
-               DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER,
-                        dce_version_to_string(adev->dm.dc->ctx->dce_version));
+               DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }
 
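A minimal sketch of why the failure message no longer formats the DCE version, assuming the branch above is guarded by if (adev->dm.dc): in the else path dc is NULL, so the old string's dc->ctx->dce_version lookup would dereference a NULL pointer, while DC_VER is a compile-time string that is always safe to print.

    /* Sketch only; the struct names loosely mirror the driver and are assumed. */
    #include <stdio.h>

    struct dc_ctx { int dce_version; };
    struct dc     { struct dc_ctx *ctx; };

    static void report_dc_init(const struct dc *dc, const char *ver)
    {
            if (dc)         /* safe to follow dc->ctx here */
                    printf("Display Core v%s initialized (dce %d)\n",
                           ver, dc->ctx->dce_version);
            else            /* dc is NULL: only use the static version string */
                    printf("Display Core failed to initialize with v%s!\n", ver);
    }
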
@@ -2085,7 +2084,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
-       struct dmub_srv_fb_params fb_params;
+       struct dmub_srv_memory_params memory_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
@@ -2185,6 +2184,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;
+       region_params.is_mailbox_in_inbox = false;
 
        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);
@@ -2208,10 +2208,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                return r;
 
        /* Rebase the regions on the framebuffer address. */
-       memset(&fb_params, 0, sizeof(fb_params));
-       fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
-       fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
-       fb_params.region_info = &region_info;
+       memset(&memory_params, 0, sizeof(memory_params));
+       memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
+       memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
+       memory_params.region_info = &region_info;
 
        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@@ -2223,7 +2223,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                return -ENOMEM;
        }
 
-       status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+       status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
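The dmub_srv_fb_params to dmub_srv_memory_params rename above names the framebuffer fields explicitly (cpu_addr/gpu_addr become cpu_fb_addr/gpu_fb_addr) and switches the rebase call to dmub_srv_calc_mem_info(). A minimal sketch of the parameter struct as implied by this hunk; the real definition lives in the DMUB headers and may carry additional members (for memory outside the frame buffer) that are neither shown nor assumed here.

    #include <stdint.h>

    struct dmub_srv_region_info;        /* produced by dmub_srv_calc_region_info() */

    struct dmub_srv_memory_params_sketch {
            const struct dmub_srv_region_info *region_info;
            void *cpu_fb_addr;          /* CPU mapping of the DMUB buffer object */
            uint64_t gpu_fb_addr;       /* GPU address of the same buffer object */
    };
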
@@ -5170,6 +5170,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
        if (plane->type == DRM_PLANE_TYPE_CURSOR)
                return;
 
+       if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+               goto ffu;
+
        num_clips = drm_plane_get_damage_clips_count(new_plane_state);
        clips = drm_plane_get_damage_clips(new_plane_state);
 
@@ -6136,8 +6139,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
-
-       if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
+       else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+                        stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+                        stream->signal == SIGNAL_TYPE_EDP) {
                //
                // should decide stream support vsc sdp colorimetry capability
                // before building vsc info packet
@@ -6153,8 +6157,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
                        tf = TRANSFER_FUNC_GAMMA_22;
                mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
-               aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 
+               if (stream->link->psr_settings.psr_feature_enabled)
+                       aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
        }
 finish:
        dc_sink_release(sink);
@@ -6236,7 +6241,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
                dm_new_state->underscan_enable = val;
                ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
-               dm_new_state->abm_level = val;
+               dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
                ret = 0;
        }
 
@@ -6281,7 +6286,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
                *val = dm_state->underscan_enable;
                ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
-               *val = dm_state->abm_level;
+               *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
+                       dm_state->abm_level : 0;
                ret = 0;
        }
 
@@ -6354,7 +6360,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
                state->pbn = 0;
 
                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-                       state->abm_level = amdgpu_dm_abm_level;
+                       state->abm_level = amdgpu_dm_abm_level ?:
+                               ABM_LEVEL_IMMEDIATE_DISABLE;
 
                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
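The three abm_level hunks above implement a single mapping: user space keeps writing 0 for "ABM off", but internally 0 now means "leave ABM untouched", so "off" is stored as ABM_LEVEL_IMMEDIATE_DISABLE and translated back to 0 when the property is read ("a ?: b" is GNU C shorthand for "a ? a : b"). A minimal sketch of the two directions; the constant's numeric value is assumed here.

    #define ABM_LEVEL_IMMEDIATE_DISABLE 255     /* value assumed for the sketch */

    static unsigned int abm_prop_to_state(unsigned int prop_val)
    {
            /* Property value 0 ("off") becomes an explicit immediate disable. */
            return prop_val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
    }

    static unsigned int abm_state_to_prop(unsigned int state_val)
    {
            /* Report the sentinel back to user space as the familiar 0. */
            return state_val != ABM_LEVEL_IMMEDIATE_DISABLE ? state_val : 0;
    }
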
@@ -6863,8 +6870,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
        if (IS_ERR(mst_state))
                return PTR_ERR(mst_state);
 
-       if (!mst_state->pbn_div)
-               mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
+       mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
 
        if (!state->duplicated) {
                int max_bpc = conn_state->max_requested_bpc;
@@ -6876,7 +6882,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                                                                    max_bpc);
                bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
                clock = adjusted_mode->clock;
-               dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+               dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
        }
 
        dm_new_connector_state->vcpi_slots =
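This is the hunk the subject line refers to: with fractional DSC bpp support, drm_dp_calc_pbn_mode() drops its DSC flag and takes the bpp argument in 1/16-bpp units, so callers that still hold an integer bpp shift it left by four. A minimal sketch of that fixed-point convention; the helper names are illustrative, not DRM API.

    /* Integer bpp -> 1/16-bpp units, e.g. 24 bpp -> 384. */
    static inline int bpp_to_x16(int bpp)
    {
            return bpp << 4;
    }

    /* Fractional DSC targets stay representable, e.g. 12.5 bpp -> 200. */
    static inline int bpp_x16_to_int(int bpp_x16)
    {
            return bpp_x16 >> 4;        /* truncates the fractional part */
    }
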
@@ -7431,6 +7437,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
        int i;
        int result = -EIO;
 
+       if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+               return result;
+
        cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
 
        if (!cmd.payloads)
@@ -9539,14 +9548,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
        struct drm_plane *other;
        struct drm_plane_state *old_other_state, *new_other_state;
        struct drm_crtc_state *new_crtc_state;
+       struct amdgpu_device *adev = drm_to_adev(plane->dev);
        int i;
 
        /*
-        * TODO: Remove this hack once the checks below are sufficient
-        * enough to determine when we need to reset all the planes on
-        * the stream.
+        * TODO: Remove this hack for all asics once it proves that the
+        * fast updates works fine on DCN3.2+.
         */
-       if (state->allow_modeset)
+       if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
                return true;
 
        /* Exit early if we know that we're adding or removing the plane. */
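The allow_modeset shortcut in should_reset_plane() is now kept only for hardware older than DCN 3.2, while DCN 3.2+ relies on the finer-grained checks below (per the updated TODO). amdgpu packs IP discovery versions into one integer, so the comparison above is a plain integer compare; a minimal sketch of that encoding, with the macro assumed to match the upstream definition:

    #define IP_VERSION(maj, min, rev)  (((maj) << 16) | ((min) << 8) | (rev))

    /* Example: DCN 3.1.4 sorts below DCN 3.2.0 and keeps the old fallback. */
    _Static_assert(IP_VERSION(3, 1, 4) < IP_VERSION(3, 2, 0), "version ordering");
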
@@ -9892,16 +9901,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
        }
 }
 
+static void
+dm_get_plane_scale(struct drm_plane_state *plane_state,
+                  int *out_plane_scale_w, int *out_plane_scale_h)
+{
+       int plane_src_w, plane_src_h;
+
+       dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
+       *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
+       *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+}
+
 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
                                struct drm_crtc *crtc,
                                struct drm_crtc_state *new_crtc_state)
 {
-       struct drm_plane *cursor = crtc->cursor, *underlying;
+       struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
+       struct drm_plane_state *old_plane_state, *new_plane_state;
        struct drm_plane_state *new_cursor_state, *new_underlying_state;
        int i;
        int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
-       int cursor_src_w, cursor_src_h;
-       int underlying_src_w, underlying_src_h;
+       bool any_relevant_change = false;
 
        /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
         * cursor per pipe but it's going to inherit the scaling and
@@ -9909,13 +9929,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
         * blending properties match the underlying planes'.
         */
 
-       new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
-       if (!new_cursor_state || !new_cursor_state->fb)
+       /* If no plane was enabled or changed scaling, no need to check again */
+       for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+               int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+
+               if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
+                       continue;
+
+               if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
+                       any_relevant_change = true;
+                       break;
+               }
+
+               if (new_plane_state->fb == old_plane_state->fb &&
+                   new_plane_state->crtc_w == old_plane_state->crtc_w &&
+                   new_plane_state->crtc_h == old_plane_state->crtc_h)
+                       continue;
+
+               dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
+               dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+
+               if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+                       any_relevant_change = true;
+                       break;
+               }
+       }
+
+       if (!any_relevant_change)
                return 0;
 
-       dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
-       cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
-       cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
+       new_cursor_state = drm_atomic_get_plane_state(state, cursor);
+       if (IS_ERR(new_cursor_state))
+               return PTR_ERR(new_cursor_state);
+
+       if (!new_cursor_state->fb)
+               return 0;
+
+       dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
+
+       /* Need to check all enabled planes, even if this commit doesn't change
+        * their state
+        */
+       i = drm_atomic_add_affected_planes(state, crtc);
+       if (i)
+               return i;
 
        for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
                /* Narrow down to non-cursor planes on the same CRTC as the cursor */
@@ -9926,10 +9983,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
                if (!new_underlying_state->fb)
                        continue;
 
-               dm_get_oriented_plane_size(new_underlying_state,
-                                          &underlying_src_w, &underlying_src_h);
-               underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
-               underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
+               dm_get_plane_scale(new_underlying_state,
+                                  &underlying_scale_w, &underlying_scale_h);
 
                if (cursor_scale_w != underlying_scale_w ||
                    cursor_scale_h != underlying_scale_h) {