From a9fa161b8356a16048e77ad02acfb339d247d6d3 Mon Sep 17 00:00:00 2001
From: Wesley Chalmers
Date: Thu, 3 Nov 2022 22:29:31 -0400
Subject: [PATCH] drm/amd/display: Do not set drr on pipe commit

[ Upstream commit e101bf95ea87ccc03ac2f48dfc0757c6364ff3c7 ]

[WHY]
Writing to DRR registers such as OTG_V_TOTAL_MIN on the same frame as a
pipe commit can cause underflow.

[HOW]
Move the DMUB p-state delegate into optimize_bandwidth; enabling FAMS
sets optimized_required.

This change expects that Freesync requests are blocked when
optimized_required is true.

Reviewed-by: Rodrigo Siqueira
Signed-off-by: Wesley Chalmers
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
Signed-off-by: Sasha Levin
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 6 ++++++
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 7 +++++++
 2 files changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 4ef6328..21fae81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2032,6 +2032,12 @@ void dcn20_optimize_bandwidth(
 	if (hubbub->funcs->program_compbuf_size)
 		hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
 
+	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+		dc_dmub_srv_p_state_delegate(dc,
+			true, context);
+		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+	}
+
 	dc->clk_mgr->funcs->update_clocks(
 			dc->clk_mgr,
 			context,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index a1b3124..c97d3e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -987,11 +987,18 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
 void dcn30_prepare_bandwidth(struct dc *dc,
 	struct dc_state *context)
 {
+	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+		dc->optimized_required = true;
+		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+	}
+
 	if (dc->clk_mgr->dc_mode_softmax_enabled)
 		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
 
 	dcn20_prepare_bandwidth(dc, context);
+
+	dc_dmub_srv_p_state_delegate(dc, false, context);
 }
 
-- 
2.7.4
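
For readers unfamiliar with the DC bandwidth sequencing, the stand-alone C
sketch below illustrates the ordering this patch enforces: prepare_bandwidth
disables FW-assisted mclk switching (FAMS) and marks optimized_required
before the pipe commit, and optimize_bandwidth re-delegates p-state control
to DMUB only after the commit, so DRR register writes cannot land on the
commit frame. The struct layouts, the simplified prepare_bandwidth()/
optimize_bandwidth() wrappers, and the printf stub for
dc_dmub_srv_p_state_delegate() are hypothetical stand-ins rather than the
real driver code; only the field and function names taken from the diff
mirror the actual DC implementation.

/* Minimal sketch; struct layouts and stubs are simplified stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct dc_clocks {
	bool fw_based_mclk_switching;	/* FAMS requested for this state */
	bool p_state_change_support;
};

struct dc_state {
	struct dc_clocks clk;
};

struct dc {
	bool optimized_required;	/* while true, Freesync/DRR updates are held off */
};

/* Stand-in for dc_dmub_srv_p_state_delegate(): hands p-state control to DMUB FW. */
static void dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable,
					 struct dc_state *context)
{
	(void)dc;
	(void)context;
	printf("DMUB p-state delegate: %s\n", enable ? "enabled" : "disabled");
}

/* Before the pipe commit: keep FAMS off and mark optimization as pending. */
static void prepare_bandwidth(struct dc *dc, struct dc_state *context)
{
	if (context->clk.fw_based_mclk_switching) {
		dc->optimized_required = true;	/* DRR register writes now deferred */
		context->clk.p_state_change_support = false;
	}
	dc_dmub_srv_p_state_delegate(dc, false, context);
}

/* After the pipe commit has taken effect: re-enable FAMS and p-state support. */
static void optimize_bandwidth(struct dc *dc, struct dc_state *context)
{
	if (context->clk.fw_based_mclk_switching) {
		dc_dmub_srv_p_state_delegate(dc, true, context);
		context->clk.p_state_change_support = true;
	}
	dc->optimized_required = false;
}

int main(void)
{
	struct dc dc = { .optimized_required = false };
	struct dc_state context = {
		.clk = { .fw_based_mclk_switching = true },
	};

	prepare_bandwidth(&dc, &context);
	/* ...pipe commit happens here; DRR writes such as OTG_V_TOTAL_MIN wait... */
	optimize_bandwidth(&dc, &context);
	return 0;
}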