2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
54 #include "dce/dmub_psr.h"
55 #include "dc_dmub_srv.h"
56 #include "dce/dmub_hw_lock_mgr.h"
58 #include "dce/dmub_outbox.h"
59 #include "inc/dc_link_dp.h"
60 #include "inc/link_dpcd.h"
/* Logger init is a no-op here; this file logs through the DTN_INFO macros. */
62 #define DC_LOGGER_INIT(logger)
/* Expand a register field name into its (shift, mask) pair from the hwseq tables. */
70 #define FN(reg_name, field_name) \
71 hws->shifts->field_name, hws->masks->field_name
73 /*print is 17 wide, first two characters are spaces*/
74 #define DTN_INFO_MICRO_SEC(ref_cycle) \
75 print_microsec(dc_ctx, log_ctx, ref_cycle)
/* Number of hardware points programmed for gamma LUTs. */
77 #define GAMMA_HW_POINTS_NUM 256
/* PGFSM power-state values compared against *_PGFSM_PWR_STATUS in REG_WAIT. */
79 #define PGFSM_POWER_ON 0
80 #define PGFSM_POWER_OFF 2
/*
 * print_microsec - log a DCHUB reference-clock cycle count as microseconds.
 *
 * Converts @ref_cycle (cycles of the dchub reference clock) into a
 * fixed-point microsecond value and appends it to the DTN log.
 */
82 static void print_microsec(struct dc_context *dc_ctx,
83 struct dc_log_buffer_ctx *log_ctx,
/* Reference clock in MHz, derived from the resource pool's dchub ref clock (kHz). */
86 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
87 static const unsigned int frac = 1000;
/* Scale by 1000 before dividing to keep three fractional digits. */
88 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
90 DTN_INFO(" %11d.%03d",
/*
 * dcn10_lock_all_pipes - (un)lock the OTG of every enabled top pipe.
 *
 * Walks all pipes in @context and calls the hwss pipe_control_lock hook.
 * Secondary (bottom) pipes share their top pipe's timing generator, so
 * only top pipes with an enabled TG are touched.
 */
95 void dcn10_lock_all_pipes(struct dc *dc,
96 struct dc_state *context,
99 struct pipe_ctx *pipe_ctx;
100 struct timing_generator *tg;
103 for (i = 0; i < dc->res_pool->pipe_count; i++) {
104 pipe_ctx = &context->res_ctx.pipe_ctx[i];
105 tg = pipe_ctx->stream_res.tg;
108 * Only lock the top pipe's tg to prevent redundant
109 * (un)locking. Also skip if pipe is disabled.
111 if (pipe_ctx->top_pipe ||
113 !tg->funcs->is_tg_enabled(tg))
117 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
119 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
/*
 * log_mpc_crc - dump MPC and DPP CRC result registers to the DTN log.
 *
 * Each register is read only when the register offset exists on this
 * ASIC (REG() returns nonzero), since register maps differ per DCN rev.
 */
123 static void log_mpc_crc(struct dc *dc,
124 struct dc_log_buffer_ctx *log_ctx)
126 struct dc_context *dc_ctx = dc->ctx;
127 struct dce_hwseq *hws = dc->hwseq;
129 if (REG(MPC_CRC_RESULT_GB))
130 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
131 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
132 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
133 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
134 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
/*
 * dcn10_log_hubbub_state - dump the four HUBBUB watermark sets.
 *
 * Reads the watermark state via the hubbub wm_read_state hook and logs
 * each set's urgency/self-refresh/dram-clock-change values in microseconds.
 */
137 static void dcn10_log_hubbub_state(struct dc *dc,
138 struct dc_log_buffer_ctx *log_ctx)
140 struct dc_context *dc_ctx = dc->ctx;
141 struct dcn_hubbub_wm wm;
144 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
145 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
147 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
148 " sr_enter sr_exit dram_clk_change\n");
/* DCN10 exposes four watermark sets (A-D). */
150 for (i = 0; i < 4; i++) {
151 struct dcn_hubbub_wm_set *s;
154 DTN_INFO("WM_Set[%d]:", s->wm_set);
155 DTN_INFO_MICRO_SEC(s->data_urgent);
156 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
157 DTN_INFO_MICRO_SEC(s->sr_enter);
158 DTN_INFO_MICRO_SEC(s->sr_exit);
/* sic: the struct field is spelled "chanage" in dcn_hubbub_wm_set; must match. */
159 DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
/*
 * dcn10_log_hubp_states - dump per-HUBP state: config, RQ, DLG and TTU regs.
 *
 * Snapshots every HUBP's register state via hubp_read_state and prints
 * four tables: general config, request (RQ) registers, delay (DLG)
 * registers, and time-to-underflow (TTU) registers.
 */
166 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
168 struct dc_context *dc_ctx = dc->ctx;
169 struct resource_pool *pool = dc->res_pool;
173 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
174 for (i = 0; i < pool->pipe_count; i++) {
175 struct hubp *hubp = pool->hubps[i];
/* Cached register snapshot lives inside the dcn10 hubp wrapper. */
176 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
178 hubp->funcs->hubp_read_state(hubp);
181 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
194 s->underflow_status);
195 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
196 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
197 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
/* Request-generation (RQ) register table, luma (L:) and chroma (C:) halves. */
202 DTN_INFO("\n=========RQ========\n");
203 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
204 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
205 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
206 for (i = 0; i < pool->pipe_count; i++) {
207 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
208 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
211 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
212 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
213 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
214 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
215 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
216 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
217 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
218 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
219 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
220 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
/* Delay (DLG) register table. */
223 DTN_INFO("========DLG========\n");
224 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
225 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
226 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
227 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
228 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
229 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
230 " x_rp_dlay x_rr_sfl\n");
231 for (i = 0; i < pool->pipe_count; i++) {
232 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
233 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
236 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
237 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
238 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
239 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
240 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
241 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
242 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
243 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
244 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
245 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
246 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
247 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
248 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
249 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
250 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
251 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
252 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
253 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
254 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
255 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
256 dlg_regs->xfc_reg_remote_surface_flip_latency);
/* Time-to-underflow (TTU) register table. */
259 DTN_INFO("========TTU========\n");
260 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
261 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
262 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
263 for (i = 0; i < pool->pipe_count; i++) {
264 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
265 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
268 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
269 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
270 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
271 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
272 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
273 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
274 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
275 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/*
 * dcn10_log_hw_state - dump a full snapshot of the display HW state.
 *
 * Top-level DTN debug entry point: logs HUBBUB watermarks, HUBP state,
 * DPP/gamma/gamut config, MPCC tree, OTG timing, DSC, stream/link
 * encoders, calculated clocks, CRCs, and (when present) DP HPO encoders.
 * @log_ctx may be NULL, in which case DTN_INFO falls back to the kernel log.
 */
280 void dcn10_log_hw_state(struct dc *dc,
281 struct dc_log_buffer_ctx *log_ctx)
283 struct dc_context *dc_ctx = dc->ctx;
284 struct resource_pool *pool = dc->res_pool;
289 dcn10_log_hubbub_state(dc, log_ctx);
291 dcn10_log_hubp_states(dc, log_ctx);
/* DPP section: input/degamma/regamma LUT modes and 3x4 gamut remap matrix. */
293 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
294 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
295 "C31 C32 C33 C34\n");
296 for (i = 0; i < pool->pipe_count; i++) {
297 struct dpp *dpp = pool->dpps[i];
298 struct dcn_dpp_state s = {0};
300 dpp->funcs->dpp_read_state(dpp, &s);
305 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
306 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
/* Decode LUT mode register values into human-readable names. */
309 (s.igam_lut_mode == 0) ? "BypassFixed" :
310 ((s.igam_lut_mode == 1) ? "BypassFloat" :
311 ((s.igam_lut_mode == 2) ? "RAM" :
312 ((s.igam_lut_mode == 3) ? "RAM" :
314 (s.dgam_lut_mode == 0) ? "Bypass" :
315 ((s.dgam_lut_mode == 1) ? "sRGB" :
316 ((s.dgam_lut_mode == 2) ? "Ycc" :
317 ((s.dgam_lut_mode == 3) ? "RAM" :
318 ((s.dgam_lut_mode == 4) ? "RAM" :
320 (s.rgam_lut_mode == 0) ? "Bypass" :
321 ((s.rgam_lut_mode == 1) ? "sRGB" :
322 ((s.rgam_lut_mode == 2) ? "Ycc" :
323 ((s.rgam_lut_mode == 3) ? "RAM" :
324 ((s.rgam_lut_mode == 4) ? "RAM" :
327 s.gamut_remap_c11_c12,
328 s.gamut_remap_c13_c14,
329 s.gamut_remap_c21_c22,
330 s.gamut_remap_c23_c24,
331 s.gamut_remap_c31_c32,
332 s.gamut_remap_c33_c34);
/* MPCC blending-tree section. */
337 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
338 for (i = 0; i < pool->pipe_count; i++) {
339 struct mpcc_state s = {0};
341 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
343 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
344 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
345 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
/* OTG (timing generator) section. */
350 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
352 for (i = 0; i < pool->timing_generator_count; i++) {
353 struct timing_generator *tg = pool->timing_generators[i];
354 struct dcn_otg_state s = {0};
355 /* Read shared OTG state registers for all DCNx */
356 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
359 * For DCN2 and greater, a register on the OPP is used to
360 * determine if the CRTC is blanked instead of the OTG. So use
361 * dpg_is_blanked() if exists, otherwise fallback on otg.
363 * TODO: Implement DCN-specific read_otg_state hooks.
365 if (pool->opps[i]->funcs->dpg_is_blanked)
366 s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
368 s.blank_enabled = tg->funcs->is_blanked(tg);
370 //only print if OTG master is enabled
371 if ((s.otg_enabled & 1) == 0)
374 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
392 s.underflow_occurred_status,
395 // Clear underflow for debug purposes
396 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
397 // This function is called only from Windows or Diags test environment, hence it's safe to clear
398 // it from here without affecting the original intent.
399 tg->funcs->clear_optc_underflow(tg);
/* DSC (display stream compression) section. */
403 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
404 // TODO: Update golden log header to reflect this name change
405 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
406 for (i = 0; i < pool->res_cap->num_dsc; i++) {
407 struct display_stream_compressor *dsc = pool->dscs[i];
408 struct dcn_dsc_state s = {0};
410 dsc->funcs->dsc_read_state(dsc, &s);
411 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
415 s.dsc_bits_per_pixel);
/* Stream-encoder section; enc_read_state is optional per encoder type. */
420 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
421 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
422 for (i = 0; i < pool->stream_enc_count; i++) {
423 struct stream_encoder *enc = pool->stream_enc[i];
424 struct enc_state s = {0};
426 if (enc->funcs->enc_read_state) {
427 enc->funcs->enc_read_state(enc, &s);
428 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
431 s.sec_gsp_pps_line_num,
432 s.vbid6_line_reference,
434 s.sec_gsp_pps_enable,
435 s.sec_stream_enable);
/* Link-encoder section; link_enc may be NULL for some link types. */
441 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
442 for (i = 0; i < dc->link_count; i++) {
443 struct link_encoder *lenc = dc->links[i]->link_enc;
445 struct link_enc_state s = {0};
447 if (lenc && lenc->funcs->read_state) {
448 lenc->funcs->read_state(lenc, &s);
449 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
452 s.dphy_fec_ready_shadow,
453 s.dphy_fec_active_status,
454 s.dp_link_training_complete);
/* Clock values come from the last committed bandwidth context. */
460 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
461 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
462 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
463 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
464 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
465 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
466 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
467 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
468 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
470 log_mpc_crc(dc, log_ctx);
/* DP HPO (128b/132b) stream-encoder section, only when the pool has any. */
473 if (pool->hpo_dp_stream_enc_count > 0) {
474 DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
475 for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
476 struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
477 struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
479 if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
480 hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
482 DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
483 hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
484 hpo_dp_se_state.stream_enc_enabled,
485 hpo_dp_se_state.otg_inst,
486 (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
487 ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
488 (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
489 (hpo_dp_se_state.component_depth == 0) ? 6 :
490 ((hpo_dp_se_state.component_depth == 1) ? 8 :
491 (hpo_dp_se_state.component_depth == 2) ? 10 : 12),
492 hpo_dp_se_state.vid_stream_enabled,
493 hpo_dp_se_state.sdp_enabled,
494 hpo_dp_se_state.compressed_format,
495 hpo_dp_se_state.mapped_to_link_enc);
502 /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
503 if (pool->hpo_dp_link_enc_count) {
504 DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
506 for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
507 struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
508 struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
510 if (hpo_dp_link_enc->funcs->read_state) {
511 hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
512 DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
513 hpo_dp_link_enc->inst,
514 hpo_dp_le_state.link_enc_enabled,
515 (hpo_dp_le_state.link_mode == 0) ? "TPS1" :
516 (hpo_dp_le_state.link_mode == 1) ? "TPS2" :
517 (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
518 hpo_dp_le_state.lane_count,
519 hpo_dp_le_state.stream_src[0],
520 hpo_dp_le_state.slot_count[0],
521 hpo_dp_le_state.vc_rate_x[0],
522 hpo_dp_le_state.vc_rate_y[0]);
/*
 * dcn10_did_underflow_occur - check and clear underflow status for a pipe.
 *
 * Checks both the OTG (OPTC) underflow bit and the HUBP underflow bit for
 * @pipe_ctx, clearing each sticky status bit when it is found set.
 */
534 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
536 struct hubp *hubp = pipe_ctx->plane_res.hubp;
537 struct timing_generator *tg = pipe_ctx->stream_res.tg;
539 if (tg->funcs->is_optc_underflow_occurred(tg)) {
540 tg->funcs->clear_optc_underflow(tg);
544 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
545 hubp->funcs->hubp_clear_underflow(hubp);
/*
 * dcn10_enable_power_gating_plane - force all plane power domains on or off.
 *
 * Writes the POWER_FORCEON bit for every DCHUBP domain (0/2/4/6) and every
 * DPP domain (1/3/5/7). force_on == true disables power gating entirely.
 */
551 void dcn10_enable_power_gating_plane(
552 struct dce_hwseq *hws,
555 bool force_on = true; /* disable power gating */
/* DCHUBP power domains. */
561 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
562 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
563 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
564 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
/* DPP power domains. */
567 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
568 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
569 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
570 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
/*
 * dcn10_disable_vga - take all four display pipes out of legacy VGA mode.
 *
 * No-op when no pipe is in VGA mode. Otherwise clears the D1-D4 VGA
 * control registers and kicks the VGA test-render sequence so DCHUBP
 * timing is updated correctly (see HW note below).
 */
573 void dcn10_disable_vga(
574 struct dce_hwseq *hws)
576 unsigned int in_vga1_mode = 0;
577 unsigned int in_vga2_mode = 0;
578 unsigned int in_vga3_mode = 0;
579 unsigned int in_vga4_mode = 0;
581 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
582 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
583 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
584 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
/* Nothing to do if no controller is in VGA mode. */
586 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
587 in_vga3_mode == 0 && in_vga4_mode == 0)
590 REG_WRITE(D1VGA_CONTROL, 0);
591 REG_WRITE(D2VGA_CONTROL, 0);
592 REG_WRITE(D3VGA_CONTROL, 0);
593 REG_WRITE(D4VGA_CONTROL, 0);
595 /* HW Engineer's Notes:
596 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
597 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
599 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
600 * VGA_TEST_ENABLE, to leave it in the same state as before.
602 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
603 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
607 * dcn10_dpp_pg_control - DPP power gate control.
609 * @hws: dce_hwseq reference.
610 * @dpp_inst: DPP instance reference.
611 * @power_on: true to power the DPP on (ungate), false to power-gate it.
613 * Enable or disable power gate in the specific DPP instance. No-op when
 * DPP power gating is disabled via debug option or when the domain
 * registers are not present on this ASIC. Waits for the PGFSM to reach
 * the requested power state after each update.
615 void dcn10_dpp_pg_control(
616 struct dce_hwseq *hws,
617 unsigned int dpp_inst,
/* power_gate is the register polarity: 1 = gated (off), 0 = ungated (on). */
620 uint32_t power_gate = power_on ? 0 : 1;
621 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
623 if (hws->ctx->dc->debug.disable_dpp_power_gate)
625 if (REG(DOMAIN1_PG_CONFIG) == 0)
/* DPP0-3 map to the odd power domains 1/3/5/7. */
630 REG_UPDATE(DOMAIN1_PG_CONFIG,
631 DOMAIN1_POWER_GATE, power_gate);
633 REG_WAIT(DOMAIN1_PG_STATUS,
634 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
638 REG_UPDATE(DOMAIN3_PG_CONFIG,
639 DOMAIN3_POWER_GATE, power_gate);
641 REG_WAIT(DOMAIN3_PG_STATUS,
642 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
646 REG_UPDATE(DOMAIN5_PG_CONFIG,
647 DOMAIN5_POWER_GATE, power_gate);
649 REG_WAIT(DOMAIN5_PG_STATUS,
650 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
654 REG_UPDATE(DOMAIN7_PG_CONFIG,
655 DOMAIN7_POWER_GATE, power_gate);
657 REG_WAIT(DOMAIN7_PG_STATUS,
658 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
668 * dcn10_hubp_pg_control - HUBP power gate control.
670 * @hws: dce_hwseq reference.
671 * @hubp_inst: HUBP instance reference.
672 * @power_on: true to power the HUBP on (ungate), false to power-gate it.
674 * Enable or disable power gate in the specific HUBP instance. No-op when
 * HUBP power gating is disabled via debug option or when the domain
 * registers are not present on this ASIC. Waits for the PGFSM to reach
 * the requested power state after each update.
676 void dcn10_hubp_pg_control(
677 struct dce_hwseq *hws,
678 unsigned int hubp_inst,
/* power_gate is the register polarity: 1 = gated (off), 0 = ungated (on). */
681 uint32_t power_gate = power_on ? 0 : 1;
682 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
684 if (hws->ctx->dc->debug.disable_hubp_power_gate)
686 if (REG(DOMAIN0_PG_CONFIG) == 0)
/* HUBP0-3 map to the even power domains 0/2/4/6. */
690 case 0: /* DCHUBP0 */
691 REG_UPDATE(DOMAIN0_PG_CONFIG,
692 DOMAIN0_POWER_GATE, power_gate);
694 REG_WAIT(DOMAIN0_PG_STATUS,
695 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
698 case 1: /* DCHUBP1 */
699 REG_UPDATE(DOMAIN2_PG_CONFIG,
700 DOMAIN2_POWER_GATE, power_gate);
702 REG_WAIT(DOMAIN2_PG_STATUS,
703 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
706 case 2: /* DCHUBP2 */
707 REG_UPDATE(DOMAIN4_PG_CONFIG,
708 DOMAIN4_POWER_GATE, power_gate);
710 REG_WAIT(DOMAIN4_PG_STATUS,
711 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
714 case 3: /* DCHUBP3 */
715 REG_UPDATE(DOMAIN6_PG_CONFIG,
716 DOMAIN6_POWER_GATE, power_gate);
718 REG_WAIT(DOMAIN6_PG_STATUS,
719 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
/*
 * power_on_plane - ungate the DPP and HUBP power domains for one plane.
 *
 * Opens the DC IP request window (DC_IP_REQUEST_CNTL), powers on the DPP
 * and HUBP for @plane_id through the per-ASIC pg_control hooks, then
 * closes the window again.
 */
728 static void power_on_plane(
729 struct dce_hwseq *hws,
732 DC_LOGGER_INIT(hws->ctx->logger);
733 if (REG(DC_IP_REQUEST_CNTL)) {
734 REG_SET(DC_IP_REQUEST_CNTL, 0,
/* pg_control hooks are optional; some ASICs have no SW-controlled gating. */
737 if (hws->funcs.dpp_pg_control)
738 hws->funcs.dpp_pg_control(hws, plane_id, true);
740 if (hws->funcs.hubp_pg_control)
741 hws->funcs.hubp_pg_control(hws, plane_id, true);
743 REG_SET(DC_IP_REQUEST_CNTL, 0,
746 "Un-gated front end for pipe %d\n", plane_id);
/*
 * undo_DEGVIDCN10_253_wa - revert the DEGVIDCN10_253 stutter workaround.
 *
 * If the workaround is applied (HUBP0 kept powered with blank disabled),
 * re-blank HUBP0 and power-gate it again, then clear the applied flag.
 */
750 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
752 struct dce_hwseq *hws = dc->hwseq;
753 struct hubp *hubp = dc->res_pool->hubps[0];
755 if (!hws->wa_state.DEGVIDCN10_253_applied)
758 hubp->funcs->set_blank(hubp, true);
/* Open the IP request window, gate HUBP0, close the window. */
760 REG_SET(DC_IP_REQUEST_CNTL, 0,
763 hws->funcs.hubp_pg_control(hws, 0, false);
764 REG_SET(DC_IP_REQUEST_CNTL, 0,
767 hws->wa_state.DEGVIDCN10_253_applied = false;
/*
 * apply_DEGVIDCN10_253_wa - enable stutter when every pipe is power-gated.
 *
 * HW workaround DEGVIDCN10_253: with all HUBPs gated, stutter cannot
 * engage. Keep HUBP0 powered on (but with blanking disabled via
 * set_hubp_blank_en) so memory stutter works. Skipped when stutter is
 * disabled via debug option or the workaround flag is not set.
 */
770 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
772 struct dce_hwseq *hws = dc->hwseq;
773 struct hubp *hubp = dc->res_pool->hubps[0];
776 if (dc->debug.disable_stutter)
779 if (!hws->wa.DEGVIDCN10_253)
/* Only apply once every HUBP in the pool is power-gated. */
782 for (i = 0; i < dc->res_pool->pipe_count; i++) {
783 if (!dc->res_pool->hubps[i]->power_gated)
787 /* all pipe power gated, apply work around to enable stutter. */
789 REG_SET(DC_IP_REQUEST_CNTL, 0,
792 hws->funcs.hubp_pg_control(hws, 0, true);
793 REG_SET(DC_IP_REQUEST_CNTL, 0,
796 hubp->funcs->set_hubp_blank_en(hubp, false);
797 hws->wa_state.DEGVIDCN10_253_applied = true;
/*
 * dcn10_bios_golden_init - run VBIOS golden init for DCN and all pipes.
 *
 * Invokes the BIOS command table to initialize the DCN global state and
 * disable each pipe, then restores the self-refresh-force-enable setting
 * if the command table flipped it (S0i3 resume workaround below).
 */
800 void dcn10_bios_golden_init(struct dc *dc)
802 struct dce_hwseq *hws = dc->hwseq;
803 struct dc_bios *bp = dc->ctx->dc_bios;
805 bool allow_self_fresh_force_enable = true;
/* Optional per-ASIC S0i3 shortcut; returning true skips the BIOS init. */
807 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
/* Capture the pre-init self-refresh setting so it can be restored below. */
810 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
811 allow_self_fresh_force_enable =
812 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
815 /* WA for making DF sleep when idle after resume from S0i3.
816 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
817 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
818 * before calling command table and it changed to 1 after,
819 * it should be set back to 0.
822 /* initialize dcn global */
823 bp->funcs->enable_disp_power_gating(bp,
824 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
826 for (i = 0; i < dc->res_pool->pipe_count; i++) {
827 /* initialize dcn per pipe */
828 bp->funcs->enable_disp_power_gating(bp,
829 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
832 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
833 if (allow_self_fresh_force_enable == false &&
834 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
835 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
836 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
/*
 * false_optc_underflow_wa - workaround for spurious OPTC underflow.
 *
 * After waiting for MPCC disconnect on every pipe of @stream and
 * enabling blank-data double buffering, a false underflow can be latched;
 * clear it only if it was not already asserted before the sequence.
 */
840 static void false_optc_underflow_wa(
842 const struct dc_stream_state *stream,
843 struct timing_generator *tg)
848 if (!dc->hwseq->wa.false_optc_underflow)
/* Remember pre-existing underflow so a real one is not masked. */
851 underflow = tg->funcs->is_optc_underflow_occurred(tg);
853 for (i = 0; i < dc->res_pool->pipe_count; i++) {
854 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
856 if (old_pipe_ctx->stream != stream)
859 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
862 if (tg->funcs->set_blank_data_double_buffer)
863 tg->funcs->set_blank_data_double_buffer(tg, true);
/* Only clear underflow that appeared during this sequence. */
865 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
866 tg->funcs->clear_optc_underflow(tg);
/*
 * dcn10_enable_stream_timing - program pixel clock and OTG timing for a stream.
 *
 * Runs only on top (parent) pipes: enables the OPTC clock, programs the
 * pixel clock and OTG timing, sets the OTG blank color, blanks the OTG,
 * and finally enables the CRTC. Returns DC_ERROR_UNEXPECTED if either
 * the pixel-clock programming or the CRTC enable fails.
 */
869 enum dc_status dcn10_enable_stream_timing(
870 struct pipe_ctx *pipe_ctx,
871 struct dc_state *context,
874 struct dc_stream_state *stream = pipe_ctx->stream;
875 enum dc_color_space color_space;
876 struct tg_color black_color = {0};
878 /* by upper caller loop, pipe0 is parent pipe and be called first.
879 * back end is set up by for pipe0. Other children pipe share back end
880 * with pipe 0. No program is needed.
882 if (pipe_ctx->top_pipe != NULL)
885 /* TODO check if timing_changed, disable stream if timing changed */
887 /* HW program guide assume display already disable
888 * by unplug sequence. OTG assume stop.
890 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
892 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
893 pipe_ctx->clock_source,
894 &pipe_ctx->stream_res.pix_clk_params,
895 dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
896 &pipe_ctx->pll_settings)) {
898 return DC_ERROR_UNEXPECTED;
901 pipe_ctx->stream_res.tg->funcs->program_timing(
902 pipe_ctx->stream_res.tg,
904 pipe_ctx->pipe_dlg_param.vready_offset,
905 pipe_ctx->pipe_dlg_param.vstartup_start,
906 pipe_ctx->pipe_dlg_param.vupdate_offset,
907 pipe_ctx->pipe_dlg_param.vupdate_width,
908 pipe_ctx->stream->signal,
911 #if 0 /* move to after enable_crtc */
912 /* TODO: OPP FMT, ABM. etc. should be done here. */
913 /* or FPGA now. instance 0 only. TODO: move to opp.c */
915 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
917 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
918 pipe_ctx->stream_res.opp,
919 &stream->bit_depth_params,
922 /* program otg blank color */
923 color_space = stream->output_color_space;
924 color_space_to_black_color(dc, color_space, &black_color);
927 * The way 420 is packed, 2 channels carry Y component, 1 channel
928 * alternate between Cb and Cr, so both channels need the pixel
931 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
932 black_color.color_r_cr = black_color.color_g_y;
934 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
935 pipe_ctx->stream_res.tg->funcs->set_blank_color(
936 pipe_ctx->stream_res.tg,
/* Blank the OTG before enabling CRTC; also runs the false-underflow WA. */
939 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
940 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
941 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
942 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
943 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
946 /* VTG is within DCHUB command block. DCFCLK is always on */
947 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
949 return DC_ERROR_UNEXPECTED;
952 /* TODO program crtc source select for non-virtual signal*/
953 /* TODO program FMT */
954 /* TODO setup link_enc */
955 /* TODO set stream attributes */
956 /* TODO program audio */
957 /* TODO enable stream if timing changed */
958 /* TODO unblank stream if DP */
/*
 * dcn10_reset_back_end_for_pipe - tear down the back end for one pipe.
 *
 * Disables the stream/link (respecting dpms_off and fastboot quirks),
 * releases audio resources, and — for the top pipe only — disables the
 * CRTC, OPTC clock and DRR before detaching the stream from the pipe.
 */
963 static void dcn10_reset_back_end_for_pipe(
965 struct pipe_ctx *pipe_ctx,
966 struct dc_state *context)
969 struct dc_link *link;
970 DC_LOGGER_INIT(dc->ctx->logger);
/* Nothing to tear down if no stream encoder was ever attached. */
971 if (pipe_ctx->stream_res.stream_enc == NULL) {
972 pipe_ctx->stream = NULL;
976 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
977 link = pipe_ctx->stream->link;
978 /* DPMS may already disable or */
979 /* dpms_off status is incorrect due to fastboot
980 * feature. When system resume from S4 with second
981 * screen only, the dpms_off would be true but
982 * VBIOS lit up eDP, so check link status too.
984 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
985 core_link_disable_stream(pipe_ctx);
986 else if (pipe_ctx->stream_res.audio)
987 dc->hwss.disable_audio_stream(pipe_ctx);
989 if (pipe_ctx->stream_res.audio) {
990 /*disable az_endpoint*/
991 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
994 if (dc->caps.dynamic_audio == true) {
995 /*we have to dynamic arbitrate the audio endpoints*/
996 /*we free the resource, need reset is_audio_acquired*/
997 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
998 pipe_ctx->stream_res.audio, false);
999 pipe_ctx->stream_res.audio = NULL;
1004 /* by upper caller loop, parent pipe: pipe0, will be reset last.
1005 * back end share by all pipes and will be disable only when disable
1008 if (pipe_ctx->top_pipe == NULL) {
1010 if (pipe_ctx->stream_res.abm)
1011 dc->hwss.set_abm_immediate_disable(pipe_ctx);
1013 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
1015 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
1016 if (pipe_ctx->stream_res.tg->funcs->set_drr)
1017 pipe_ctx->stream_res.tg->funcs->set_drr(
1018 pipe_ctx->stream_res.tg, NULL);
/* Only log when this pipe_ctx belongs to the current state. */
1021 for (i = 0; i < dc->res_pool->pipe_count; i++)
1022 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
1025 if (i == dc->res_pool->pipe_count)
1028 pipe_ctx->stream = NULL;
1029 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
1030 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
/*
 * dcn10_hw_wa_force_recovery - forced HUBP/HUBBUB reset to recover from
 * pipe underflow.
 *
 * Executes the HW-recommended sequence (listed verbatim below): blank
 * every HUBP, assert the HUBBUB global soft reset, pulse HUBP_DISABLE
 * high then low, release the soft reset, and finally un-blank the HUBPs.
 * No-op unless dc->debug.recovery_enabled is set.
 *
 * Fix: the "HUBP_DISABLE=0" and "HUBP_BLANK_EN=0" steps previously passed
 * true, leaving every HUBP disabled and blanked after recovery; they must
 * pass false to clear the bits, matching the documented sequence.
 */
1033 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1037 bool need_recover = true;
1039 if (!dc->debug.recovery_enabled)
1042 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1043 struct pipe_ctx *pipe_ctx =
1044 &dc->current_state->res_ctx.pipe_ctx[i];
1045 if (pipe_ctx != NULL) {
1046 hubp = pipe_ctx->plane_res.hubp;
1047 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1048 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1049 /* one pipe underflow, we will reset all the pipes*/
1050 need_recover = true;
1058 DCHUBP_CNTL:HUBP_BLANK_EN=1
1059 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1060 DCHUBP_CNTL:HUBP_DISABLE=1
1061 DCHUBP_CNTL:HUBP_DISABLE=0
1062 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1063 DCSURF_PRIMARY_SURFACE_ADDRESS
1064 DCHUBP_CNTL:HUBP_BLANK_EN=0
1067 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1068 struct pipe_ctx *pipe_ctx =
1069 &dc->current_state->res_ctx.pipe_ctx[i];
1070 if (pipe_ctx != NULL) {
1071 hubp = pipe_ctx->plane_res.hubp;
1072 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1073 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1074 hubp->funcs->set_hubp_blank_en(hubp, true);
1077 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1078 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1080 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1081 struct pipe_ctx *pipe_ctx =
1082 &dc->current_state->res_ctx.pipe_ctx[i];
1083 if (pipe_ctx != NULL) {
1084 hubp = pipe_ctx->plane_res.hubp;
1085 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1086 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1087 hubp->funcs->hubp_disable_control(hubp, true);
1090 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1091 struct pipe_ctx *pipe_ctx =
1092 &dc->current_state->res_ctx.pipe_ctx[i];
1093 if (pipe_ctx != NULL) {
1094 hubp = pipe_ctx->plane_res.hubp;
1095 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1096 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1097 hubp->funcs->hubp_disable_control(hubp, false);
1100 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1101 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1102 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1103 struct pipe_ctx *pipe_ctx =
1104 &dc->current_state->res_ctx.pipe_ctx[i];
1105 if (pipe_ctx != NULL) {
1106 hubp = pipe_ctx->plane_res.hubp;
1107 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1108 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1109 hubp->funcs->set_hubp_blank_en(hubp, false);
/*
 * Sanity check that HUBBUB still allows DRAM p-state change; on failure,
 * optionally dump HW state, break to the debugger and attempt the forced
 * recovery workaround, then re-verify.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	/* nothing to verify when the cap is not implemented for this ASIC */
	if (!hubbub->funcs->verify_allow_pstate_change_high)

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();

		/* recovery attempted: check the p-state allow signal once more */
		if (dcn10_hw_wa_force_recovery(dc)) {
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
1140 /* trigger HW to start disconnect plane from stream on the next vsync */
1141 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1143 struct dce_hwseq *hws = dc->hwseq;
1144 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1145 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1146 struct mpc *mpc = dc->res_pool->mpc;
1147 struct mpc_tree *mpc_tree_params;
1148 struct mpcc *mpcc_to_remove = NULL;
1149 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1151 mpc_tree_params = &(opp->mpc_tree_params);
1152 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1155 if (mpcc_to_remove == NULL)
1158 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1159 // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1160 // so don't wait for MPCC_IDLE in the programming sequence
1161 if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1162 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1164 dc->optimized_required = true;
1166 if (hubp->funcs->hubp_disconnect)
1167 hubp->funcs->hubp_disconnect(hubp);
1169 if (dc->debug.sanity_checks)
1170 hws->funcs.verify_allow_pstate_change_high(dc);
/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* open the power-gate request window */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
		/* reset DPP state before it loses power */
		dpp->funcs->dpp_reset(dpp);
		/* close the power-gate request window again */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
			"Power gated front end %d\n", hubp->inst);
/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* the MPCC disconnect must have completed before gating clocks */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* gate the OPP pipe clock only when no MPCC remains on this OPP */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* detach this pipe_ctx from all stream/plane resources */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
/* Power down the plane front end unless it is already power gated. */
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	/* nothing to do without a HUBP or when it is already gated */
	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)

	hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	/* apply the DEGVIDCN10-253 hardware workaround after disable */
	apply_DEGVIDCN10_253_wa(dc);

	DC_LOG_DC("Power down front end %d\n",
					pipe_ctx->pipe_idx);
/*
 * Bring every pipe to a known disabled state at init time, except pipes
 * carried over by seamless boot. Also resets DET sizes, MPC muxes and
 * power-gates unused DSCs.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* rebuild a minimal pipe_ctx so the disconnect/disable path
		 * below has valid resources to work with
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);

		tg->funcs->tg_init(tg);

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obtain real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			/* keep the DSC that feeds the active OPTC powered */
			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
/*
 * One-time hardware bring-up for the display core: clocks, DCCG, VGA
 * disable, BIOS golden init, reference clock discovery, link encoders,
 * pipe init, audio/backlight/ABM/DMCU init and clock gating.
 */
void dcn10_init_hw(struct dc *dc)
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	/* FPGA emulation environment: minimal register bring-up */
	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);

				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;

		/* no valid fw_info: cannot derive reference clocks */
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;

	/* we want to turn off all dp displays before doing detection */
	dc_link_blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);

			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn10_power_down_on_boot(struct dc *dc)
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;

	get_edp_links(dc, edp_links, &edp_num);
		edp_link = edp_links[0];

	/* prefer the eDP path (backlight off -> power down -> panel off) */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
		/* otherwise power down on the first link with an enabled DIG */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
/* Reset back ends whose stream went away or needs reprogramming in the
 * new state, iterating bottom-up so dependent pipes go first.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;

	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* only pipes that currently drive a stream need a reset */
		if (!pipe_ctx_old->stream)

		/* secondary pipes are handled via their top pipe */
		if (pipe_ctx_old->top_pipe)

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);

				old_clk->funcs->cs_power_down(old_clk);
/* For side-by-side / top-and-bottom stereo on the secondary split pipe,
 * temporarily swap the left surface address with the right one; *addr
 * receives the original left address so the caller can restore it.
 */
static bool patch_address_for_sbs_tb_stereo(
		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	bool sec_split = pipe_ctx->top_pipe &&
			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;

	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
		(pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		 pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
		*addr = plane_state->address.grph_stereo.left_addr;
		plane_state->address.grph_stereo.left_addr =
			plane_state->address.grph_stereo.right_addr;

	/* 3D view on a non-stereo surface: mirror left into right */
	if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
		plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
		plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
		plane_state->address.grph_stereo.right_addr =
			plane_state->address.grph_stereo.left_addr;
		plane_state->address.grph_stereo.right_meta_addr =
			plane_state->address.grph_stereo.left_meta_addr;
/* Program the plane's surface address into HUBP (with optional stereo
 * address patching) and mirror the address into the plane status.
 */
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)

	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	/* immediate flips take effect right away: update current too */
	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

		/* restore the left address saved by the stereo patch */
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/* Program the DPP input LUT and degamma for the plane's input transfer
 * function (predefined HW curves where available, PWL otherwise).
 */
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
		const struct dc_plane_state *plane_state)
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;

	if (dpp_base == NULL)

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	/* program the input LUT only when it is meaningful for this format */
	if (plane_state->gamma_correction &&
		!dpp_base->ctx->dc->debug.always_use_regamma
		&& !plane_state->gamma_correction->is_identity
		&& dce_use_lut(plane_state->format))
		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);

		/* no transfer function: bypass degamma */
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	else if (tf->type == TF_TYPE_PREDEFINED) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
		case TRANSFER_FUNCTION_PQ:
			/* no fixed HW curve for PQ: build and program a PWL */
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
	} else if (tf->type == TF_TYPE_BYPASS) {
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
		/* distributed-points curve: translate to PWL and program it */
		cm_helper_translate_curve_to_degamma_hw_format(tf,
					&dpp_base->degamma_params);
		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
					&dpp_base->degamma_params);
1780 #define MAX_NUM_HW_POINTS 0x200
/* Dump a transfer function's points to the gamma logs at increasing
 * verbosity levels.
 */
static void log_tf(struct dc_context *ctx,
		struct dc_transfer_func *tf, uint32_t hw_points_num)
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf

	DC_LOGGER_INIT(ctx->logger);
	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* hw points: red at default verbosity, green/blue only at full */
	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);

	/* remaining non-hw points only at the most verbose levels */
	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/* Program the DPP output (regamma) transfer function for the stream. */
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
		const struct dc_stream_state *stream)
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	/* predefined sRGB maps onto the fixed HW regamma curve */
	if (stream->out_transfer_func &&
		stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
		stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);

	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
	 * update is needed.
	 */
	else if (cm_helper_translate_curve_to_hw_format(
			stream->out_transfer_func,
			&dpp->regamma_params, false)) {
		dpp->funcs->dpp_program_regamma_pwl(
				&dpp->regamma_params, OPP_REGAMMA_USER);
		/* translation failed: bypass regamma */
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);

	if (stream != NULL && stream->ctx != NULL &&
			stream->out_transfer_func != NULL) {
			stream->out_transfer_func,
			dpp->regamma_params.hw_points_num);
/* Lock or unlock pipe programming via the TG master update lock. */
void dcn10_pipe_control_lock(
	struct pipe_ctx *pipe,
	struct dce_hwseq *hws = dc->hwseq;

	/* use TG master update lock to lock everything on the TG
	 * therefore only top pipe need to lock
	 */
	if (!pipe || pipe->top_pipe)

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
/**
 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
 *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely or from old values from being retained in
 * the case where the viewport changes in the same frame as the cursor.
 *
 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
 * too close to VUPDATE, then stall out until VUPDATE finishes.
 *
 * TODO: Optimize cursor programming to be once per frame before VUPDATE
 * to avoid the need for this workaround.
 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* both hooks are required to compute the keepout window */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;

	/* Calculate time until VUPDATE in microseconds. */
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
/* Lock/unlock cursor programming for the pipe's MPCC tree, going through
 * the DMUB HW lock manager when the link requires it (e.g. PSR).
 */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
	if (!pipe || pipe->top_pipe)

	/* Prevent cursor lock from stalling out cursor updates. */
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
		/* no DMUB lock needed: take the MPC cursor lock directly */
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
/* Poll for a TG triggered reset, waiting at most a fixed number of
 * frames; logs and bails out early if the TG counter is stuck.
 */
static bool wait_for_reset_trigger_to_occur(
	struct dc_context *dc_ctx,
	struct timing_generator *tg)

	/* To avoid endless loop we wait at most
	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
	const uint32_t frames_to_wait_on_triggered_reset = 10;

	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {

		/* stuck counter means we would never see the reset */
		if (!tg->funcs->is_counter_moving(tg)) {
			DC_ERROR("TG counter is not moving!\n");

		if (tg->funcs->did_triggered_reset_occur(tg)) {

			/* usually occurs at i=1 */
			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",

		/* Wait for one frame. */
		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);

	DC_ERROR("GSL: Timeout on reset trigger!\n");
/* Reduce the fraction *numerator / *denominator by dividing out common
 * prime factors (table covers primes up to 997). When checkUint32Bounary
 * is set, stop as soon as both values fit into 32 bits and report
 * success; otherwise the reduction always reports success.
 */
static uint64_t reduceSizeAndFraction(uint64_t *numerator,
	uint64_t *denominator,
	bool checkUint32Bounary)
	/* without the 32-bit bound check the result is success by default */
	bool ret = checkUint32Bounary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;
		/* early out: both values already fit into 32 bits */
		if (checkUint32Bounary &&
			num <= max_int32 && denom <= max_int32) {

		/* divide this prime out as long as it divides both values */
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				denom = denom_result;
		} while (num_remainder == 0 && denom_remainder == 0);

	*denominator = denom;
2048 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2050 uint32_t master_pipe_refresh_rate =
2051 pipe->stream->timing.pix_clk_100hz * 100 /
2052 pipe->stream->timing.h_total /
2053 pipe->stream->timing.v_total;
2054 return master_pipe_refresh_rate <= 30;
/* Effective pixel-clock divider for a pipe: accounts for low refresh
 * rate (when requested), YCbCr 4:2:0 encoding, and the number of pipes
 * in the ODM chain.
 */
static uint8_t get_clock_divider(struct pipe_ctx *pipe,
		bool account_low_refresh_rate)
	uint32_t clock_divider = 1;
	uint32_t numpipes = 1;

	if (account_low_refresh_rate && is_low_refresh_rate(pipe))

	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)

	/* count the pipes in the ODM chain */
	while (pipe->next_odm_pipe) {
		pipe = pipe->next_odm_pipe;

	clock_divider *= numpipes;

	return clock_divider;
/* Align the DP DTO phase/modulo of grouped pipes against the embedded
 * panel's timing (packed in dc->config.vblank_alignment_dto_params) so
 * their vblanks can be synchronized. Returns the master pipe index.
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* unpack h_total / v_total / pixel clock from the packed u64 */
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* embedded panel: DTO is its own clock ratio */
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
				/* others: scale by the embedded timing ratio */
				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;

		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				/* read back the override and refresh SW timing */
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);

	kfree(hw_crtc_timing);
/*
 * dcn10_enable_vblanks_synchronization() - align the vblanks of all pipes in
 * the group to the master pipe chosen by dcn10_align_pixel_clocks().
 * Clears per-stream sync flags, aligns DP DTOs, then calls the TG
 * align_vblanks hook for every non-master pipe.
 */
2166 void dcn10_enable_vblanks_synchronization(
2170 struct pipe_ctx *grouped_pipes[])
2172 struct dc_context *dc_ctx = dc->ctx;
2173 struct output_pixel_processor *opp;
2174 struct timing_generator *tg;
2175 int i, width, height, master;
/* Temporarily reprogram DPG dimensions of the slave pipes to 2*height+1 -
 * presumably a guard while timings shift during sync; restored below.
 * TODO(review): confirm intent of the 2*h+1 value against full source. */
2177 for (i = 1; i < group_size; i++) {
2178 opp = grouped_pipes[i]->stream_res.opp;
2179 tg = grouped_pipes[i]->stream_res.tg;
2180 tg->funcs->get_otg_active_size(tg, &width, &height);
2181 if (opp->funcs->opp_program_dpg_dimensions)
2182 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Reset sync state for every stream in the group before realigning. */
2185 for (i = 0; i < group_size; i++) {
2186 if (grouped_pipes[i]->stream == NULL)
2188 grouped_pipes[i]->stream->vblank_synchronized = false;
2189 grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2192 DC_SYNC_INFO("Aligning DP DTOs\n")
2194 master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2196 DC_SYNC_INFO("Synchronizing VBlanks\n");
/* Align each slave TG's vblank to the master TG, passing both pixel
 * clocks and ODM clock dividers so the TG can compute the offset. */
2199 for (i = 0; i < group_size; i++) {
2200 if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2201 grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2202 grouped_pipes[master]->stream_res.tg,
2203 grouped_pipes[i]->stream_res.tg,
2204 grouped_pipes[master]->stream->timing.pix_clk_100hz,
2205 grouped_pipes[i]->stream->timing.pix_clk_100hz,
2206 get_clock_divider(grouped_pipes[master], false),
2207 get_clock_divider(grouped_pipes[i], false));
2208 grouped_pipes[i]->stream->vblank_synchronized = true;
2210 grouped_pipes[master]->stream->vblank_synchronized = true;
2211 DC_SYNC_INFO("Sync complete\n");
/* Restore the real DPG dimensions on the slave pipes. */
2214 for (i = 1; i < group_size; i++) {
2215 opp = grouped_pipes[i]->stream_res.opp;
2216 tg = grouped_pipes[i]->stream_res.tg;
2217 tg->funcs->get_otg_active_size(tg, &width, &height);
2218 if (opp->funcs->opp_program_dpg_dimensions)
2219 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
/*
 * dcn10_enable_timing_synchronization() - synchronize the OTGs of a pipe
 * group by arming a reset trigger on every slave TG (slaved to pipe 0's TG),
 * waiting for the trigger to fire, then disarming it.
 */
2223 void dcn10_enable_timing_synchronization(
2227 struct pipe_ctx *grouped_pipes[])
2229 struct dc_context *dc_ctx = dc->ctx;
2230 struct output_pixel_processor *opp;
2231 struct timing_generator *tg;
2232 int i, width, height;
2234 DC_SYNC_INFO("Setting up OTG reset trigger\n");
/* Temporarily grow slave DPG dimensions to 2*height+1 while the OTGs are
 * being reset; restored to the true size at the end of this function.
 * TODO(review): confirm intent of the 2*h+1 value against full source. */
2236 for (i = 1; i < group_size; i++) {
2237 opp = grouped_pipes[i]->stream_res.opp;
2238 tg = grouped_pipes[i]->stream_res.tg;
2239 tg->funcs->get_otg_active_size(tg, &width, &height);
2240 if (opp->funcs->opp_program_dpg_dimensions)
2241 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Any prior vblank synchronization is invalidated by an OTG reset. */
2244 for (i = 0; i < group_size; i++) {
2245 if (grouped_pipes[i]->stream == NULL)
2247 grouped_pipes[i]->stream->vblank_synchronized = false;
/* Arm every slave TG to reset in lockstep with pipe 0's TG. */
2250 for (i = 1; i < group_size; i++)
2251 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2252 grouped_pipes[i]->stream_res.tg,
2253 grouped_pipes[0]->stream_res.tg->inst);
2255 DC_SYNC_INFO("Waiting for trigger\n");
2257 /* Need to get only check 1 pipe for having reset as all the others are
2258 * synchronized. Look at last pipe programmed to reset.
2261 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
/* Disarm all triggers once the reset has occurred. */
2262 for (i = 1; i < group_size; i++)
2263 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2264 grouped_pipes[i]->stream_res.tg);
/* Restore the real DPG dimensions on the slave pipes. */
2266 for (i = 1; i < group_size; i++) {
2267 opp = grouped_pipes[i]->stream_res.opp;
2268 tg = grouped_pipes[i]->stream_res.tg;
2269 tg->funcs->get_otg_active_size(tg, &width, &height);
2270 if (opp->funcs->opp_program_dpg_dimensions)
2271 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2274 DC_SYNC_INFO("Sync complete\n");
/*
 * dcn10_enable_per_frame_crtc_position_reset() - arm a per-frame CRTC
 * position reset on every pipe in the group (when the TG supports it),
 * then wait for each TG's reset trigger to occur.
 */
2277 void dcn10_enable_per_frame_crtc_position_reset(
2280 struct pipe_ctx *grouped_pipes[])
2282 struct dc_context *dc_ctx = dc->ctx;
2285 DC_SYNC_INFO("Setting up\n");
/* enable_crtc_reset is optional; skip pipes whose TG lacks it. */
2286 for (i = 0; i < group_size; i++)
2287 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2288 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2289 grouped_pipes[i]->stream_res.tg,
2291 &grouped_pipes[i]->stream->triggered_crtc_reset);
2293 DC_SYNC_INFO("Waiting for trigger\n");
/* Unlike timing sync, each pipe is polled individually here. */
2295 for (i = 0; i < group_size; i++)
2296 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2298 DC_SYNC_INFO("Multi-display sync is complete\n");
/*
 * mmhub_read_vm_system_aperture_settings() - read the MC VM system aperture
 * registers and convert them into byte addresses for @apt.
 * Temporary: future code should get these values from the KMD directly
 * (see the comment on mmhub_read_vm_context0_settings below).
 */
2301 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2302 struct vm_system_aperture_param *apt,
2303 struct dce_hwseq *hws)
2305 PHYSICAL_ADDRESS_LOC physical_page_number;
2306 uint32_t logical_addr_low;
2307 uint32_t logical_addr_high;
2309 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2310 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2311 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2312 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2314 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2315 LOGICAL_ADDR, &logical_addr_low);
2317 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2318 LOGICAL_ADDR, &logical_addr_high);
/* Page number -> byte address: << 12 (4 KiB pages).  The low/high aperture
 * bounds use << 18 - presumably a 256 KiB register granularity; verify
 * against the register spec. */
2320 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
2321 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
2322 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
2325 /* Temporary read settings, future will get values from kmd directly */
/*
 * mmhub_read_vm_context0_settings() - read the VM context 0 page-table
 * base/start/end and fault-default registers into @vm0, then rebase the
 * page-table base address from UMA space into the DC address space.
 */
2326 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2327 struct vm_context0_param *vm0,
2328 struct dce_hwseq *hws)
2330 PHYSICAL_ADDRESS_LOC fb_base;
2331 PHYSICAL_ADDRESS_LOC fb_offset;
2332 uint32_t fb_base_value;
2333 uint32_t fb_offset_value;
2335 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2336 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
/* Page directory base (64-bit, split across HI32/LO32). */
2338 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2339 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2340 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2341 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
/* Logical page-number range covered by context 0. */
2343 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2344 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2345 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2346 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2348 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2349 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2350 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2351 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
/* Physical page used for translation faults. */
2353 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2354 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2355 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2356 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2359 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2360 * Therefore we need to do
2361 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2362 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
/* FB base/offset registers are in 16 MiB units (<< 24 to bytes). */
2364 fb_base.quad_part = (uint64_t)fb_base_value << 24;
2365 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2366 vm0->pte_base.quad_part += fb_base.quad_part;
2367 vm0->pte_base.quad_part -= fb_offset.quad_part;
/*
 * dcn10_program_pte_vm() - read the current system-aperture and VM context 0
 * settings from the MMHUB registers and program them into @hubp, enabling
 * GPU VM page-table translation for the plane's surface fetches.
 */
2371 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2373 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2374 struct vm_system_aperture_param apt = {0};
2375 struct vm_context0_param vm0 = {0};
2377 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2378 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2380 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2381 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
/*
 * dcn10_enable_plane() - power up and clock the pipe's HUBP/OPP so a plane
 * can be programmed: undo the DEGVIDCN10_253 workaround, power on the
 * plane's power domain, enable the DCHUB and OPP pipe clocks, and program
 * the PTE VM settings when GPU VM is in use.
 */
2384 static void dcn10_enable_plane(
2386 struct pipe_ctx *pipe_ctx,
2387 struct dc_state *context)
2389 struct dce_hwseq *hws = dc->hwseq;
2391 if (dc->debug.sanity_checks) {
2392 hws->funcs.verify_allow_pstate_change_high(dc);
2395 undo_DEGVIDCN10_253_wa(dc);
2397 power_on_plane(dc->hwseq,
2398 pipe_ctx->plane_res.hubp->inst);
2400 /* enable DCFCLK current DCHUB */
2401 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2403 /* make sure OPP_PIPE_CLOCK_EN = 1 */
2404 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2405 pipe_ctx->stream_res.opp,
2408 if (dc->config.gpu_vm_support)
2409 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2411 if (dc->debug.sanity_checks) {
2412 hws->funcs.verify_allow_pstate_change_high(dc);
/* Re-enable the flip interrupt only on the top pipe whose plane state
 * asks for it and whose HUBP implements the hook. */
2415 if (!pipe_ctx->top_pipe
2416 && pipe_ctx->plane_state
2417 && pipe_ctx->plane_state->flip_int_enabled
2418 && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2419 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
/*
 * dcn10_program_gamut_remap() - program the DPP gamut remap (CSC temperature)
 * matrix.  Stream-level remap takes priority over plane-level remap; when
 * neither is enabled the adjustment is left in BYPASS.
 */
2423 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2426 struct dpp_grph_csc_adjustment adjust;
2427 memset(&adjust, 0, sizeof(adjust));
/* Default: bypass unless a remap matrix is enabled below. */
2428 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2431 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2432 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2433 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2434 adjust.temperature_matrix[i] =
2435 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2436 } else if (pipe_ctx->plane_state &&
2437 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2438 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2439 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2440 adjust.temperature_matrix[i] =
2441 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2444 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
/*
 * dcn10_is_rear_mpo_fix_required() - return true when @pipe_ctx is a rear
 * MPO plane (layer_index > 0) in an RGB colorspace whose topmost pipe holds
 * the front plane (layer_index == 0), i.e. the front MPO plane is visible.
 * Used to decide whether the OCSC RGB-bias brightness fix must be applied.
 */
2448 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2450 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2451 if (pipe_ctx->top_pipe) {
2452 struct pipe_ctx *top = pipe_ctx->top_pipe;
2454 while (top->top_pipe)
2455 top = top->top_pipe; // Traverse to top pipe_ctx
2456 if (top->plane_state && top->plane_state->layer_index == 0)
2457 return true; // Front MPO plane not hidden
/*
 * dcn10_set_csc_adjustment_rgb_mpo_fix() - program the OCSC matrix with the
 * RGB bias terms (matrix[3]/[7]/[11]) forced to zero, then restore the
 * caller's bias values in @matrix afterwards.
 * NOTE(review): the lines zeroing matrix[3]/[7]/[11] before the
 * dpp_set_csc_adjustment call are elided from this listing - verify.
 */
2463 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2465 // Override rear plane RGB bias to fix MPO brightness
2466 uint16_t rgb_bias = matrix[3];
2471 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
/* Restore the caller-visible matrix; only the HW got the zeroed bias. */
2472 matrix[3] = rgb_bias;
2473 matrix[7] = rgb_bias;
2474 matrix[11] = rgb_bias;
/*
 * dcn10_program_output_csc() - program the output color-space conversion.
 * When a stream CSC adjustment is enabled, program the caller's matrix,
 * applying the rear-MPO RGB-bias fix when needed; otherwise fall back to
 * the default CSC for @colorspace.
 */
2477 void dcn10_program_output_csc(struct dc *dc,
2478 struct pipe_ctx *pipe_ctx,
2479 enum dc_color_space colorspace,
2483 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2484 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2486 /* MPO is broken with RGB colorspaces when OCSC matrix
2487 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2488 * Blending adds offsets from front + rear to rear plane
2490 * Fix is to set RGB bias to 0 on rear plane, top plane
2491 * black value pixels add offset instead of rear + front
2494 int16_t rgb_bias = matrix[3];
2495 // matrix[3/7/11] are all the same offset value
2497 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2498 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
/* No positive bias or not a rear MPO plane: program as-is. */
2500 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
/* No custom adjustment: use the colorspace's default CSC. */
2504 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2505 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
/*
 * dcn10_update_dpp() - program the DPP's input CSC (from the plane's format,
 * input CSC matrix and colorspace) and its prescale bias/scale registers.
 */
2509 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2511 struct dc_bias_and_scale bns_params = {0};
2513 // program the input csc
2514 dpp->funcs->dpp_setup(dpp,
2515 plane_state->format,
2516 EXPANSION_MODE_ZERO,
2517 plane_state->input_csc_color_matrix,
2518 plane_state->color_space,
2521 //set scale and bias registers
2522 build_prescale_params(&bns_params, plane_state);
/* Optional hook: not every DPP revision implements bias-and-scale. */
2523 if (dpp->funcs->dpp_program_bias_and_scale)
2524 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
/*
 * dcn10_update_visual_confirm_color() - compute the debug "visual confirm"
 * border color for the selected confirm mode (HDR/surface/swizzle) - or the
 * stream's black color otherwise - and program it as the MPCC background.
 */
2527 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2529 struct mpc *mpc = dc->res_pool->mpc;
2531 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2532 get_hdr_visual_confirm_color(pipe_ctx, color);
2533 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2534 get_surface_visual_confirm_color(pipe_ctx, color);
2535 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2536 get_surface_tile_visual_confirm_color(pipe_ctx, color);
/* Default (visual confirm off): plain black for the output colorspace. */
2538 color_space_to_black_color(
2539 dc, pipe_ctx->stream->output_color_space, color);
2541 if (mpc->funcs->set_bg_color)
2542 mpc->funcs->set_bg_color(mpc, color, mpcc_id);
/*
 * dcn10_update_mpcc() - build the MPCC blend configuration for the pipe's
 * plane and update or rebuild the MPC tree.  For partial updates only the
 * blending is reprogrammed; full updates remove any MPCC already bound to
 * this DPP and re-insert the plane into the tree.
 */
2545 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2547 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2548 struct mpcc_blnd_cfg blnd_cfg = {0};
/* Per-pixel alpha only matters when something blends underneath. */
2549 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2551 struct mpcc *new_mpcc;
2552 struct mpc *mpc = dc->res_pool->mpc;
2553 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2555 blnd_cfg.overlap_only = false;
2556 blnd_cfg.global_gain = 0xff;
2558 if (per_pixel_alpha) {
2559 /* DCN1.0 has output CM before MPC which seems to screw with
2560 * pre-multiplied alpha.
2562 blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
2563 pipe_ctx->stream->output_color_space)
2564 && pipe_ctx->plane_state->pre_multiplied_alpha);
/* Combine per-pixel alpha with a global gain when requested. */
2565 if (pipe_ctx->plane_state->global_alpha) {
2566 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
2567 blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
2569 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
/* No per-pixel alpha: global alpha only. */
2572 blnd_cfg.pre_multiplied_alpha = false;
2573 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2576 if (pipe_ctx->plane_state->global_alpha)
2577 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2579 blnd_cfg.global_alpha = 0xff;
2583 * Note: currently there is a bug in init_hw such that
2584 * on resume from hibernate, BIOS sets up MPCC0, and
2585 * we do mpcc_remove but the mpcc cannot go to idle
2586 * after remove. This cause us to pick mpcc1 here,
2587 * which causes a pstate hang for yet unknown reason.
2589 mpcc_id = hubp->inst;
2591 /* If there is no full update, don't need to touch MPC tree*/
2592 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2593 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2594 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2598 /* check if this MPCC is already being used */
2599 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2600 /* remove MPCC if being used */
2601 if (new_mpcc != NULL)
2602 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2604 if (dc->debug.sanity_checks)
2605 mpc->funcs->assert_mpcc_idle_before_connect(
2606 dc->res_pool->mpc, mpcc_id);
2608 /* Call MPC to insert new plane */
2609 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2616 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2618 ASSERT(new_mpcc != NULL);
/* Record the routing so the HUBP knows its OPP and MPCC. */
2619 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2620 hubp->mpcc_id = mpcc_id;
/*
 * update_scaler() - program the DPP scaler from the pipe's precomputed
 * scl_data, enabling line-buffer alpha only when blending actually needs
 * per-pixel alpha (plane requests it and a pipe blends beneath).
 */
2623 static void update_scaler(struct pipe_ctx *pipe_ctx)
2625 bool per_pixel_alpha =
2626 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2628 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2629 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2630 /* scaler configuration */
2631 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2632 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * dcn10_update_dchubp_dpp() - apply the plane's pending updates to its
 * HUBP and DPP: DPP clock gating/DTO, DLG/TTU setup, input CSC, MPCC
 * blending, scaler, viewport, cursor, gamut remap, output CSC, and the
 * surface config, gated by the plane_state update_flags.
 */
2635 static void dcn10_update_dchubp_dpp(
2637 struct pipe_ctx *pipe_ctx,
2638 struct dc_state *context)
2640 struct dce_hwseq *hws = dc->hwseq;
2641 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2642 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2643 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2644 struct plane_size size = plane_state->plane_size;
2645 unsigned int compat_level = 0;
2646 bool should_divided_by_2 = false;
2648 /* depends on DML calculation, DPP clock value may change dynamically */
2649 /* If request max dpp clk is lower than current dispclk, no need to
2652 if (plane_state->update_flags.bits.full_update) {
2654 /* new calculated dispclk, dppclk are stored in
2655 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2656 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2657 * dcn10_validate_bandwidth compute new dispclk, dppclk.
2658 * dispclk will put in use after optimize_bandwidth when
2659 * ramp_up_dispclk_with_dpp is called.
2660 * there are two places for dppclk be put in use. One location
2661 * is the same as the location as dispclk. Another is within
2662 * update_dchubp_dpp which happens between pre_bandwidth and
2663 * optimize_bandwidth.
2664 * dppclk updated within update_dchubp_dpp will cause new
2665 * clock values of dispclk and dppclk not be in use at the same
2666 * time. when clocks are decreased, this may cause dppclk is
2667 * lower than previous configuration and let pipe stuck.
2668 * for example, eDP + external dp, change resolution of DP from
2669 * 1920x1080x144hz to 1280x960x60hz.
2670 * before change: dispclk = 337889 dppclk = 337889
2671 * change mode, dcn10_validate_bandwidth calculate
2672 * dispclk = 143122 dppclk = 143122
2673 * update_dchubp_dpp be executed before dispclk be updated,
2674 * dispclk = 337889, but dppclk use new value dispclk /2 =
2675 * 168944. this will cause pipe pstate warning issue.
2676 * solution: between pre_bandwidth and optimize_bandwidth, while
2677 * dispclk is going to be decreased, keep dppclk = dispclk
2679 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2680 dc->clk_mgr->clks.dispclk_khz)
2681 should_divided_by_2 = false;
2683 should_divided_by_2 =
2684 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2685 dc->clk_mgr->clks.dispclk_khz / 2;
2687 dpp->funcs->dpp_dppclk_control(
2689 should_divided_by_2,
/* DCCG DTO programming is optional; some pools have no dccg. */
2692 if (dc->res_pool->dccg)
2693 dc->res_pool->dccg->funcs->update_dpp_dto(
2696 pipe_ctx->plane_res.bw.dppclk_khz);
/* Keep the clk_mgr's cached dppclk in sync with the divider choice. */
2698 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2699 dc->clk_mgr->clks.dispclk_khz / 2 :
2700 dc->clk_mgr->clks.dispclk_khz;
2703 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2704 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2705 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2707 if (plane_state->update_flags.bits.full_update) {
2708 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2710 hubp->funcs->hubp_setup(
2712 &pipe_ctx->dlg_regs,
2713 &pipe_ctx->ttu_regs,
2715 &pipe_ctx->pipe_dlg_param);
2716 hubp->funcs->hubp_setup_interdependent(
2718 &pipe_ctx->dlg_regs,
2719 &pipe_ctx->ttu_regs);
/* Surface size follows the scaler viewport, not the full plane. */
2722 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2724 if (plane_state->update_flags.bits.full_update ||
2725 plane_state->update_flags.bits.bpp_change)
2726 dcn10_update_dpp(dpp, plane_state);
2728 if (plane_state->update_flags.bits.full_update ||
2729 plane_state->update_flags.bits.per_pixel_alpha_change ||
2730 plane_state->update_flags.bits.global_alpha_change)
2731 hws->funcs.update_mpcc(dc, pipe_ctx);
2733 if (plane_state->update_flags.bits.full_update ||
2734 plane_state->update_flags.bits.per_pixel_alpha_change ||
2735 plane_state->update_flags.bits.global_alpha_change ||
2736 plane_state->update_flags.bits.scaling_change ||
2737 plane_state->update_flags.bits.position_change) {
2738 update_scaler(pipe_ctx);
2741 if (plane_state->update_flags.bits.full_update ||
2742 plane_state->update_flags.bits.scaling_change ||
2743 plane_state->update_flags.bits.position_change) {
2744 hubp->funcs->mem_program_viewport(
2746 &pipe_ctx->plane_res.scl_data.viewport,
2747 &pipe_ctx->plane_res.scl_data.viewport_c);
/* Re-apply cursor state when a cursor surface address is set. */
2750 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2751 dc->hwss.set_cursor_position(pipe_ctx);
2752 dc->hwss.set_cursor_attribute(pipe_ctx);
2754 if (dc->hwss.set_cursor_sdr_white_level)
2755 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2758 if (plane_state->update_flags.bits.full_update) {
2760 dc->hwss.program_gamut_remap(pipe_ctx);
2762 dc->hwss.program_output_csc(dc,
2764 pipe_ctx->stream->output_color_space,
2765 pipe_ctx->stream->csc_color_matrix.matrix,
2766 pipe_ctx->stream_res.opp->inst);
2769 if (plane_state->update_flags.bits.full_update ||
2770 plane_state->update_flags.bits.pixel_format_change ||
2771 plane_state->update_flags.bits.horizontal_mirror_change ||
2772 plane_state->update_flags.bits.rotation_change ||
2773 plane_state->update_flags.bits.swizzle_change ||
2774 plane_state->update_flags.bits.dcc_change ||
2775 plane_state->update_flags.bits.bpp_change ||
2776 plane_state->update_flags.bits.scaling_change ||
2777 plane_state->update_flags.bits.plane_size_change) {
2778 hubp->funcs->hubp_program_surface_config(
2780 plane_state->format,
2781 &plane_state->tiling_info,
2783 plane_state->rotation,
2785 plane_state->horizontal_mirror,
2789 hubp->power_gated = false;
2791 hws->funcs.update_plane_addr(dc, pipe_ctx);
/* Unblank only if some pipe in this tree is actually visible. */
2793 if (is_pipe_tree_visible(pipe_ctx))
2794 hubp->funcs->set_blank(hubp, false);
/*
 * dcn10_blank_pixel_data() - blank or unblank the stream's pixel data on the
 * timing generator, programming the OTG blank color first and managing ABM
 * state (disable before blanking; restore level when unblanking).
 * NOTE(review): the blank/unblank branch structure is partially elided in
 * this listing - confirm which path handles blank vs. unblank.
 */
2797 void dcn10_blank_pixel_data(
2799 struct pipe_ctx *pipe_ctx,
2802 enum dc_color_space color_space;
2803 struct tg_color black_color = {0};
2804 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2805 struct dc_stream_state *stream = pipe_ctx->stream;
2807 /* program otg blank color */
2808 color_space = stream->output_color_space;
2809 color_space_to_black_color(dc, color_space, &black_color);
2812 * The way 420 is packed, 2 channels carry Y component, 1 channel
2813 * alternate between Cb and Cr, so both channels need the pixel
2816 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2817 black_color.color_r_cr = black_color.color_g_y;
2820 if (stream_res->tg->funcs->set_blank_color)
2821 stream_res->tg->funcs->set_blank_color(
/* Unblank path: set_blank(false) then restore the stream's ABM level. */
2826 if (stream_res->tg->funcs->set_blank)
2827 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2828 if (stream_res->abm) {
2829 dc->hwss.set_pipe(pipe_ctx);
2830 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
/* Blank path: disable ABM immediately, wait for VBLANK, then blank. */
2833 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2834 if (stream_res->tg->funcs->set_blank) {
2835 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2836 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
/*
 * dcn10_set_hdr_multiplier() - convert the plane's fixed-point hdr_mult to
 * the DPP's custom float format (6-bit exponent, 12-bit mantissa) and
 * program it.  A multiplier of 0 keeps the 1.0 default (0x1f000).
 */
2841 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2843 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2844 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2845 struct custom_float_format fmt;
2847 fmt.exponenta_bits = 6;
2848 fmt.mantissa_bits = 12;
/* Only convert a non-zero multiplier; zero means "use the default". */
2852 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2853 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2855 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2856 pipe_ctx->plane_res.dpp, hw_mult);
/*
 * dcn10_program_pipe() - program one pipe for the new state: global sync and
 * VTG on the top pipe, plane enable on full update, HUBP/DPP updates, HDR
 * multiplier, and input/output transfer functions (gamma) as flagged.
 */
2859 void dcn10_program_pipe(
2861 struct pipe_ctx *pipe_ctx,
2862 struct dc_state *context)
2864 struct dce_hwseq *hws = dc->hwseq;
/* Timing-level programming only applies to the top pipe of a tree. */
2866 if (pipe_ctx->top_pipe == NULL) {
2867 bool blank = !is_pipe_tree_visible(pipe_ctx);
2869 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2870 pipe_ctx->stream_res.tg,
2871 pipe_ctx->pipe_dlg_param.vready_offset,
2872 pipe_ctx->pipe_dlg_param.vstartup_start,
2873 pipe_ctx->pipe_dlg_param.vupdate_offset,
2874 pipe_ctx->pipe_dlg_param.vupdate_width);
2876 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2877 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2879 if (hws->funcs.setup_vupdate_interrupt)
2880 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2882 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
/* Full update implies the plane may need power/clock enable first. */
2885 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2886 dcn10_enable_plane(dc, pipe_ctx, context);
2888 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2890 hws->funcs.set_hdr_multiplier(pipe_ctx);
2892 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2893 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2894 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2895 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2897 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2898 * only do gamma programming for full update.
2899 * TODO: This can be further optimized/cleaned up
2900 * Always call this for now since it does memcmp inside before
2901 * doing heavy calculation and programming
2903 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2904 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream)
/*
 * dcn10_wait_for_pending_cleared() - for every enabled top pipe with a
 * stream and a plane, wait through VBLANK then VACTIVE so the pending
 * update (VUPDATE) is guaranteed to have taken effect before returning.
 */
2907 void dcn10_wait_for_pending_cleared(struct dc *dc,
2908 struct dc_state *context)
2910 struct pipe_ctx *pipe_ctx;
2911 struct timing_generator *tg;
2914 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2915 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2916 tg = pipe_ctx->stream_res.tg;
2919 * Only wait for top pipe's tg penindg bit
2920 * Also skip if pipe is disabled.
2922 if (pipe_ctx->top_pipe ||
2923 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2924 !tg->funcs->is_tg_enabled(tg))
2928 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2929 * For some reason waiting for OTG_UPDATE_PENDING cleared
2930 * seems to not trigger the update right away, and if we
2931 * lock again before VUPDATE then we don't get a separated
2934 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2935 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
/*
 * dcn10_post_unlock_program_front_end() - front-end work done after pipe
 * locks are released: apply the OPTC underflow workaround on planeless
 * top/ODM-head pipes, disable pipes flagged for removal, re-optimize
 * bandwidth if anything was disabled, and apply the DEGVIDCN10_254
 * watermark workaround when active.
 * NOTE(review): some condition lines are elided in this listing - verify
 * the disable/optimize loop bodies against the full source.
 */
2939 void dcn10_post_unlock_program_front_end(
2941 struct dc_state *context)
2945 DC_LOGGER_INIT(dc->ctx->logger);
2947 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2948 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Only top pipes that are not ODM tails are candidates for the WA. */
2950 if (!pipe_ctx->top_pipe &&
2951 !pipe_ctx->prev_odm_pipe &&
2953 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2955 if (context->stream_status[i].plane_count == 0)
2956 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
/* Tear down pipes the new state marks as disabled (note: uses the
 * current_state's pipe_ctx for the actual disable). */
2960 for (i = 0; i < dc->res_pool->pipe_count; i++)
2961 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2962 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/* If any pipe was disabled, clocks/watermarks can be lowered. */
2964 for (i = 0; i < dc->res_pool->pipe_count; i++)
2965 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2966 dc->hwss.optimize_bandwidth(dc, context);
2970 if (dc->hwseq->wa.DEGVIDCN10_254)
2971 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
/*
 * dcn10_stereo_hw_frame_pack_wa() - workaround: when any stream uses HW
 * frame-packed stereo, disable HUBBUB self-refresh control.
 */
2974 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2978 for (i = 0; i < context->stream_count; i++) {
2979 if (context->streams[i]->timing.timing_3d_format
2980 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2984 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
/*
 * dcn10_prepare_bandwidth() - raise clocks and program safe watermarks
 * BEFORE the new state is applied.  Watermark programming reports whether a
 * later optimize pass is needed via dc->wm_optimized_required.
 */
2990 void dcn10_prepare_bandwidth(
2992 struct dc_state *context)
2994 struct dce_hwseq *hws = dc->hwseq;
2995 struct hubbub *hubbub = dc->res_pool->hubbub;
2997 if (dc->debug.sanity_checks)
2998 hws->funcs.verify_allow_pstate_change_high(dc);
/* Skip clock manager interaction on FPGA emulation platforms. */
3000 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3001 if (context->stream_count == 0)
3002 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3004 dc->clk_mgr->funcs->update_clocks(
/* program_watermarks returns true when a lower-watermark pass is
 * still worthwhile after this prepare step. */
3010 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3011 &context->bw_ctx.bw.dcn.watermarks,
3012 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3014 dcn10_stereo_hw_frame_pack_wa(dc, context);
3016 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3018 dcn_bw_notify_pplib_of_wm_ranges(dc);
3022 if (dc->debug.sanity_checks)
3023 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_optimize_bandwidth() - lower clocks and program final watermarks
 * AFTER the new state has been applied; mirror image of
 * dcn10_prepare_bandwidth().
 */
3026 void dcn10_optimize_bandwidth(
3028 struct dc_state *context)
3030 struct dce_hwseq *hws = dc->hwseq;
3031 struct hubbub *hubbub = dc->res_pool->hubbub;
3033 if (dc->debug.sanity_checks)
3034 hws->funcs.verify_allow_pstate_change_high(dc);
/* Skip clock manager interaction on FPGA emulation platforms. */
3036 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3037 if (context->stream_count == 0)
3038 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3040 dc->clk_mgr->funcs->update_clocks(
3046 hubbub->funcs->program_watermarks(hubbub,
3047 &context->bw_ctx.bw.dcn.watermarks,
3048 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3051 dcn10_stereo_hw_frame_pack_wa(dc, context);
3053 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3055 dcn_bw_notify_pplib_of_wm_ranges(dc);
3059 if (dc->debug.sanity_checks)
3060 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_set_drr() - program dynamic refresh rate (variable v_total) limits
 * on each pipe's timing generator, and set static-screen control when a
 * valid v_total_min/max range is given.
 */
3063 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3064 int num_pipes, struct dc_crtc_timing_adjust adjust)
3067 struct drr_params params = {0};
3068 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3069 unsigned int event_triggers = 0x800;
3070 // Note DRR trigger events are generated regardless of whether num frames met.
3071 unsigned int num_frames = 2;
3073 params.vertical_total_max = adjust.v_total_max;
3074 params.vertical_total_min = adjust.v_total_min;
3075 params.vertical_total_mid = adjust.v_total_mid;
3076 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3077 /* TODO: If multiple pipes are to be supported, you need
3078 * some GSL stuff. Static screen triggers may be programmed differently
3081 for (i = 0; i < num_pipes; i++) {
/* Guard both the TG pointer and its funcs table before use. */
3082 if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3083 if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3084 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3085 pipe_ctx[i]->stream_res.tg, &params);
/* Static-screen control only when DRR is actually active. */
3086 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3087 if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3088 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3089 pipe_ctx[i]->stream_res.tg,
3090 event_triggers, num_frames);
/*
 * dcn10_get_position() - read the current CRTC scan position.  Each pipe's
 * TG writes into the same @position, so the last pipe wins (see TODO).
 */
3095 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3097 struct crtc_position *position)
3101 /* TODO: handle pipes > 1
3103 for (i = 0; i < num_pipes; i++)
3104 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
/*
 * dcn10_set_static_screen_control() - translate the requested static-screen
 * trigger events (surface update / cursor update / force) into a TG trigger
 * bitmask and program it on every pipe.
 * NOTE(review): the lines OR-ing the individual trigger bits are elided
 * from this listing - bit values not visible here.
 */
3107 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3108 int num_pipes, const struct dc_static_screen_params *params)
3111 unsigned int triggers = 0;
3113 if (params->triggers.surface_update)
3115 if (params->triggers.cursor_update)
3117 if (params->triggers.force_trigger)
3120 for (i = 0; i < num_pipes; i++)
3121 pipe_ctx[i]->stream_res.tg->funcs->
3122 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3123 triggers, params->num_frames);
/*
 * dcn10_config_stereo_parameters() - derive the CRTC stereo flags from the
 * stream's 3D timing format and view format.  Stereo programming is enabled
 * only for genuinely stereo timings paired with frame-sequential viewing.
 */
3126 static void dcn10_config_stereo_parameters(
3127 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3129 enum view_3d_format view_format = stream->view_format;
3130 enum dc_timing_3d_format timing_3d_format =\
3131 stream->timing.timing_3d_format;
3132 bool non_stereo_timing = false;
/* Side-by-side and top-and-bottom carry both eyes in one frame, so the
 * CRTC itself needs no stereo control for them. */
3134 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3135 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3136 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3137 non_stereo_timing = true;
3139 if (non_stereo_timing == false &&
3140 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3142 flags->PROGRAM_STEREO = 1;
3143 flags->PROGRAM_POLARITY = 1;
3144 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3145 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3146 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3147 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
/* Passive converter dongles cannot carry the DP stereo sync. */
3148 enum display_dongle_type dongle = \
3149 stream->link->ddc->dongle_type;
3150 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3151 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3152 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3153 flags->DISABLE_STEREO_DP_SYNC = 1;
3155 flags->RIGHT_EYE_POLARITY =\
3156 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3157 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3158 flags->FRAME_PACKED = 1;
/*
 * Program stereo output for one pipe: compute the stereo flags, drive the
 * sideband frame-alternate GPIO when that format is in use, then program
 * the OPP and timing generator.  (Trailing call arguments are elided in
 * this listing.)
 */
3164 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3166 struct crtc_stereo_flags flags = { 0 };
3167 struct dc_stream_state *stream = pipe_ctx->stream;
3169 dcn10_config_stereo_parameters(stream, &flags);
3171 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
/* Assert the GPIO; on failure fall back to de-asserting it. */
3172 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3173 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3175 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3178 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3179 pipe_ctx->stream_res.opp,
3180 flags.PROGRAM_STEREO == 1,
3183 pipe_ctx->stream_res.tg->funcs->program_stereo(
3184 pipe_ctx->stream_res.tg,
/*
 * Look up the HUBP in the resource pool whose hardware instance matches
 * mpcc_inst.  The not-found path is elided in this listing — presumably
 * returns NULL; TODO confirm.
 */
3191 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3195 for (i = 0; i < res_pool->pipe_count; i++) {
3196 if (res_pool->hubps[i]->inst == mpcc_inst)
3197 return res_pool->hubps[i];
/*
 * For every MPCC instance flagged as disconnect-pending on this pipe's OPP,
 * wait for the MPC to go idle (only while the timing generator is enabled),
 * clear the pending flag, and blank the matching HUBP.
 * Sanity checks around the wait verify the p-state-change-allow invariant
 * when debug.sanity_checks is set.
 * NOTE(review): hubp from get_hubp_by_inst() is dereferenced without a NULL
 * check — verify the lookup can't fail for a pending instance.
 */
3203 void dcn10_wait_for_mpcc_disconnect(
3205 struct resource_pool *res_pool,
3206 struct pipe_ctx *pipe_ctx)
3208 struct dce_hwseq *hws = dc->hwseq;
3211 if (dc->debug.sanity_checks) {
3212 hws->funcs.verify_allow_pstate_change_high(dc);
3215 if (!pipe_ctx->stream_res.opp)
3218 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3219 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3220 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3222 if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3223 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3224 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3225 hubp->funcs->set_blank(hubp, true);
3229 if (dc->debug.sanity_checks) {
3230 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * No-op power-gating stub used where DCN does not gate displays the way
 * DCE did.  The body/return is elided in this listing — presumably just
 * returns true; TODO confirm.
 */
3235 bool dcn10_dummy_display_power_gating(
3237 uint8_t controller_id,
3238 struct dc_bios *dcb,
3239 enum pipe_gating_control power_gating)
/*
 * Refresh the plane's flip/address status from hardware:
 *  - latch whether a surface flip is still pending in the HUBP,
 *  - promote the requested address to current (guarding condition elided),
 *  - for stereo surfaces, record which eye is being scanned out,
 *  - and, once a new frame has started, lift the "disallow self-refresh
 *    during multi-plane transition" workaround by re-enabling stutter.
 */
3244 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3246 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3247 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3249 struct dc *dc = pipe_ctx->stream->ctx->dc;
3251 if (plane_state == NULL)
3254 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3255 pipe_ctx->plane_res.hubp);
3257 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3260 plane_state->status.current_address = plane_state->status.requested_address;
3262 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3263 tg->funcs->is_stereo_left_eye) {
3264 plane_state->status.is_right_eye =
3265 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3268 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3269 struct dce_hwseq *hwseq = dc->hwseq;
/* Workaround frame counter is read from TG 0. */
3270 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3271 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3273 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3274 struct hubbub *hubbub = dc->res_pool->hubbub;
3276 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3277 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
/* Forward DCHUB init data to the hubbub, which owns this programming on DCN. */
3282 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3284 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3286 /* In DCN, this programming sequence is owned by the hubbub */
3287 hubbub->funcs->update_dchub(hubbub, dh_data);
/*
 * Decide whether this pipe's HW cursor can be disabled: true when some pipe
 * above it has a visible plane on a different layer whose recout fully
 * contains this pipe's recout (so that pipe will draw the cursor instead,
 * avoiding a double cursor / wrong scaling).
 * NOTE(review): test_pipe->plane_state is dereferenced without a NULL
 * check; verify every top_pipe in the chain is guaranteed a plane_state.
 */
3290 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3292 struct pipe_ctx *test_pipe, *split_pipe;
3293 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3294 struct rect r1 = scl_data->recout, r2, r2_half;
3295 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3296 int cur_layer = pipe_ctx->plane_state->layer_index;
3299 * Disable the cursor if there's another pipe above this with a
3300 * plane that contains this pipe's viewport to prevent double cursor
3301 * and incorrect scaling artifacts.
3303 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3304 test_pipe = test_pipe->top_pipe) {
3305 // Skip invisible layer and pipe-split plane on same layer
3306 if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
3309 r2 = test_pipe->plane_res.scl_data.recout;
3310 r2_r = r2.x + r2.width;
3311 r2_b = r2.y + r2.height;
3312 split_pipe = test_pipe;
3315 * There is another half plane on same layer because of
3316 * pipe-split, merge together per same height.
3318 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3319 split_pipe = split_pipe->top_pipe)
3320 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3321 r2_half = split_pipe->plane_res.scl_data.recout;
3322 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3323 r2.width = r2.width + r2_half.width;
3324 r2_r = r2.x + r2.width;
/* r1 fully inside merged r2 => the upper pipe covers us. */
3328 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
/*
 * Gate DMUB cursor-info updates: skip video-progressive planes, and only
 * report cursor data to the firmware for PSR-SU panels or for PSR1 when
 * software-controlled PSR is enabled.  (Return statements are elided in
 * this listing.)
 */
3335 static bool dcn10_dmub_should_update_cursor_data(
3336 struct pipe_ctx *pipe_ctx,
3337 struct dc_debug_options *debug)
3339 if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3342 if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
3345 if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
3346 debug->enable_sw_cntl_psr)
/*
 * Send the current cursor rectangle/enable state to DMUB firmware (used by
 * PSR).  Either cur_pos or cur_attr may be NULL, in which case the values
 * cached on the HUBP are used (see comment at 3376).  The routine mirrors
 * the visibility/rotation math done for the HW cursor, clamps negative
 * offsets to 0, then queues, executes and waits on the
 * DMUB_CMD__UPDATE_CURSOR_INFO command.
 */
3352 static void dcn10_dmub_update_cursor_data(
3353 struct pipe_ctx *pipe_ctx,
3355 const struct dc_cursor_mi_param *param,
3356 const struct dc_cursor_position *cur_pos,
3357 const struct dc_cursor_attributes *cur_attr)
3359 union dmub_rb_cmd cmd;
3360 struct dmub_cmd_update_cursor_info_data *update_cursor_info;
3361 const struct dc_cursor_position *pos;
3362 const struct dc_cursor_attributes *attr;
3363 int src_x_offset = 0;
3364 int src_y_offset = 0;
3366 int cursor_height = 0;
3367 int cursor_width = 0;
3368 uint32_t cur_en = 0;
3369 unsigned int panel_inst = 0;
3371 struct dc_debug_options *debug = &hubp->ctx->dc->debug;
3373 if (!dcn10_dmub_should_update_cursor_data(pipe_ctx, debug))
3376 * if cur_pos == NULL means the caller is from cursor_set_attribute
3377 * then driver use previous cursor position data
3378 * if cur_attr == NULL means the caller is from cursor_set_position
3379 * then driver use previous cursor attribute
3380 * if cur_pos or cur_attr is not NULL then update it
3382 if (cur_pos != NULL)
3385 pos = &hubp->curs_pos;
3387 if (cur_attr != NULL)
3390 attr = &hubp->curs_attr;
/* Only eDP panels known to DMUB get cursor updates; bail otherwise. */
3392 if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, pipe_ctx->stream->link, &panel_inst))
3395 src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
3396 src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
3397 x_hotspot = pos->x_hotspot;
3398 cursor_height = (int)attr->height;
3399 cursor_width = (int)attr->width;
3400 cur_en = pos->enable ? 1:0;
3402 // Rotated cursor width/height and hotspots tweaks for offset calculation
3403 if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
3404 swap(cursor_height, cursor_width);
3405 if (param->rotation == ROTATION_ANGLE_90) {
3406 src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
3407 src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
3409 } else if (param->rotation == ROTATION_ANGLE_180) {
3410 src_x_offset = pos->x - param->viewport.x;
3411 src_y_offset = pos->y - param->viewport.y;
3414 if (param->mirror) {
3415 x_hotspot = param->viewport.width - x_hotspot;
3416 src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
3419 if (src_x_offset >= (int)param->viewport.width)
3420 cur_en = 0; /* not visible beyond right edge*/
3422 if (src_x_offset + cursor_width <= 0)
3423 cur_en = 0; /* not visible beyond left edge*/
3425 if (src_y_offset >= (int)param->viewport.height)
3426 cur_en = 0; /* not visible beyond bottom edge*/
3428 if (src_y_offset + cursor_height <= 0)
3429 cur_en = 0; /* not visible beyond top edge*/
3431 // Cursor bitmaps have different hotspot values
3432 // There's a possibility that the above logic returns a negative value, so we clamp them to 0
3433 if (src_x_offset < 0)
3435 if (src_y_offset < 0)
3438 memset(&cmd, 0x0, sizeof(cmd));
3439 cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
3440 cmd.update_cursor_info.header.payload_bytes =
3441 sizeof(cmd.update_cursor_info.update_cursor_info_data);
3442 update_cursor_info = &cmd.update_cursor_info.update_cursor_info_data;
3443 update_cursor_info->cursor_rect.x = src_x_offset + param->viewport.x;
3444 update_cursor_info->cursor_rect.y = src_y_offset + param->viewport.y;
3445 update_cursor_info->cursor_rect.width = attr->width;
3446 update_cursor_info->cursor_rect.height = attr->height;
3447 update_cursor_info->enable = cur_en;
3448 update_cursor_info->pipe_idx = pipe_ctx->pipe_idx;
3449 update_cursor_info->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3450 update_cursor_info->panel_inst = panel_inst;
3451 dc_dmub_srv_cmd_queue(pipe_ctx->stream->ctx->dmub_srv, &cmd);
3452 dc_dmub_srv_cmd_execute(pipe_ctx->stream->ctx->dmub_srv);
3453 dc_dmub_srv_wait_idle(pipe_ctx->stream->ctx->dmub_srv);
/*
 * Program the hardware cursor position for one pipe.
 *
 * Converts the stream-space cursor position into plane space (scaling,
 * source-viewport translation, negative-position hotspot shifting), then
 * remaps it for plane rotation (90/180/270) and pipe-split / ODM-combine
 * layouts, and finally writes it to DMUB, the HUBP and the DPP.
 *
 * Fix in this revision: the three trailing calls (lines 3654-3656) had the
 * token "&param"/"&params" corrupted to the HTML entity form "¶m"
 * ("&para;" + "m") by an encoding round-trip, which does not compile.
 * Restored to "&param".  All other tokens are unchanged.
 */
3456 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3458 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3459 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3460 struct dpp *dpp = pipe_ctx->plane_res.dpp;
3461 struct dc_cursor_mi_param param = {
3462 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3463 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3464 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3465 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3466 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3467 .rotation = pipe_ctx->plane_state->rotation,
3468 .mirror = pipe_ctx->plane_state->horizontal_mirror
3470 bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
3471 (pipe_ctx->bottom_pipe != NULL);
3472 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3473 (pipe_ctx->prev_odm_pipe != NULL);
3475 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3476 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3477 int x_pos = pos_cpy.x;
3478 int y_pos = pos_cpy.y;
3481 * DC cursor is stream space, HW cursor is plane space and drawn
3482 * as part of the framebuffer.
3484 * Cursor position can't be negative, but hotspot can be used to
3485 * shift cursor out of the plane bounds. Hotspot must be smaller
3486 * than the cursor size.
3490 * Translate cursor from stream space to plane space.
3492 * If the cursor is scaled then we need to scale the position
3493 * to be in the approximately correct place. We can't do anything
3494 * about the actual size being incorrect, that's a limitation of
3497 if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3498 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3499 pipe_ctx->plane_state->dst_rect.width;
3500 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3501 pipe_ctx->plane_state->dst_rect.height;
3503 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3504 pipe_ctx->plane_state->dst_rect.width;
3505 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3506 pipe_ctx->plane_state->dst_rect.height;
3510 * If the cursor's source viewport is clipped then we need to
3511 * translate the cursor to appear in the correct position on
3514 * This translation isn't affected by scaling so it needs to be
3515 * done *after* we adjust the position for the scale factor.
3517 * This is only done by opt-in for now since there are still
3518 * some usecases like tiled display that might enable the
3519 * cursor on both streams while expecting dc to clip it.
3521 if (pos_cpy.translate_by_source) {
3522 x_pos += pipe_ctx->plane_state->src_rect.x;
3523 y_pos += pipe_ctx->plane_state->src_rect.y;
3527 * If the position is negative then we need to add to the hotspot
3528 * to shift the cursor outside the plane.
3532 pos_cpy.x_hotspot -= x_pos;
3537 pos_cpy.y_hotspot -= y_pos;
3541 pos_cpy.x = (uint32_t)x_pos;
3542 pos_cpy.y = (uint32_t)y_pos;
3544 if (pipe_ctx->plane_state->address.type
3545 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3546 pos_cpy.enable = false;
3548 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3549 pos_cpy.enable = false;
3551 // Swap axis and mirror horizontally
3552 if (param.rotation == ROTATION_ANGLE_90) {
3553 uint32_t temp_x = pos_cpy.x;
3555 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3556 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3559 // Swap axis and mirror vertically
3560 else if (param.rotation == ROTATION_ANGLE_270) {
3561 uint32_t temp_y = pos_cpy.y;
3562 int viewport_height =
3563 pipe_ctx->plane_res.scl_data.viewport.height;
3565 pipe_ctx->plane_res.scl_data.viewport.y;
3568 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3569 * For pipe split cases:
3570 * - apply offset of viewport.y to normalize pos_cpy.x
3571 * - calculate the pos_cpy.y as before
3572 * - shift pos_cpy.y back by same offset to get final value
3573 * - since we iterate through both pipes, use the lower
3574 * viewport.y for offset
3575 * For non pipe split cases, use the same calculation for
3576 * pos_cpy.y as the 180 degree rotation case below,
3577 * but use pos_cpy.x as our input because we are rotating
3580 if (pipe_split_on || odm_combine_on) {
3581 int pos_cpy_x_offset;
3582 int other_pipe_viewport_y;
3584 if (pipe_split_on) {
3585 if (pipe_ctx->bottom_pipe) {
3586 other_pipe_viewport_y =
3587 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3589 other_pipe_viewport_y =
3590 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3593 if (pipe_ctx->next_odm_pipe) {
3594 other_pipe_viewport_y =
3595 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3597 other_pipe_viewport_y =
3598 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3601 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3602 other_pipe_viewport_y : viewport_y;
3603 pos_cpy.x -= pos_cpy_x_offset;
3604 if (pos_cpy.x > viewport_height) {
3605 pos_cpy.x = pos_cpy.x - viewport_height;
3606 pos_cpy.y = viewport_height - pos_cpy.x;
3608 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3610 pos_cpy.y += pos_cpy_x_offset;
3612 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3616 // Mirror horizontally and vertically
3617 else if (param.rotation == ROTATION_ANGLE_180) {
3618 int viewport_width =
3619 pipe_ctx->plane_res.scl_data.viewport.width;
3621 pipe_ctx->plane_res.scl_data.viewport.x;
3623 if (pipe_split_on || odm_combine_on) {
3624 if (pos_cpy.x >= viewport_width + viewport_x) {
3625 pos_cpy.x = 2 * viewport_width
3626 - pos_cpy.x + 2 * viewport_x;
3628 uint32_t temp_x = pos_cpy.x;
3630 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3631 if (temp_x >= viewport_x +
3632 (int)hubp->curs_attr.width || pos_cpy.x
3633 <= (int)hubp->curs_attr.width +
3634 pipe_ctx->plane_state->src_rect.x) {
3635 pos_cpy.x = temp_x + viewport_width;
3639 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3643 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3645 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3646 * pos_cpy.y_new = viewport.y + delta_from_bottom
3648 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3650 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3651 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3654 dcn10_dmub_update_cursor_data(pipe_ctx, hubp, &param, &pos_cpy, NULL);
3655 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3656 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
/*
 * Program the stream's cursor attributes into the HUBP and DPP, first
 * notifying DMUB (PSR) so a PSR exit restores the correct cursor state
 * rather than stale initial values (see comment at 3665).
 *
 * Fix in this revision: "&param" on line 3679 was corrupted to the HTML
 * entity form "¶m" by an encoding round-trip; restored.  All other
 * tokens are unchanged.
 */
3659 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3661 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3662 struct dc_cursor_mi_param param = { 0 };
3665 * If enter PSR without cursor attribute update
3666 * the cursor attribute of dmub_restore_plane
3667 * are initial value. call dmub to exit PSR and
3668 * restore plane then update cursor attribute to
3669 * avoid override with initial value
3671 if (pipe_ctx->plane_state != NULL) {
3672 param.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
3673 param.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz;
3674 param.viewport = pipe_ctx->plane_res.scl_data.viewport;
3675 param.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz;
3676 param.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert;
3677 param.rotation = pipe_ctx->plane_state->rotation;
3678 param.mirror = pipe_ctx->plane_state->horizontal_mirror;
3679 dcn10_dmub_update_cursor_data(pipe_ctx, pipe_ctx->plane_res.hubp, &param, NULL, attributes);
3682 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3683 pipe_ctx->plane_res.hubp, attributes);
3684 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3685 pipe_ctx->plane_res.dpp, attributes);
/*
 * Scale the cursor for HDR: when the stream's SDR white level exceeds the
 * 80-nit reference, compute level/80 as a half-precision custom float
 * (5 exponent / 10 mantissa bits) and program it as the DPP cursor
 * multiplier; otherwise the default 0x3c00 (1.0) is used.
 */
3688 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3690 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3691 struct fixed31_32 multiplier;
3692 struct dpp_cursor_attributes opt_attr = { 0 };
3693 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3694 struct custom_float_format fmt;
3696 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3699 fmt.exponenta_bits = 5;
3700 fmt.mantissa_bits = 10;
3703 if (sdr_white_level > 80) {
3704 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3705 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3708 opt_attr.scale = hw_scale;
3711 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3712 pipe_ctx->plane_res.dpp, &opt_attr);
3716 * apply_front_porch_workaround TODO FPGA still need?
3718 * This is a workaround for a bug that has existed since R5xx and has not been
3719 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
/*
 * Clamp the vertical front porch to a hardware-safe minimum:
 * 2 lines for interlaced timings, 1 line for progressive (see the
 * R5xx-era workaround comment above this function).
 */
3721 static void apply_front_porch_workaround(
3722 struct dc_crtc_timing *timing)
3724 if (timing->flags.INTERLACE == 1) {
3725 if (timing->v_front_porch < 2)
3726 timing->v_front_porch = 2;
3728 if (timing->v_front_porch < 1)
3729 timing->v_front_porch = 1;
/*
 * Compute the VUPDATE line offset relative to VSYNC for this pipe, based
 * on a copy of the stream timing with the front-porch workaround applied
 * and the DLG vstartup parameter.  Several intermediate expressions
 * (e.g. parts of the asic_blank_end computation) are elided in this
 * listing.  May return a negative value (callers check for < 0).
 */
3733 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3735 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3736 struct dc_crtc_timing patched_crtc_timing;
3737 int vesa_sync_start;
3739 int interlace_factor;
3740 int vertical_line_start;
3742 patched_crtc_timing = *dc_crtc_timing;
3743 apply_front_porch_workaround(&patched_crtc_timing);
3745 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3747 vesa_sync_start = patched_crtc_timing.v_addressable +
3748 patched_crtc_timing.v_border_bottom +
3749 patched_crtc_timing.v_front_porch;
3751 asic_blank_end = (patched_crtc_timing.v_total -
3753 patched_crtc_timing.v_border_top)
3756 vertical_line_start = asic_blank_end -
3757 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3759 return vertical_line_start;
/*
 * Compute the start/end scanlines for a VUPDATE-referenced vline
 * interrupt: take periodic_interrupt0's line offset (biased toward zero
 * by one line), add the VUPDATE-from-VSYNC offset, wrap negative results
 * into the previous frame via v_total, and span two lines (wrapping the
 * end line at v_total; the wrap assignment is elided in this listing).
 */
3762 void dcn10_calc_vupdate_position(
3764 struct pipe_ctx *pipe_ctx,
3765 uint32_t *start_line,
3768 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3769 int vline_int_offset_from_vupdate =
3770 pipe_ctx->stream->periodic_interrupt0.lines_offset;
3771 int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3774 if (vline_int_offset_from_vupdate > 0)
3775 vline_int_offset_from_vupdate--;
3776 else if (vline_int_offset_from_vupdate < 0)
3777 vline_int_offset_from_vupdate++;
3779 start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3781 if (start_position >= 0)
3782 *start_line = start_position;
3784 *start_line = dc_crtc_timing->v_total + start_position - 1;
3786 *end_line = *start_line + 2;
3788 if (*end_line >= dc_crtc_timing->v_total)
/*
 * Resolve the reference point (VUPDATE vs. VSYNC) for the selected vline
 * interrupt and fill in the start/end scanlines accordingly.  For
 * START_V_UPDATE the calculation is delegated to
 * dcn10_calc_vupdate_position(); other switch cases are elided in this
 * listing (the comment at 3815 notes the VSYNC case needs no work).
 */
3792 static void dcn10_cal_vline_position(
3794 struct pipe_ctx *pipe_ctx,
3795 enum vline_select vline,
3796 uint32_t *start_line,
3799 enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3801 if (vline == VLINE0)
3802 ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3803 else if (vline == VLINE1)
3804 ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3806 switch (ref_point) {
3807 case START_V_UPDATE:
3808 dcn10_calc_vupdate_position(
3815 // Suppose to do nothing because vsync is 0;
/*
 * Arm a periodic vline interrupt on this pipe's timing generator:
 * VLINE0 uses a computed start/end scanline window, VLINE1 is programmed
 * directly from the stream's periodic_interrupt1 line offset.
 */
3823 void dcn10_setup_periodic_interrupt(
3825 struct pipe_ctx *pipe_ctx,
3826 enum vline_select vline)
3828 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3830 if (vline == VLINE0) {
3831 uint32_t start_line = 0;
3832 uint32_t end_line = 0;
3834 dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3836 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3838 } else if (vline == VLINE1) {
3839 pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3841 pipe_ctx->stream->periodic_interrupt1.lines_offset);
/*
 * Program the VUPDATE (vertical interrupt 2) line on the pipe's timing
 * generator.  A negative offset is handled by the elided branch at 3850
 * before programming.
 */
3845 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3847 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3848 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3850 if (start_line < 0) {
3855 if (tg->funcs->setup_vertical_interrupt2)
3856 tg->funcs->setup_vertical_interrupt2(tg, start_line);
/*
 * Unblank the stream on its encoder.  For DP signals the pixel clock is
 * halved for YCbCr 4:2:0 before calling dp_unblank; for eDP sinks the
 * panel backlight is switched on afterwards.
 *
 * Fix in this revision: "&params" on line 3875 was corrupted to the HTML
 * entity form "¶ms" by an encoding round-trip; restored.  All other
 * tokens are unchanged.
 */
3859 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3860 struct dc_link_settings *link_settings)
3862 struct encoder_unblank_param params = {0};
3863 struct dc_stream_state *stream = pipe_ctx->stream;
3864 struct dc_link *link = stream->link;
3865 struct dce_hwseq *hws = link->dc->hwseq;
3867 /* only 3 items below are used by unblank */
3868 params.timing = pipe_ctx->stream->timing;
3870 params.link_settings.link_rate = link_settings->link_rate;
3872 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3873 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3874 params.timing.pix_clk_100hz /= 2;
3875 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3878 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3879 hws->funcs.edp_backlight_control(link, true);
/*
 * Forward a custom SDP message to the stream encoder for immediate
 * transmission; only valid for DP signals.  (Trailing call arguments are
 * elided in this listing.)
 */
3883 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3884 const uint8_t *custom_sdp_message,
3885 unsigned int sdp_message_size)
3887 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3888 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3889 pipe_ctx->stream_res.stream_enc,
/*
 * Validate and apply a requested DISPCLK/DPPCLK frequency: reject values
 * above max, below min, or below the bandwidth-required clock reported by
 * the clock manager; record the request in the current context's clock
 * state and push it via update_clocks.
 * NOTE(review): "bw_requirequired_clock_khz" appears to be the struct
 * field's actual (misspelled) name rather than a local typo — confirm
 * against dc.h before "fixing".
 */
3894 enum dc_status dcn10_set_clock(struct dc *dc,
3895 enum dc_clock_type clock_type,
3899 struct dc_state *context = dc->current_state;
3900 struct dc_clock_config clock_cfg = {0};
3901 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3903 if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3904 return DC_FAIL_UNSUPPORTED_1;
3906 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3907 context, clock_type, &clock_cfg);
3909 if (clk_khz > clock_cfg.max_clock_khz)
3910 return DC_FAIL_CLK_EXCEED_MAX;
3912 if (clk_khz < clock_cfg.min_clock_khz)
3913 return DC_FAIL_CLK_BELOW_MIN;
3915 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3916 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3918 /*update internal request clock for update clock use*/
3919 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3920 current_clocks->dispclk_khz = clk_khz;
3921 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3922 current_clocks->dppclk_khz = clk_khz;
3924 return DC_ERROR_UNEXPECTED;
3926 if (dc->clk_mgr->funcs->update_clocks)
3927 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
/*
 * Query the clock manager for the configuration (min/max/current) of the
 * given clock type in the current state; silently does nothing when the
 * clock manager or its get_clock hook is absent.
 */
3933 void dcn10_get_clock(struct dc *dc,
3934 enum dc_clock_type clock_type,
3935 struct dc_clock_config *clock_cfg)
3937 struct dc_state *context = dc->current_state;
3939 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3940 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3944 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3946 struct resource_pool *pool = dc->res_pool;
3949 for (i = 0; i < pool->pipe_count; i++) {
3950 struct hubp *hubp = pool->hubps[i];
3951 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3953 hubp->funcs->hubp_read_state(hubp);
3956 dcc_en_bits[i] = s->dcc_en ? 1 : 0;