2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
54 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dce/dmub_outbox.h"
57 #include "inc/dc_link_dp.h"
58 #include "inc/link_dpcd.h"
/* Logger init is compiled out here; keep as a no-op so call sites stay valid. */
#define DC_LOGGER_INIT(logger)

/*
 * Expand a register field name into the (shift, mask) pair expected by the
 * REG_* helper macros in reg_helper.h, using this hwseq's shift/mask tables.
 */
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

/*
 * Print a value given in DCHUB reference-clock cycles as microseconds.
 * The printed field is 17 characters wide; the first two are spaces.
 * Relies on dc_ctx and log_ctx being in scope at the call site.
 */
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)

/* Number of hardware points used when programming gamma LUTs. */
#define GAMMA_HW_POINTS_NUM 256

/* PGFSM power-gate status values polled via the DOMAIN*_PG_STATUS registers. */
#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2
/*
 * Pretty-print a duration given in DCHUB reference-clock cycles as
 * microseconds with three fractional digits, into the DTN log.
 */
static void print_microsec(struct dc_context *dc_ctx,
	struct dc_log_buffer_ctx *log_ctx,
	/* DCHUB reference clock, converted from kHz to MHz. */
	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	static const unsigned int frac = 1000;
	/* cycles * 1000 / MHz yields nanosecond-scale fixed point (us * 1000). */
	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
	DTN_INFO(" %11d.%03d",
/*
 * Lock or unlock pipe programming on every enabled top pipe in @context.
 * Only top pipes are touched: children share the top pipe's TG, so locking
 * them again would be redundant.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	struct pipe_ctx *pipe_ctx;
	struct timing_generator *tg;
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;
		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
				!tg->funcs->is_tg_enabled(tg))
		dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
		dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
/*
 * Dump the MPC and DPP CRC result registers to the DTN log.
 * NOTE(review): REG() is presumably used as an existence check because these
 * registers are not present on every ASIC — confirm against reg_helper.h.
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;
	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
			REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
			REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
/* Dump the HUBBUB watermark state (all four watermark sets) to the DTN log. */
static void dcn10_log_hubbub_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	/* Snapshot the current watermark registers into wm. */
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
	DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
		" sr_enter sr_exit dram_clk_change\n");
	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		/* "chanage" is a long-standing typo in the struct field name;
		 * renaming must happen in the hubbub header, not here. */
		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
/*
 * Dump per-HUBP state to the DTN log in four sections: surface config,
 * RQ (request), DLG (delay generator) and TTU (time-to-underflow) registers.
 * Note log_ctx is typed void * here, unlike the sibling loggers.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
		/* Latch current HW register values into the hubp state cache. */
		hubp->funcs->hubp_read_state(hubp);
		DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
			s->underflow_status);
		DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
		DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
		DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
	/* RQ section: request-queue sizing for luma (L:) and chroma (C:) planes. */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
		DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
			pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
			rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
			rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
			rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
			rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
			rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
			rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
			rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
			rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	/* DLG section: delay-generator timing attributes. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
		" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
		" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
		" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
		" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
		" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
		" x_rp_dlay x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
		DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
			" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
			" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
			pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
			dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
			dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
			dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
			dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
			dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
			dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
			dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
			dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
			dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
			dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
			dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
			dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
			dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
			dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
			dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
			dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
			dlg_regs->xfc_reg_remote_surface_flip_latency);
	/* TTU section: QoS watermarks and per-request delivery rates. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
		" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
		" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
		DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
			pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
			ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
			ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
			ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
			ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
			ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
			ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/*
 * Top-level HW state dump: HUBBUB, HUBP, DPP, MPCC, OTG, DSC, stream/link
 * encoders, calculated clocks, MPC CRCs and (when present) DP HPO encoders.
 * Used for debug logging; reads live registers, so call with HW accessible.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	dcn10_log_hubbub_state(dc, log_ctx);
	dcn10_log_hubp_states(dc, log_ctx);
	/* DPP section: input/degamma/regamma LUT modes and gamut remap matrix. */
	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
		" GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
		"C31 C32 C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};
		dpp->funcs->dpp_read_state(dpp, &s);
		DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
			"%8x %08xh %08xh %08xh %08xh %08xh %08xh",
			/* Decode LUT mode enums into human-readable names. */
			(s.igam_lut_mode == 0) ? "BypassFixed" :
				((s.igam_lut_mode == 1) ? "BypassFloat" :
				((s.igam_lut_mode == 2) ? "RAM" :
				((s.igam_lut_mode == 3) ? "RAM" :
			(s.dgam_lut_mode == 0) ? "Bypass" :
				((s.dgam_lut_mode == 1) ? "sRGB" :
				((s.dgam_lut_mode == 2) ? "Ycc" :
				((s.dgam_lut_mode == 3) ? "RAM" :
				((s.dgam_lut_mode == 4) ? "RAM" :
			(s.rgam_lut_mode == 0) ? "Bypass" :
				((s.rgam_lut_mode == 1) ? "sRGB" :
				((s.rgam_lut_mode == 2) ? "Ycc" :
				((s.rgam_lut_mode == 3) ? "RAM" :
				((s.rgam_lut_mode == 4) ? "RAM" :
			s.gamut_remap_c11_c12,
			s.gamut_remap_c13_c14,
			s.gamut_remap_c21_c22,
			s.gamut_remap_c23_c24,
			s.gamut_remap_c31_c32,
			s.gamut_remap_c33_c34);
	/* MPCC section: blending tree state per pipe. */
	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};
		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
			i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
			s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
	/* OTG section: per timing-generator vertical/horizontal timing. */
	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
			s.blank_enabled = tg->funcs->is_blanked(tg);
		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
			s.underflow_occurred_status,
		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};
		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
			s.dsc_bits_per_pixel);
	/* Stream encoder section; enc_read_state is optional per encoder. */
	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
		" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};
		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
	/* Link encoder section: FEC and link-training status per link. */
	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;
		struct link_enc_state s = {0};
		if (lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
	/* Bandwidth-calculation results from the current state. */
	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
		dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
	log_mpc_crc(dc, log_ctx);
	/* DP HPO (128b/132b) stream encoders, only on pools that have them. */
	if (pool->hpo_dp_stream_enc_count > 0) {
		DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
		for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
			struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
			struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
			if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
				hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
				DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
					hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
					hpo_dp_se_state.stream_enc_enabled,
					hpo_dp_se_state.otg_inst,
					(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
						((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
						(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
					(hpo_dp_se_state.component_depth == 0) ? 6 :
						((hpo_dp_se_state.component_depth == 1) ? 8 :
						(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
					hpo_dp_se_state.vid_stream_enabled,
					hpo_dp_se_state.sdp_enabled,
					hpo_dp_se_state.compressed_format,
					hpo_dp_se_state.mapped_to_link_enc);
	/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
	if (pool->hpo_dp_link_enc_count) {
		DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
		for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
			struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
			struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
			if (hpo_dp_link_enc->funcs->read_state) {
				hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
				DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
					hpo_dp_link_enc->inst,
					hpo_dp_le_state.link_enc_enabled,
					(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
					(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
					(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
					hpo_dp_le_state.lane_count,
					hpo_dp_le_state.stream_src[0],
					hpo_dp_le_state.slot_count[0],
					hpo_dp_le_state.vc_rate_x[0],
					hpo_dp_le_state.vc_rate_y[0]);
/*
 * Check whether an OTG or HUBP underflow occurred on this pipe, clearing the
 * sticky status bit when set.
 */
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	if (tg->funcs->is_optc_underflow_occurred(tg)) {
		tg->funcs->clear_optc_underflow(tg);
	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
		hubp->funcs->hubp_clear_underflow(hubp);
/*
 * Force all plane power domains on (i.e. disable power gating) by setting
 * POWER_FORCEON in every DOMAIN*_PG_CONFIG register. Even-numbered domains
 * are DCHUBPs, odd-numbered domains are DPPs.
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool force_on = true; /* disable power gating */
	/* DCHUBP domains */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
	/* DPP domains */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
/*
 * Take all four display controllers out of VGA mode. Early-outs when no
 * controller is in VGA mode. After disabling, trigger the VGA_TEST render
 * sequence so DCHUBP timing is updated correctly (see HW note below).
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;
	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
	/* Nothing to do if no controller is currently in VGA mode. */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);
	/* HW Engineer's Notes:
	 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets
	 * updated correctly.
	 *
	 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 * VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP up (power gating released), false to
 *            power-gate it. Note power_gate is the inverse of power_on.
 *
 * Enable or disable power gate in the specific DPP instance, then poll the
 * matching DOMAIN*_PG_STATUS until the PGFSM reports the requested state.
 * No-op when DPP power gating is disabled via debug option or the domain
 * registers are not present.
 */
void dcn10_dpp_pg_control(
	struct dce_hwseq *hws,
	unsigned int dpp_inst,
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
	if (hws->ctx->dc->debug.disable_dpp_power_gate)
	if (REG(DOMAIN1_PG_CONFIG) == 0)
	/* DPP instances map to the odd-numbered power domains (1/3/5/7). */
	REG_UPDATE(DOMAIN1_PG_CONFIG,
		DOMAIN1_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN1_PG_STATUS,
		DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
	REG_UPDATE(DOMAIN3_PG_CONFIG,
		DOMAIN3_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN3_PG_STATUS,
		DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
	REG_UPDATE(DOMAIN5_PG_CONFIG,
		DOMAIN5_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN5_PG_STATUS,
		DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
	REG_UPDATE(DOMAIN7_PG_CONFIG,
		DOMAIN7_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN7_PG_STATUS,
		DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
/**
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference (doc previously said "DPP" — typo).
 * @power_on: true to power the HUBP up (power gating released), false to
 *            power-gate it. Note power_gate is the inverse of power_on.
 *
 * Enable or disable power gate in the specific HUBP instance, then poll the
 * matching DOMAIN*_PG_STATUS until the PGFSM reports the requested state.
 * No-op when HUBP power gating is disabled via debug option or the domain
 * registers are not present.
 */
void dcn10_hubp_pg_control(
	struct dce_hwseq *hws,
	unsigned int hubp_inst,
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
	if (hws->ctx->dc->debug.disable_hubp_power_gate)
	if (REG(DOMAIN0_PG_CONFIG) == 0)
	/* HUBP instances map to the even-numbered power domains (0/2/4/6). */
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
			DOMAIN0_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN0_PG_STATUS,
			DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
			DOMAIN2_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN2_PG_STATUS,
			DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
			DOMAIN4_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN4_PG_STATUS,
			DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
			DOMAIN6_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN6_PG_STATUS,
			DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
/*
 * Un-gate the front end (DPP + HUBP power domains) for the given plane.
 * The DC_IP_REQUEST_CNTL set/clear brackets the power-gate programming;
 * both pg_control hooks are optional per ASIC.
 */
static void power_on_plane(
	struct dce_hwseq *hws,
	DC_LOGGER_INIT(hws->ctx->logger);
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);
		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);
		REG_SET(DC_IP_REQUEST_CNTL, 0,
			"Un-gated front end for pipe %d\n", plane_id);
/*
 * Revert the DEGVIDCN10.253 workaround: blank HUBP0 again and power-gate it
 * back down. No-op if the workaround was never applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	if (!hws->wa_state.DEGVIDCN10_253_applied)
	hubp->funcs->set_blank(hubp, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
	/* Power-gate HUBP0 back down (false == gated). */
	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
	hws->wa_state.DEGVIDCN10_253_applied = false;
/*
 * DEGVIDCN10.253 workaround: when every pipe is power-gated, stutter cannot
 * be entered; power HUBP0 back up (unblanked) to allow stutter. Skipped when
 * stutter is disabled for debug or the workaround flag is not set.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	if (dc->debug.disable_stutter)
	if (!hws->wa.DEGVIDCN10_253)
	/* Bail out unless every hubp is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
	/* all pipe power gated, apply work around to enable stutter. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
	/* Power HUBP0 up (true == ungated). */
	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
/*
 * Run the VBIOS golden init sequence for DCN: global init, then per-pipe
 * disable. Afterwards restore DCHUBBUB self-refresh force-enable if the
 * command table flipped it (S0i3 resume workaround, see comment below).
 */
void dcn10_bios_golden_init(struct dc *dc)
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	bool allow_self_fresh_force_enable = true;
	/* ASIC-specific S0i3 path may handle golden init itself. */
	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
			dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */
	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
		CONTROLLER_ID_D0, ASIC_PIPE_INIT);
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
				!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
/*
 * Workaround for a false OPTC underflow indication: wait for MPCC disconnect
 * on all pipes of this stream, enable blank-data double buffering, and clear
 * the underflow status only if it was not already set before the wait.
 */
static void false_optc_underflow_wa(
	const struct dc_stream_state *stream,
	struct timing_generator *tg)
	if (!dc->hwseq->wa.false_optc_underflow)
	/* Remember whether underflow was already flagged before the WA ran. */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		if (old_pipe_ctx->stream != stream)
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);
	/* Only clear an underflow that appeared during this WA sequence. */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
/*
 * Program and enable the OTG timing for a stream: enable the OPTC clock,
 * program the pixel clock PLL, program OTG timing, set the blank color,
 * blank the CRTC, then enable it. Only runs on the top (parent) pipe;
 * child pipes share the parent's back end.
 *
 * Returns DC_ERROR_UNEXPECTED if pixel-clock programming or CRTC enable
 * fails.
 */
enum dc_status dcn10_enable_stream_timing(
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context,
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
	/* TODO check if timing_changed, disable stream if timing changed */
	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings)) {
		return DC_ERROR_UNEXPECTED;
	pipe_ctx->stream_res.tg->funcs->program_timing(
		pipe_ctx->stream_res.tg,
		pipe_ctx->pipe_dlg_param.vready_offset,
		pipe_ctx->pipe_dlg_param.vstartup_start,
		pipe_ctx->pipe_dlg_param.vupdate_offset,
		pipe_ctx->pipe_dlg_param.vupdate_width,
		pipe_ctx->stream->signal,
#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */
	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
		pipe_ctx->stream_res.opp,
		&stream->bit_depth_params,
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);
	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y.
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;
	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
			pipe_ctx->stream_res.tg,
	/* Blank the CRTC before enabling it; run the false-underflow WA. */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		return DC_ERROR_UNEXPECTED;
	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */
/*
 * Tear down the back end for a pipe: disable the stream/link (or just audio),
 * free dynamic audio resources, and — on the top pipe only — disable ABM,
 * the CRTC, the OPTC clock and DRR. Finally detach the stream from the pipe.
 */
static void dcn10_reset_back_end_for_pipe(
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);
		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {
		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);
		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
				pipe_ctx->stream_res.tg, NULL);
	/* Find this pipe's index in the current state (for the debug log). */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
	if (i == dc->res_pool->pipe_count)
	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
		pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
/*
 * Forced HUBP/HUBBUB recovery workaround: if any pipe underflowed, reset all
 * pipes by blanking every HUBP, soft-resetting HUBBUB, toggling HUBP_DISABLE,
 * then releasing the reset and blank. Gated on dc->debug.recovery_enabled.
 */
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
	bool need_recover = true;
	if (!dc->debug.recovery_enabled)
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
					/* one pipe underflow, we will reset all the pipes*/
					need_recover = true;
	/*
	 * Recovery register sequence:
	 *	DCHUBP_CNTL:HUBP_BLANK_EN=1
	 *	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
	 *	DCHUBP_CNTL:HUBP_DISABLE=1
	 *	DCHUBP_CNTL:HUBP_DISABLE=0
	 *	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
	 *	DCSURF_PRIMARY_SURFACE_ADDRESS
	 *	DCHUBP_CNTL:HUBP_BLANK_EN=0
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, true);
	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
	hubbub1_soft_reset(dc->res_pool->hubbub, true);
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, true);
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
			/* NOTE(review): passes true although the step comment
			 * says HUBP_DISABLE=0 — verify the disable_control
			 * argument polarity against the HW sequence. */
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, true);
	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
	hubbub1_soft_reset(dc->res_pool->hubbub, false);
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
			/* NOTE(review): passes true although the step comment
			 * says HUBP_BLANK_EN=0 — looks like it should unblank
			 * here; confirm before changing. */
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, true);
/*
 * Sanity check that HUBBUB still allows P-State changes.  If it does not,
 * optionally dump HW state, break to the debugger, attempt the forced
 * recovery workaround and re-verify.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	/* Nothing to do when the hubbub does not implement the hook. */
	if (!hubbub->funcs->verify_allow_pstate_change_high)

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		/* Full HW-state dump is gated off by default (see flag above). */
		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		/* Try the underflow recovery workaround, then check again. */
		if (dcn10_hw_wa_force_recovery(dc)) {
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	mpc_tree_params = &(opp->mpc_tree_params);
	/* Find the MPCC feeding this DPP in the OPP's blending tree. */
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/* No MPCC attached for this DPP — nothing to disconnect. */
	if (mpcc_to_remove == NULL)

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	/* The HW disconnect completes on the next vsync; mark it pending. */
	opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	/* Only proceed if this ASIC exposes the IP-request register. */
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
		/* Gate DPP then HUBP while the power-gate request is open. */
		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		dpp->funcs->dpp_reset(dpp);
		/* Close the power-gate request window again. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
			"Power gated front end %d\n", hubp->inst);
/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* The MPCC disconnect queued earlier must finish before disabling. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only when no MPCC remains on this OPP. */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
		pipe_ctx->plane_res.dpp,
		pipe_ctx->plane_res.hubp);

	/* Clear all SW state for this pipe now that HW is powered down. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
/* Front-end power down: disable the plane unless it is already gated. */
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	/* Skip pipes with no hubp or whose hubp is already power gated. */
	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)

	hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	/* Apply the DEGVIDCN10-253 hardware workaround after disabling. */
	apply_DEGVIDCN10_253_wa(dc);

	DC_LOG_DC("Power down front end %d\n",
		  pipe_ctx->pipe_idx);
/*
 * Bring all pipes to a known-disabled state at init time, while leaving
 * seamless-boot pipes untouched so the firmware-lit display survives.
 * Also powers down DSCs that are not in use by the currently running OTG.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;
	bool can_apply_seamless_boot = false;

	/* Any stream requesting seamless boot keeps its pipe alive. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		/* Blank controller using driver code instead of
		 * relying on VBIOS state.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
		    pipe_ctx->stream != NULL &&
		    pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
			    pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Build a minimal pipe_ctx so plane_atomic_disconnect /
		 * disable_plane can tear the pipe down cleanly.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		tg->funcs->tg_init(tg);

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obatin real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			/* Skip the DSC still feeding the active OPTC. */
			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
			    s.dsc_clock_en && s.dsc_fw_en)

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
/*
 * One-time hardware init for DCN 1.0: clocks, DCCG, VGA takeover, link
 * encoders, pipe teardown (unless seamless boot), audio, ABM/DMCU and
 * clock gating.
 */
void dcn10_init_hw(struct dc *dc)
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	/* FPGA emulation path: minimal register bring-up only. */
	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

	/* Take display ownership from VBIOS if it still has it. */
	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {
				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);

				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;

		/* No valid fw_info — cannot derive reference clocks. */
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
		    link->link_enc->funcs->is_dig_enabled(link->link_enc))
			link->link_status.link_active = true;

	/* we want to turn off all dp displays before doing detection */
	dc_link_blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

	if (!is_optimized_init_done) {
		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);

			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn10_power_down_on_boot(struct dc *dc)
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;

	get_edp_links(dc, edp_links, &edp_num);
	edp_link = edp_links[0];

	/* Prefer the eDP path: backlight off, power down, then panel power off. */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
	    edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
	    dc->hwseq->funcs.edp_backlight_control &&
	    dc->hwss.power_down &&
	    dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
		/* Otherwise power down on the first link with an enabled DIG. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
			    link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
			    dc->hwss.power_down) {
				dc->hwss.power_down(dc);

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
/*
 * Reset back ends for pipes whose stream went away or which need
 * reprogramming when transitioning to the new context.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;

	/* Walk pipes back to front so bottom pipes are handled first. */
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)

		/* Only top pipes own the back end; skip secondary pipes. */
		if (pipe_ctx_old->top_pipe)

		if (!pipe_ctx->stream ||
		    pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			old_clk->funcs->cs_power_down(old_clk);
/*
 * For side-by-side / top-and-bottom stereo on a split (hblank) pipe,
 * temporarily swap in the right-eye address; also promote a non-stereo
 * address to GRPH_STEREO when the stream is in a 3D view format.
 * NOTE(review): return value semantics (whether the caller must restore
 * the saved address) depend on lines not visible here — see caller.
 */
static bool patch_address_for_sbs_tb_stereo(
		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	/* Secondary split pipe shares the plane_state with its top pipe. */
	bool sec_split = pipe_ctx->top_pipe &&
			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
	    (pipe_ctx->stream->timing.timing_3d_format ==
	     TIMING_3D_FORMAT_SIDE_BY_SIDE ||
	     pipe_ctx->stream->timing.timing_3d_format ==
	     TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
		/* Save left address so the caller can restore it later. */
		*addr = plane_state->address.grph_stereo.left_addr;
		plane_state->address.grph_stereo.left_addr =
			plane_state->address.grph_stereo.right_addr;
		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
		    plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
			/* Mirror left into right so both eyes scan the same surface. */
			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
			plane_state->address.grph_stereo.right_addr =
				plane_state->address.grph_stereo.left_addr;
			plane_state->address.grph_stereo.right_meta_addr =
				plane_state->address.grph_stereo.left_meta_addr;
/* Program the plane's surface address (flip), handling stereo patching. */
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)

	/* May swap left/right stereo addresses; saves the original in addr. */
	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	/* Immediate flips take effect right away, so current == requested. */
	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	/* Restore the left address swapped out by the stereo patch above. */
	pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/*
 * Program the DPP input (degamma) transfer function for a plane:
 * input LUT when a non-identity gamma is supplied, otherwise a predefined
 * HW degamma curve or a user PWL translated from the transfer function.
 */
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				   const struct dc_plane_state *plane_state)
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;

	if (dpp_base == NULL)

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	/* Use the input LUT only when it is meaningful for this format. */
	if (plane_state->gamma_correction &&
	    !dpp_base->ctx->dc->debug.always_use_regamma
	    && !plane_state->gamma_correction->is_identity
	    && dce_use_lut(plane_state->format))
		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);

	/* No transfer function: bypass degamma. */
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	else if (tf->type == TF_TYPE_PREDEFINED) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
		case TRANSFER_FUNCTION_PQ:
			/* PQ has no fixed HW curve; build a user PWL instead. */
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
	} else if (tf->type == TF_TYPE_BYPASS) {
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
		/* Distributed-points curve: translate then program the PWL. */
		cm_helper_translate_curve_to_degamma_hw_format(tf,
				&dpp_base->degamma_params);
		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
				&dpp_base->degamma_params);
1749 #define MAX_NUM_HW_POINTS 0x200
/* Dump a transfer function's points at the configured gamma log levels. */
static void log_tf(struct dc_context *ctx,
		   struct dc_transfer_func *tf, uint32_t hw_points_num)
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
	DC_LOGGER_INIT(ctx->logger);
	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* HW points: red at default gamma level, green/blue at channel level. */
	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);

	/* Remaining (non-HW) points only at the verbose "all gamma" level. */
	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/*
 * Program the DPP output (regamma) transfer function for a stream:
 * fixed sRGB curve when predefined, otherwise a translated user PWL,
 * falling back to bypass.
 */
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				    const struct dc_stream_state *stream)
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	if (stream->out_transfer_func &&
	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);

	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
	 * update is required.
	 */
	else if (cm_helper_translate_curve_to_hw_format(
			stream->out_transfer_func,
			&dpp->regamma_params, false)) {
		dpp->funcs->dpp_program_regamma_pwl(
				&dpp->regamma_params, OPP_REGAMMA_USER);
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);

	/* Log the programmed curve for debugging. */
	if (stream != NULL && stream->ctx != NULL &&
	    stream->out_transfer_func != NULL) {
		stream->out_transfer_func,
		dpp->regamma_params.hw_points_num);
/* Lock/unlock pipe programming via the TG master update lock. */
void dcn10_pipe_control_lock(
		struct pipe_ctx *pipe,
	struct dce_hwseq *hws = dc->hwseq;

	/* use TG master update lock to lock everything on the TG
	 * therefore only top pipe need to lock
	 */
	if (!pipe || pipe->top_pipe)

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
	pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
 *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely or from old values from being retained in
 * the case where the viewport changes in the same frame as the cursor.
 *
 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
 * too close to VUPDATE, then stall out until VUPDATE finishes.
 *
 * TODO: Optimize cursor programming to be once per frame before VUPDATE
 * to avoid the need for this workaround.
 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;

	/* Calculate time until VUPDATE in microseconds. */
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
/* Lock/unlock cursor programming, via DMUB when the link supports it. */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
	if (!pipe || pipe->top_pipe)

	/* Prevent cursor lock from stalling out cursor updates. */
	delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		/* Route the lock request through the DMUB HW lock manager. */
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,

		/* Fallback: program the MPC cursor lock directly. */
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
/*
 * Poll the TG until a triggered reset occurs, waiting at most a fixed
 * number of frames.  Returns through paths not fully visible here.
 */
static bool wait_for_reset_trigger_to_occur(
		struct dc_context *dc_ctx,
		struct timing_generator *tg)
	/* To avoid endless loop we wait at most
	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
	const uint32_t frames_to_wait_on_triggered_reset = 10;

	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
		/* A stalled counter means waiting frame-by-frame is pointless. */
		if (!tg->funcs->is_counter_moving(tg)) {
			DC_ERROR("TG counter is not moving!\n");

		if (tg->funcs->did_triggered_reset_occur(tg)) {
			/* usually occurs at i=1 */
			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",

		/* Wait for one frame. */
		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);

	DC_ERROR("GSL: Timeout on reset trigger!\n");
/*
 * Reduce a numerator/denominator pair in place by dividing out small
 * prime factors; when checkUint32Bounary is set, succeed as soon as both
 * values fit in 32 bits.  Used to shrink DTO phase/modulo values.
 */
static uint64_t reduceSizeAndFraction(uint64_t *numerator,
				      uint64_t *denominator,
				      bool checkUint32Bounary)
	/* Without the 32-bit bound requirement the reduction always "succeeds". */
	bool ret = checkUint32Bounary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;
		/* Both already fit in 32 bits — reduction goal reached. */
		if (checkUint32Bounary &&
		    num <= max_int32 && denom <= max_int32) {
			/* Divide out the current prime while it divides both. */
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				denom = denom_result;
		} while (num_remainder == 0 && denom_remainder == 0);
	*denominator = denom;
2017 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2019 uint32_t master_pipe_refresh_rate =
2020 pipe->stream->timing.pix_clk_100hz * 100 /
2021 pipe->stream->timing.h_total /
2022 pipe->stream->timing.v_total;
2023 return master_pipe_refresh_rate <= 30;
/*
 * Compute the DTO clock divider for a pipe: scaled for low refresh rate
 * (when requested), YCbCr 4:2:0 encoding, and the number of ODM pipes.
 */
static uint8_t get_clock_divider(struct pipe_ctx *pipe,
				 bool account_low_refresh_rate)
	uint32_t clock_divider = 1;
	uint32_t numpipes = 1;

	if (account_low_refresh_rate && is_low_refresh_rate(pipe))

	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)

	/* Count this pipe plus all chained ODM pipes. */
	while (pipe->next_odm_pipe) {
		pipe = pipe->next_odm_pipe;
	clock_divider *= numpipes;

	return clock_divider;
/*
 * Align the pixel-clock DTOs of a synchronization group to the DP
 * reference clock so that their vblanks can later be aligned.  Returns
 * the index of the master pipe (embedded panel when present).
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
				    struct pipe_ctx *grouped_pipes[])
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)

	/* DTO override parameters are packed into one 64-bit config value. */
	if (dc->config.vblank_alignment_dto_params &&
	    dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

	/* Read back actual HW timing and pixel clock for each pipe, and
	 * compute DTO phase/modulo relative to the embedded panel's clock.
	 */
	for (i = 0; i < group_size; i++) {
		grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
				grouped_pipes[i]->stream_res.tg,
				&hw_crtc_timing[i]);
		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
		hw_crtc_timing[i].pix_clk_100hz = pclk;
		if (dc_is_embedded_signal(
				grouped_pipes[i]->stream->signal)) {
			phase[i] = embedded_pix_clk_100hz*100;
			modulo[i] = dp_ref_clk_100hz*100;
			/* Non-embedded: scale by the ratio of frame sizes. */
			phase[i] = (uint64_t)embedded_pix_clk_100hz*
				hw_crtc_timing[i].h_total*
				hw_crtc_timing[i].v_total;
			phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
			modulo[i] = (uint64_t)dp_ref_clk_100hz*

			if (reduceSizeAndFraction(&phase[i],
					&modulo[i], true) == false) {
				/*
				 * this will help to stop reporting
				 * this timing synchronizable
				 */
				DC_SYNC_INFO("Failed to reduce DTO parameters\n");
				grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;

	/* Apply the DTO override to every synchronizable non-embedded pipe. */
	for (i = 0; i < group_size; i++) {
		if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
			dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
			grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);

	kfree(hw_crtc_timing);
/*
 * Synchronize vblanks of a group of pipes: temporarily stretch the DPG
 * vertical dimension on the slaves, align DP DTOs and vblanks against
 * the master, then restore the original DPG dimensions.
 */
void dcn10_enable_vblanks_synchronization(
		struct pipe_ctx *grouped_pipes[])
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	/* Stretch slave DPG height (2*h + 1) while alignment is in progress. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);

	/* Reset per-stream sync state before re-aligning. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	/* Align every synchronizable slave's vblank to the master's TG. */
	for (i = 0; i < group_size; i++) {
		if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
			grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
		grouped_pipes[i]->stream->vblank_synchronized = true;
	grouped_pipes[master]->stream->vblank_synchronized = true;
	DC_SYNC_INFO("Sync complete\n");

	/* Restore the real DPG dimensions on the slaves. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
/*
 * Synchronize timing generators across a group of pipes by arming a reset
 * trigger on every slave OTG (sourced from pipe 0's OTG), waiting for the
 * reset to occur, then disarming the triggers.
 */
2192 void dcn10_enable_timing_synchronization(
2196 struct pipe_ctx *grouped_pipes[])
2198 struct dc_context *dc_ctx = dc->ctx;
2199 struct output_pixel_processor *opp;
2200 struct timing_generator *tg;
2201 int i, width, height;
2203 DC_SYNC_INFO("Setting up OTG reset trigger\n");
/* Temporarily oversize slave DPG blank regions during the reset. */
2205 for (i = 1; i < group_size; i++) {
2206 opp = grouped_pipes[i]->stream_res.opp;
2207 tg = grouped_pipes[i]->stream_res.tg;
2208 tg->funcs->get_otg_active_size(tg, &width, &height);
2209 if (opp->funcs->opp_program_dpg_dimensions)
2210 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Mark every stream unsynchronized until the reset completes. */
2213 for (i = 0; i < group_size; i++) {
2214 if (grouped_pipes[i]->stream == NULL)
2216 grouped_pipes[i]->stream->vblank_synchronized = false;
/* Arm each slave TG to reset off pipe 0's OTG. */
2219 for (i = 1; i < group_size; i++)
2220 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2221 grouped_pipes[i]->stream_res.tg,
2222 grouped_pipes[0]->stream_res.tg->inst);
2224 DC_SYNC_INFO("Waiting for trigger\n");
2226 /* Only need to check one pipe for the reset, since all the others
2227 * are synchronized to it. Look at the last pipe programmed to reset.
/* Wait on a single representative slave, then disarm all triggers. */
2230 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2231 for (i = 1; i < group_size; i++)
2232 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2233 grouped_pipes[i]->stream_res.tg);
/* Restore the original DPG dimensions. */
2235 for (i = 1; i < group_size; i++) {
2236 opp = grouped_pipes[i]->stream_res.opp;
2237 tg = grouped_pipes[i]->stream_res.tg;
2238 tg->funcs->get_otg_active_size(tg, &width, &height);
2239 if (opp->funcs->opp_program_dpg_dimensions)
2240 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2243 DC_SYNC_INFO("Sync complete\n");
/*
 * Arm a per-frame CRTC position reset on each pipe in the group (when the
 * TG supports it), then wait until every pipe has observed the trigger.
 */
2246 void dcn10_enable_per_frame_crtc_position_reset(
2249 struct pipe_ctx *grouped_pipes[])
2251 struct dc_context *dc_ctx = dc->ctx;
2254 DC_SYNC_INFO("Setting up\n");
2255 for (i = 0; i < group_size; i++)
2256 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2257 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2258 grouped_pipes[i]->stream_res.tg,
2260 &grouped_pipes[i]->stream->triggered_crtc_reset);
2262 DC_SYNC_INFO("Waiting for trigger\n");
/* Block until each pipe's reset trigger has actually fired. */
2264 for (i = 0; i < group_size; i++)
2265 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2267 DC_SYNC_INFO("Multi-display sync is complete\n");
/*
 * Read the MC VM system aperture registers and convert them into the
 * byte-address form expected by the HUBP aperture programming.
 */
2270 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2271 struct vm_system_aperture_param *apt,
2272 struct dce_hwseq *hws)
2274 PHYSICAL_ADDRESS_LOC physical_page_number;
2275 uint32_t logical_addr_low;
2276 uint32_t logical_addr_high;
2278 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2279 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2280 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2281 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2283 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2284 LOGICAL_ADDR, &logical_addr_low);
2286 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2287 LOGICAL_ADDR, &logical_addr_high);
/* Page number -> byte address (4 KiB pages, hence << 12).
 * NOTE(review): the << 18 scaling of the low/high logical addresses is a
 * register-granularity conversion — confirm against the MC aperture
 * register definitions.
 */
2289 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
2290 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
2291 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
2294 /* Temporary read settings, future will get values from kmd directly */
2295 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2296 struct vm_context0_param *vm0,
2297 struct dce_hwseq *hws)
2299 PHYSICAL_ADDRESS_LOC fb_base;
2300 PHYSICAL_ADDRESS_LOC fb_offset;
2301 uint32_t fb_base_value;
2302 uint32_t fb_offset_value;
/* Framebuffer base/offset, needed to rebase the PTE base below. */
2304 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2305 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
/* Page-table base, start, end and fault-default addresses for VM
 * context 0, split across HI/LO register pairs.
 */
2307 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2308 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2309 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2310 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2312 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2313 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2314 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2315 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2317 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2318 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2319 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2320 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2322 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2323 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2324 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2325 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2328 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2329 * Therefore we need to do
2330 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2331 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
/* FB base/offset registers hold bits [47:24] of the address (<< 24). */
2333 fb_base.quad_part = (uint64_t)fb_base_value << 24;
2334 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2335 vm0->pte_base.quad_part += fb_base.quad_part;
2336 vm0->pte_base.quad_part -= fb_offset.quad_part;
2340 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2342 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2343 struct vm_system_aperture_param apt = {0};
2344 struct vm_context0_param vm0 = {0};
2346 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2347 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2349 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2350 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
/*
 * Power up and clock a plane's pipe resources (HUBP/OPP) ahead of surface
 * programming, set up GPU-VM translation when enabled, and arm the flip
 * interrupt for top pipes.
 */
2353 static void dcn10_enable_plane(
2355 struct pipe_ctx *pipe_ctx,
2356 struct dc_state *context)
2358 struct dce_hwseq *hws = dc->hwseq;
2360 if (dc->debug.sanity_checks) {
2361 hws->funcs.verify_allow_pstate_change_high(dc);
/* Undo the DEGVIDCN10_253 workaround before powering the plane on. */
2364 undo_DEGVIDCN10_253_wa(dc);
2366 power_on_plane(dc->hwseq,
2367 pipe_ctx->plane_res.hubp->inst);
2369 /* enable DCFCLK current DCHUB */
2370 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2372 /* make sure OPP_PIPE_CLOCK_EN = 1 */
2373 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2374 pipe_ctx->stream_res.opp,
/* Mirror the GPU's VM configuration into this HUBP when VM is in use. */
2377 if (dc->config.gpu_vm_support)
2378 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2380 if (dc->debug.sanity_checks) {
2381 hws->funcs.verify_allow_pstate_change_high(dc);
/* Only a top pipe with flip interrupts enabled arms the flip int. */
2384 if (!pipe_ctx->top_pipe
2385 && pipe_ctx->plane_state
2386 && pipe_ctx->plane_state->flip_int_enabled
2387 && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2388 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2392 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2395 struct dpp_grph_csc_adjustment adjust;
2396 memset(&adjust, 0, sizeof(adjust));
2397 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2400 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2401 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2402 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2403 adjust.temperature_matrix[i] =
2404 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2405 } else if (pipe_ctx->plane_state &&
2406 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2407 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2408 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2409 adjust.temperature_matrix[i] =
2410 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2413 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2417 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2419 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2420 if (pipe_ctx->top_pipe) {
2421 struct pipe_ctx *top = pipe_ctx->top_pipe;
2423 while (top->top_pipe)
2424 top = top->top_pipe; // Traverse to top pipe_ctx
2425 if (top->plane_state && top->plane_state->layer_index == 0)
2426 return true; // Front MPO plane not hidden
2432 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2434 // Override rear plane RGB bias to fix MPO brightness
2435 uint16_t rgb_bias = matrix[3];
2440 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2441 matrix[3] = rgb_bias;
2442 matrix[7] = rgb_bias;
2443 matrix[11] = rgb_bias;
/*
 * Program the output color-space conversion for a pipe: use the stream's
 * adjustment matrix when enabled (applying the rear-MPO RGB bias fix when
 * required), otherwise fall back to the default CSC for the colorspace.
 */
2446 void dcn10_program_output_csc(struct dc *dc,
2447 struct pipe_ctx *pipe_ctx,
2448 enum dc_color_space colorspace,
2452 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2453 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2455 /* MPO is broken with RGB colorspaces when OCSC matrix
2456 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2457 * Blending adds offsets from front + rear to rear plane
2459 * Fix is to set RGB bias to 0 on rear plane, top plane
2460 * black value pixels add offset instead of rear + front
2463 int16_t rgb_bias = matrix[3];
2464 // matrix[3/7/11] are all the same offset value
/* Positive bias on a rear MPO plane needs the workaround path. */
2466 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2467 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2469 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
/* No custom matrix: program the per-colorspace default CSC. */
2473 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2474 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2478 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2480 struct dc_bias_and_scale bns_params = {0};
2482 // program the input csc
2483 dpp->funcs->dpp_setup(dpp,
2484 plane_state->format,
2485 EXPANSION_MODE_ZERO,
2486 plane_state->input_csc_color_matrix,
2487 plane_state->color_space,
2490 //set scale and bias registers
2491 build_prescale_params(&bns_params, plane_state);
2492 if (dpp->funcs->dpp_program_bias_and_scale)
2493 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2496 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2498 struct mpc *mpc = dc->res_pool->mpc;
2500 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2501 get_hdr_visual_confirm_color(pipe_ctx, color);
2502 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2503 get_surface_visual_confirm_color(pipe_ctx, color);
2504 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2505 get_surface_tile_visual_confirm_color(pipe_ctx, color);
2507 color_space_to_black_color(
2508 dc, pipe_ctx->stream->output_color_space, color);
2510 if (mpc->funcs->set_bg_color)
2511 mpc->funcs->set_bg_color(mpc, color, mpcc_id);
/*
 * Build the MPCC blend configuration for a plane and (re)insert it into
 * the MPC tree. On a non-full update only the blend config is refreshed;
 * a full update removes any stale MPCC and re-inserts the plane.
 */
2514 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2516 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2517 struct mpcc_blnd_cfg blnd_cfg = {0};
/* Per-pixel alpha only matters when there is a pipe to blend with. */
2518 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2520 struct mpcc *new_mpcc;
2521 struct mpc *mpc = dc->res_pool->mpc;
2522 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2524 if (per_pixel_alpha)
2525 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2527 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2529 blnd_cfg.overlap_only = false;
2530 blnd_cfg.global_gain = 0xff;
2532 if (pipe_ctx->plane_state->global_alpha)
2533 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2535 blnd_cfg.global_alpha = 0xff;
2537 /* DCN1.0 has output CM before MPC which seems to screw with
2538 * pre-multiplied alpha.
2540 blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2541 pipe_ctx->stream->output_color_space)
2547 * Note: currently there is a bug in init_hw such that
2548 * on resume from hibernate, BIOS sets up MPCC0, and
2549 * we do mpcc_remove but the mpcc cannot go to idle
2550 * after remove. This cause us to pick mpcc1 here,
2551 * which causes a pstate hang for yet unknown reason.
/* MPCC instance is tied 1:1 to the HUBP instance. */
2553 mpcc_id = hubp->inst;
2555 /* If there is no full update, don't need to touch MPC tree*/
2556 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2557 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2558 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2562 /* check if this MPCC is already being used */
2563 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2564 /* remove MPCC if being used */
2565 if (new_mpcc != NULL)
2566 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2568 if (dc->debug.sanity_checks)
2569 mpc->funcs->assert_mpcc_idle_before_connect(
2570 dc->res_pool->mpc, mpcc_id);
2572 /* Call MPC to insert new plane */
2573 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2580 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2582 ASSERT(new_mpcc != NULL);
/* Record the OPP/MPCC routing on the HUBP for later teardown. */
2584 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2585 hubp->mpcc_id = mpcc_id;
2588 static void update_scaler(struct pipe_ctx *pipe_ctx)
2590 bool per_pixel_alpha =
2591 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2593 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2594 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2595 /* scaler configuration */
2596 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2597 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * Reprogram the HUBP + DPP pair for a plane according to the update
 * flags: DPP clock gearing, DLG/TTU timing, viewport, blend/MPCC, gamut
 * and output CSC, surface config, and finally the surface address.
 * Statement order here is deliberate; see the inline clock comments.
 */
2600 static void dcn10_update_dchubp_dpp(
2602 struct pipe_ctx *pipe_ctx,
2603 struct dc_state *context)
2605 struct dce_hwseq *hws = dc->hwseq;
2606 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2607 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2608 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2609 struct plane_size size = plane_state->plane_size;
2610 unsigned int compat_level = 0;
2611 bool should_divided_by_2 = false;
2613 /* depends on DML calculation, DPP clock value may change dynamically */
2614 /* If request max dpp clk is lower than current dispclk, no need to
2617 if (plane_state->update_flags.bits.full_update) {
2619 /* new calculated dispclk, dppclk are stored in
2620 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2621 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2622 * dcn10_validate_bandwidth compute new dispclk, dppclk.
2623 * dispclk will put in use after optimize_bandwidth when
2624 * ramp_up_dispclk_with_dpp is called.
2625 * there are two places for dppclk be put in use. One location
2626 * is the same as the location as dispclk. Another is within
2627 * update_dchubp_dpp which happens between pre_bandwidth and
2628 * optimize_bandwidth.
2629 * dppclk updated within update_dchubp_dpp will cause new
2630 * clock values of dispclk and dppclk not be in use at the same
2631 * time. when clocks are decreased, this may cause dppclk is
2632 * lower than previous configuration and let pipe stuck.
2633 * for example, eDP + external dp, change resolution of DP from
2634 * 1920x1080x144hz to 1280x960x60hz.
2635 * before change: dispclk = 337889 dppclk = 337889
2636 * change mode, dcn10_validate_bandwidth calculate
2637 * dispclk = 143122 dppclk = 143122
2638 * update_dchubp_dpp be executed before dispclk be updated,
2639 * dispclk = 337889, but dppclk use new value dispclk /2 =
2640 * 168944. this will cause pipe pstate warning issue.
2641 * solution: between pre_bandwidth and optimize_bandwidth, while
2642 * dispclk is going to be decreased, keep dppclk = dispclk
2644 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2645 dc->clk_mgr->clks.dispclk_khz)
2646 should_divided_by_2 = false;
2648 should_divided_by_2 =
2649 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2650 dc->clk_mgr->clks.dispclk_khz / 2;
2652 dpp->funcs->dpp_dppclk_control(
2654 should_divided_by_2,
/* When a DCCG exists, DPP DTO programming goes through it. */
2657 if (dc->res_pool->dccg)
2658 dc->res_pool->dccg->funcs->update_dpp_dto(
2661 pipe_ctx->plane_res.bw.dppclk_khz);
2663 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2664 dc->clk_mgr->clks.dispclk_khz / 2 :
2665 dc->clk_mgr->clks.dispclk_khz;
2668 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2669 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2670 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2672 if (plane_state->update_flags.bits.full_update) {
2673 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2675 hubp->funcs->hubp_setup(
2677 &pipe_ctx->dlg_regs,
2678 &pipe_ctx->ttu_regs,
2680 &pipe_ctx->pipe_dlg_param);
2681 hubp->funcs->hubp_setup_interdependent(
2683 &pipe_ctx->dlg_regs,
2684 &pipe_ctx->ttu_regs);
/* Surface size tracks the scaler viewport, not the full plane. */
2687 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2689 if (plane_state->update_flags.bits.full_update ||
2690 plane_state->update_flags.bits.bpp_change)
2691 dcn10_update_dpp(dpp, plane_state);
2693 if (plane_state->update_flags.bits.full_update ||
2694 plane_state->update_flags.bits.per_pixel_alpha_change ||
2695 plane_state->update_flags.bits.global_alpha_change)
2696 hws->funcs.update_mpcc(dc, pipe_ctx);
2698 if (plane_state->update_flags.bits.full_update ||
2699 plane_state->update_flags.bits.per_pixel_alpha_change ||
2700 plane_state->update_flags.bits.global_alpha_change ||
2701 plane_state->update_flags.bits.scaling_change ||
2702 plane_state->update_flags.bits.position_change) {
2703 update_scaler(pipe_ctx);
2706 if (plane_state->update_flags.bits.full_update ||
2707 plane_state->update_flags.bits.scaling_change ||
2708 plane_state->update_flags.bits.position_change) {
2709 hubp->funcs->mem_program_viewport(
2711 &pipe_ctx->plane_res.scl_data.viewport,
2712 &pipe_ctx->plane_res.scl_data.viewport_c);
/* Re-apply cursor state; viewport/format changes can invalidate it. */
2715 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2716 dc->hwss.set_cursor_position(pipe_ctx);
2717 dc->hwss.set_cursor_attribute(pipe_ctx);
2719 if (dc->hwss.set_cursor_sdr_white_level)
2720 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2723 if (plane_state->update_flags.bits.full_update) {
2725 dc->hwss.program_gamut_remap(pipe_ctx);
2727 dc->hwss.program_output_csc(dc,
2729 pipe_ctx->stream->output_color_space,
2730 pipe_ctx->stream->csc_color_matrix.matrix,
2731 pipe_ctx->stream_res.opp->inst);
2734 if (plane_state->update_flags.bits.full_update ||
2735 plane_state->update_flags.bits.pixel_format_change ||
2736 plane_state->update_flags.bits.horizontal_mirror_change ||
2737 plane_state->update_flags.bits.rotation_change ||
2738 plane_state->update_flags.bits.swizzle_change ||
2739 plane_state->update_flags.bits.dcc_change ||
2740 plane_state->update_flags.bits.bpp_change ||
2741 plane_state->update_flags.bits.scaling_change ||
2742 plane_state->update_flags.bits.plane_size_change) {
2743 hubp->funcs->hubp_program_surface_config(
2745 plane_state->format,
2746 &plane_state->tiling_info,
2748 plane_state->rotation,
2750 plane_state->horizontal_mirror,
2754 hubp->power_gated = false;
/* Program the surface address last, after all config is in place. */
2756 hws->funcs.update_plane_addr(dc, pipe_ctx);
2758 if (is_pipe_tree_visible(pipe_ctx))
2759 hubp->funcs->set_blank(hubp, false);
/*
 * Blank or unblank a stream's pixel output. Programs the OTG blank color
 * for the stream's colorspace, toggles TG blanking, and keeps ABM in
 * step (restored on unblank, force-disabled before blanking).
 */
2762 void dcn10_blank_pixel_data(
2764 struct pipe_ctx *pipe_ctx,
2767 enum dc_color_space color_space;
2768 struct tg_color black_color = {0};
2769 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2770 struct dc_stream_state *stream = pipe_ctx->stream;
2772 /* program otg blank color */
2773 color_space = stream->output_color_space;
2774 color_space_to_black_color(dc, color_space, &black_color);
2777 * The way 420 is packed, 2 channels carry Y component, 1 channel
2778 * alternate between Cb and Cr, so both channels need the pixel
2781 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2782 black_color.color_r_cr = black_color.color_g_y;
2785 if (stream_res->tg->funcs->set_blank_color)
2786 stream_res->tg->funcs->set_blank_color(
/* Unblank path: release TG blanking, then restore the ABM level. */
2791 if (stream_res->tg->funcs->set_blank)
2792 stream_res->tg->funcs->set_blank(stream_res->tg, blank)
2793 if (stream_res->abm) {
2794 dc->hwss.set_pipe(pipe_ctx);
2795 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
/* Blank path: disable ABM first, wait for VBLANK, then blank. */
2798 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2799 if (stream_res->tg->funcs->set_blank) {
2800 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2801 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2806 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2808 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2809 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2810 struct custom_float_format fmt;
2812 fmt.exponenta_bits = 6;
2813 fmt.mantissa_bits = 12;
2817 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2818 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2820 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2821 pipe_ctx->plane_res.dpp, hw_mult);
/*
 * Program one pipe for the new state: global sync / VTG / vupdate IRQ and
 * blanking for top pipes, then plane enable, HUBP+DPP programming, HDR
 * multiplier, and input/output transfer functions as flagged.
 */
2824 void dcn10_program_pipe(
2826 struct pipe_ctx *pipe_ctx,
2827 struct dc_state *context)
2829 struct dce_hwseq *hws = dc->hwseq;
/* Stream-level programming happens only on the top pipe of the tree. */
2831 if (pipe_ctx->top_pipe == NULL) {
2832 bool blank = !is_pipe_tree_visible(pipe_ctx);
2834 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2835 pipe_ctx->stream_res.tg,
2836 pipe_ctx->pipe_dlg_param.vready_offset,
2837 pipe_ctx->pipe_dlg_param.vstartup_start,
2838 pipe_ctx->pipe_dlg_param.vupdate_offset,
2839 pipe_ctx->pipe_dlg_param.vupdate_width);
2841 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2842 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2844 if (hws->funcs.setup_vupdate_interrupt)
2845 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2847 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
/* Power/clock the plane's resources only on a full update. */
2850 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2851 dcn10_enable_plane(dc, pipe_ctx, context);
2853 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2855 hws->funcs.set_hdr_multiplier(pipe_ctx);
2857 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2858 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2859 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2860 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2862 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2863 * only do gamma programming for full update.
2864 * TODO: This can be further optimized/cleaned up
2865 * Always call this for now since it does memcmp inside before
2866 * doing heavy calculation and programming
2868 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2869 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/*
 * Wait until every enabled top pipe has passed through VBLANK and then
 * VACTIVE, guaranteeing a VUPDATE happened and pending surface updates
 * have been taken by the hardware.
 */
2872 void dcn10_wait_for_pending_cleared(struct dc *dc,
2873 struct dc_state *context)
2875 struct pipe_ctx *pipe_ctx;
2876 struct timing_generator *tg;
2879 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2880 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2881 tg = pipe_ctx->stream_res.tg;
2884 * Only wait for top pipe's tg pending bit
2885 * Also skip if pipe is disabled.
2887 if (pipe_ctx->top_pipe ||
2888 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2889 !tg->funcs->is_tg_enabled(tg))
2893 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2894 * For some reason waiting for OTG_UPDATE_PENDING cleared
2895 * seems to not trigger the update right away, and if we
2896 * lock again before VUPDATE then we don't get a separated
2899 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2900 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
/*
 * Front-end work that must run after pipe locks are released: apply the
 * OPTC underflow workaround on newly plane-less streams, tear down
 * disabled planes, re-optimize bandwidth if anything was disabled, and
 * apply the DEGVIDCN10_254 hubbub workaround when flagged.
 */
2904 void dcn10_post_unlock_program_front_end(
2906 struct dc_state *context)
2910 DC_LOGGER_INIT(dc->ctx->logger);
2912 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2913 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2915 if (!pipe_ctx->top_pipe &&
2916 !pipe_ctx->prev_odm_pipe &&
2918 struct timing_generator *tg = pipe_ctx->stream_res.tg;
/* Stream kept but all planes removed: apply underflow WA. */
2920 if (context->stream_status[i].plane_count == 0)
2921 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
/* Disable planes removed from the new context (old-state pipes). */
2925 for (i = 0; i < dc->res_pool->pipe_count; i++)
2926 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2927 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/* One optimize_bandwidth pass suffices if anything was disabled. */
2929 for (i = 0; i < dc->res_pool->pipe_count; i++)
2930 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2931 dc->hwss.optimize_bandwidth(dc, context);
2935 if (dc->hwseq->wa.DEGVIDCN10_254)
2936 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2939 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2943 for (i = 0; i < context->stream_count; i++) {
2944 if (context->streams[i]->timing.timing_3d_format
2945 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2949 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
/*
 * Raise clocks and program safe (pessimistic) watermarks BEFORE a mode /
 * plane change commits. Counterpart of dcn10_optimize_bandwidth, which
 * lowers them afterwards.
 */
2955 void dcn10_prepare_bandwidth(
2957 struct dc_state *context)
2959 struct dce_hwseq *hws = dc->hwseq;
2960 struct hubbub *hubbub = dc->res_pool->hubbub;
2962 if (dc->debug.sanity_checks)
2963 hws->funcs.verify_allow_pstate_change_high(dc);
/* Skip real clock programming on FPGA emulation platforms. */
2965 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2966 if (context->stream_count == 0)
2967 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2969 dc->clk_mgr->funcs->update_clocks(
/* Remember whether watermarks changed so optimize can re-program. */
2975 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
2976 &context->bw_ctx.bw.dcn.watermarks,
2977 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2979 dcn10_stereo_hw_frame_pack_wa(dc, context);
2981 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2982 dcn_bw_notify_pplib_of_wm_ranges(dc);
2984 if (dc->debug.sanity_checks)
2985 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Lower clocks and program final watermarks AFTER a mode / plane change
 * has committed. Mirrors dcn10_prepare_bandwidth.
 */
2988 void dcn10_optimize_bandwidth(
2990 struct dc_state *context)
2992 struct dce_hwseq *hws = dc->hwseq;
2993 struct hubbub *hubbub = dc->res_pool->hubbub;
2995 if (dc->debug.sanity_checks)
2996 hws->funcs.verify_allow_pstate_change_high(dc);
/* Skip real clock programming on FPGA emulation platforms. */
2998 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2999 if (context->stream_count == 0)
3000 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3002 dc->clk_mgr->funcs->update_clocks(
3008 hubbub->funcs->program_watermarks(hubbub,
3009 &context->bw_ctx.bw.dcn.watermarks,
3010 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3013 dcn10_stereo_hw_frame_pack_wa(dc, context);
3015 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
3016 dcn_bw_notify_pplib_of_wm_ranges(dc);
3018 if (dc->debug.sanity_checks)
3019 hws->funcs.verify_allow_pstate_change_high(dc);
3022 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3023 int num_pipes, struct dc_crtc_timing_adjust adjust)
3026 struct drr_params params = {0};
3027 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3028 unsigned int event_triggers = 0x800;
3029 // Note DRR trigger events are generated regardless of whether num frames met.
3030 unsigned int num_frames = 2;
3032 params.vertical_total_max = adjust.v_total_max;
3033 params.vertical_total_min = adjust.v_total_min;
3034 params.vertical_total_mid = adjust.v_total_mid;
3035 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3036 /* TODO: If multiple pipes are to be supported, you need
3037 * some GSL stuff. Static screen triggers may be programmed differently
3040 for (i = 0; i < num_pipes; i++) {
3041 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3042 pipe_ctx[i]->stream_res.tg, ¶ms);
3043 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3044 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3045 pipe_ctx[i]->stream_res.tg,
3046 event_triggers, num_frames);
3050 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3052 struct crtc_position *position)
3056 /* TODO: handle pipes > 1
3058 for (i = 0; i < num_pipes; i++)
3059 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
/*
 * Translate the generic static-screen trigger request into the TG's
 * event-trigger bitmask and program it on every pipe.
 */
3062 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3063 int num_pipes, const struct dc_static_screen_params *params)
3066 unsigned int triggers = 0;
/* Accumulate one TG trigger bit per requested event source.
 * NOTE(review): the bit values OR'd into `triggers` are not visible
 * here — confirm against the OTG static-screen event encoding.
 */
3068 if (params->triggers.surface_update)
3070 if (params->triggers.cursor_update)
3072 if (params->triggers.force_trigger)
3075 for (i = 0; i < num_pipes; i++)
3076 pipe_ctx[i]->stream_res.tg->funcs->
3077 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3078 triggers, params->num_frames);
/*
 * Derive the CRTC stereo flags from the stream's 3D timing format and
 * view format. Only frame-sequential viewing of a true stereo timing
 * programs stereo; passive converter dongles disable DP stereo sync.
 */
3081 static void dcn10_config_stereo_parameters(
3082 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3084 enum view_3d_format view_format = stream->view_format;
3085 enum dc_timing_3d_format timing_3d_format =\
3086 stream->timing.timing_3d_format;
3087 bool non_stereo_timing = false;
/* SBS/TAB carry both eyes in one frame: no CRTC-level stereo needed. */
3089 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3090 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3091 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3092 non_stereo_timing = true;
3094 if (non_stereo_timing == false &&
3095 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3097 flags->PROGRAM_STEREO = 1;
3098 flags->PROGRAM_POLARITY = 1;
3099 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3100 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3101 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3102 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3103 enum display_dongle_type dongle = \
3104 stream->link->ddc->dongle_type;
/* Passive converters cannot pass the DP stereo sync signal. */
3105 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3106 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3107 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3108 flags->DISABLE_STEREO_DP_SYNC = 1;
3110 flags->RIGHT_EYE_POLARITY =\
3111 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3112 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3113 flags->FRAME_PACKED = 1;
/*
 * Apply stereo configuration for a pipe: compute the stereo flags, drive
 * the sideband GPIO for sideband frame-alternate formats, then program
 * the OPP and TG stereo controls.
 */
3119 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3121 struct crtc_stereo_flags flags = { 0 };
3122 struct dc_stream_state *stream = pipe_ctx->stream;
3124 dcn10_config_stereo_parameters(stream, &flags);
/* Sideband FA: try to assert the stereo GPIO; on failure (or for any
 * other format) make sure it is deasserted.
 */
3126 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3127 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3128 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3130 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3133 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3134 pipe_ctx->stream_res.opp,
3135 flags.PROGRAM_STEREO == 1,
3138 pipe_ctx->stream_res.tg->funcs->program_stereo(
3139 pipe_ctx->stream_res.tg,
3146 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3150 for (i = 0; i < res_pool->pipe_count; i++) {
3151 if (res_pool->hubps[i]->inst == mpcc_inst)
3152 return res_pool->hubps[i];
/*
 * Wait for every MPCC flagged as disconnect-pending on this pipe's OPP to
 * go idle, clear the pending flag, and blank the associated HUBP.
 */
3158 void dcn10_wait_for_mpcc_disconnect(
3160 struct resource_pool *res_pool,
3161 struct pipe_ctx *pipe_ctx)
3163 struct dce_hwseq *hws = dc->hwseq;
3166 if (dc->debug.sanity_checks) {
3167 hws->funcs.verify_allow_pstate_change_high(dc);
/* Nothing to do without an OPP (pipe not carrying a stream). */
3170 if (!pipe_ctx->stream_res.opp)
3173 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3174 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
/* NOTE(review): get_hubp_by_inst can return NULL when no hubp
 * matches; hubp is dereferenced below without a check — confirm
 * whether a pending flag can ever name a nonexistent instance.
 */
3175 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3177 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3178 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3179 hubp->funcs->set_blank(hubp, true);
3183 if (dc->debug.sanity_checks) {
3184 hws->funcs.verify_allow_pstate_change_high(dc);
3189 bool dcn10_dummy_display_power_gating(
3191 uint8_t controller_id,
3192 struct dc_bios *dcb,
3193 enum pipe_gating_control power_gating)
3198 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3200 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3201 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3203 struct dc *dc = plane_state->ctx->dc;
3205 if (plane_state == NULL)
3208 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3209 pipe_ctx->plane_res.hubp);
3211 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3214 plane_state->status.current_address = plane_state->status.requested_address;
3216 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3217 tg->funcs->is_stereo_left_eye) {
3218 plane_state->status.is_right_eye =
3219 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3222 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3223 struct dce_hwseq *hwseq = dc->hwseq;
3224 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3225 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3227 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3228 struct hubbub *hubbub = dc->res_pool->hubbub;
3230 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3231 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3236 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3238 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3240 /* In DCN, this programming sequence is owned by the hubbub */
3241 hubbub->funcs->update_dchub(hubbub, dh_data);
3244 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3246 struct pipe_ctx *test_pipe, *split_pipe;
3247 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3248 struct rect r1 = scl_data->recout, r2, r2_half;
3249 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3250 int cur_layer = pipe_ctx->plane_state->layer_index;
3253 * Disable the cursor if there's another pipe above this with a
3254 * plane that contains this pipe's viewport to prevent double cursor
3255 * and incorrect scaling artifacts.
3257 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3258 test_pipe = test_pipe->top_pipe) {
3259 // Skip invisible layer and pipe-split plane on same layer
3260 if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
3263 r2 = test_pipe->plane_res.scl_data.recout;
3264 r2_r = r2.x + r2.width;
3265 r2_b = r2.y + r2.height;
3266 split_pipe = test_pipe;
3269 * There is another half plane on same layer because of
3270 * pipe-split, merge together per same height.
3272 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3273 split_pipe = split_pipe->top_pipe)
3274 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3275 r2_half = split_pipe->plane_res.scl_data.recout;
3276 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3277 r2.width = r2.width + r2_half.width;
3278 r2_r = r2.x + r2.width;
3282 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
/*
 * dcn10_set_cursor_position() - Program the HW cursor position for a pipe.
 *
 * Translates the stream-space cursor position in
 * pipe_ctx->stream->cursor_position into plane space — applying plane
 * scaling, optional source-rect translation, rotation (90/180/270) and
 * pipe-split / ODM-combine corrections — then programs the result into
 * both the HUBP and the DPP.
 *
 * NOTE(review): several lines of this function (braces, a few `else`
 * branches and the declarations of viewport_x / viewport_y) appear to have
 * been lost in extraction; the markers below flag each suspected gap.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
	/* Work on a local copy; the stream's cursor state is not modified. */
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	/* Parameters HUBP/DPP need to position and clip the cursor. */
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	/* NOTE(review): the initializer's closing "};" looks lost here. */
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);
	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/*
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/*
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	/* 90/270 rotation swaps the axes: height scales x, width scales y. */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
			pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
			pipe_ctx->plane_state->dst_rect.height;
	/* NOTE(review): a "} else {" is presumably missing here — the pair
	 * below is the unrotated scaling path. */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
			pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
			pipe_ctx->plane_state->dst_rect.height;

	/*
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;

	/*
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */
	/* NOTE(review): "if (x_pos < 0)" / "if (y_pos < 0)" guards and the
	 * clamps to zero around these adjustments look lost in extraction. */
		pos_cpy.x_hotspot -= x_pos;
		pos_cpy.y_hotspot -= y_pos;

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* Video-progressive surfaces never show the HW cursor. */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* An overlapping pipe above will draw the cursor instead. */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
			(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
	/* NOTE(review): "pos_cpy.y = temp_x;" and the closing brace are
	 * presumably missing — temp_x is otherwise unused. */
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
	/* NOTE(review): "int viewport_y =" appears missing before the next
	 * line; viewport_y is referenced below. temp_y is also unused in
	 * the visible code. */
			pipe_ctx->plane_res.scl_data.viewport.y;

		/*
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 * pos_cpy.y as the 180 degree rotation case below,
		 * but use pos_cpy.x as our input because we are rotating
		 * 270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
	/* NOTE(review): "} else {" presumably lost between these branches. */
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
	/* NOTE(review): "} else {" presumably lost between these branches. */
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
			/* Use the lower of the two pipes' viewport.y. */
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
	/* NOTE(review): "} else {" presumably lost here. */
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			pos_cpy.y += pos_cpy_x_offset;
	/* NOTE(review): the non-split "} else {" path presumably starts here. */
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
	/* NOTE(review): "int viewport_x =" appears missing before the next
	 * line; viewport_x is referenced below. */
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (pipe_split_on || odm_combine_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				pos_cpy.x = 2 * viewport_width
					- pos_cpy.x + 2 * viewport_x;
	/* NOTE(review): "} else {" presumably lost here. */
				uint32_t temp_x = pos_cpy.x;

				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
	/* NOTE(review): closing braces and the non-split "} else {" path
	 * presumably lost here. */
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;

		/*
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 *
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;

	/* Program the final plane-space position into both blocks. */
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3491 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3493 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3495 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3496 pipe_ctx->plane_res.hubp, attributes);
3497 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3498 pipe_ctx->plane_res.dpp, attributes);
3501 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3503 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3504 struct fixed31_32 multiplier;
3505 struct dpp_cursor_attributes opt_attr = { 0 };
3506 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3507 struct custom_float_format fmt;
3509 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3512 fmt.exponenta_bits = 5;
3513 fmt.mantissa_bits = 10;
3516 if (sdr_white_level > 80) {
3517 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3518 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3521 opt_attr.scale = hw_scale;
3524 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3525 pipe_ctx->plane_res.dpp, &opt_attr);
3529 * apply_front_porch_workaround TODO FPGA still need?
3531 * This is a workaround for a bug that has existed since R5xx and has not been
3532 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3534 static void apply_front_porch_workaround(
3535 struct dc_crtc_timing *timing)
3537 if (timing->flags.INTERLACE == 1) {
3538 if (timing->v_front_porch < 2)
3539 timing->v_front_porch = 2;
3541 if (timing->v_front_porch < 1)
3542 timing->v_front_porch = 1;
3546 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3548 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3549 struct dc_crtc_timing patched_crtc_timing;
3550 int vesa_sync_start;
3552 int interlace_factor;
3553 int vertical_line_start;
3555 patched_crtc_timing = *dc_crtc_timing;
3556 apply_front_porch_workaround(&patched_crtc_timing);
3558 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3560 vesa_sync_start = patched_crtc_timing.v_addressable +
3561 patched_crtc_timing.v_border_bottom +
3562 patched_crtc_timing.v_front_porch;
3564 asic_blank_end = (patched_crtc_timing.v_total -
3566 patched_crtc_timing.v_border_top)
3569 vertical_line_start = asic_blank_end -
3570 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3572 return vertical_line_start;
3575 void dcn10_calc_vupdate_position(
3577 struct pipe_ctx *pipe_ctx,
3578 uint32_t *start_line,
3581 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3582 int vline_int_offset_from_vupdate =
3583 pipe_ctx->stream->periodic_interrupt0.lines_offset;
3584 int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3587 if (vline_int_offset_from_vupdate > 0)
3588 vline_int_offset_from_vupdate--;
3589 else if (vline_int_offset_from_vupdate < 0)
3590 vline_int_offset_from_vupdate++;
3592 start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3594 if (start_position >= 0)
3595 *start_line = start_position;
3597 *start_line = dc_crtc_timing->v_total + start_position - 1;
3599 *end_line = *start_line + 2;
3601 if (*end_line >= dc_crtc_timing->v_total)
3605 static void dcn10_cal_vline_position(
3607 struct pipe_ctx *pipe_ctx,
3608 enum vline_select vline,
3609 uint32_t *start_line,
3612 enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3614 if (vline == VLINE0)
3615 ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3616 else if (vline == VLINE1)
3617 ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3619 switch (ref_point) {
3620 case START_V_UPDATE:
3621 dcn10_calc_vupdate_position(
3628 // Suppose to do nothing because vsync is 0;
3636 void dcn10_setup_periodic_interrupt(
3638 struct pipe_ctx *pipe_ctx,
3639 enum vline_select vline)
3641 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3643 if (vline == VLINE0) {
3644 uint32_t start_line = 0;
3645 uint32_t end_line = 0;
3647 dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3649 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3651 } else if (vline == VLINE1) {
3652 pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3654 pipe_ctx->stream->periodic_interrupt1.lines_offset);
3658 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3660 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3661 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3663 if (start_line < 0) {
3668 if (tg->funcs->setup_vertical_interrupt2)
3669 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3672 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3673 struct dc_link_settings *link_settings)
3675 struct encoder_unblank_param params = {0};
3676 struct dc_stream_state *stream = pipe_ctx->stream;
3677 struct dc_link *link = stream->link;
3678 struct dce_hwseq *hws = link->dc->hwseq;
3680 /* only 3 items below are used by unblank */
3681 params.timing = pipe_ctx->stream->timing;
3683 params.link_settings.link_rate = link_settings->link_rate;
3685 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3686 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3687 params.timing.pix_clk_100hz /= 2;
3688 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
3691 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3692 hws->funcs.edp_backlight_control(link, true);
3696 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3697 const uint8_t *custom_sdp_message,
3698 unsigned int sdp_message_size)
3700 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3701 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3702 pipe_ctx->stream_res.stream_enc,
3707 enum dc_status dcn10_set_clock(struct dc *dc,
3708 enum dc_clock_type clock_type,
3712 struct dc_state *context = dc->current_state;
3713 struct dc_clock_config clock_cfg = {0};
3714 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3716 if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3717 return DC_FAIL_UNSUPPORTED_1;
3719 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3720 context, clock_type, &clock_cfg);
3722 if (clk_khz > clock_cfg.max_clock_khz)
3723 return DC_FAIL_CLK_EXCEED_MAX;
3725 if (clk_khz < clock_cfg.min_clock_khz)
3726 return DC_FAIL_CLK_BELOW_MIN;
3728 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3729 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3731 /*update internal request clock for update clock use*/
3732 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3733 current_clocks->dispclk_khz = clk_khz;
3734 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3735 current_clocks->dppclk_khz = clk_khz;
3737 return DC_ERROR_UNEXPECTED;
3739 if (dc->clk_mgr->funcs->update_clocks)
3740 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3746 void dcn10_get_clock(struct dc *dc,
3747 enum dc_clock_type clock_type,
3748 struct dc_clock_config *clock_cfg)
3750 struct dc_state *context = dc->current_state;
3752 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3753 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3757 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3759 struct resource_pool *pool = dc->res_pool;
3762 for (i = 0; i < pool->pipe_count; i++) {
3763 struct hubp *hubp = pool->hubps[i];
3764 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3766 hubp->funcs->hubp_read_state(hubp);
3769 dcc_en_bits[i] = s->dcc_en ? 1 : 0;