1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2011-2013, NVIDIA Corporation.
4 * Copyright 2014 Google Inc.
13 #include <video_bridge.h>
15 #include <asm/arch-tegra/dc.h>
16 #include <linux/delay.h>
20 #include "displayport.h"
22 #define DO_FAST_LINK_TRAINING 1
24 struct tegra_dp_plat {
29 * struct tegra_dp_priv - private displayport driver info
31 * @dc_dev: Display controller device that is sending the video feed
33 struct tegra_dp_priv {
35 struct udevice *dc_dev;
36 struct dpaux_ctlr *regs;
41 struct tegra_dp_priv dp_data;
/*
 * Read one DPAUX register.
 * NOTE(review): dp->regs is cast to u32 * before the addition, so 'reg'
 * is a word (32-bit) offset, not a byte offset — confirm against the
 * DPAUX_* register definitions in displayport.h.
 */
43 static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
45 return readl((u32 *)dp->regs + reg);
/*
 * Write one DPAUX register; 'reg' is a word offset (mirrors
 * tegra_dpaux_readl's pointer arithmetic on a u32 *).
 */
48 static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
51 writel(val, (u32 *)dp->regs + reg);
/*
 * Poll @reg until (value & @mask) == @exp_val or the timeout budget is
 * exhausted. Returns 0 on success; on timeout logs a debug message.
 * NOTE(review): this listing is missing intermediate lines (loop entry,
 * timeout-exit path, final return) — verify the full source before relying
 * on the exact loop/termination behavior described here.
 */
54 static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
55 u32 reg, u32 mask, u32 exp_val,
60 u32 temp = timeout_us;
63 udelay(poll_interval_us);
64 reg_val = tegra_dpaux_readl(dp, reg);
/* decrement the remaining budget, guarding against unsigned underflow */
65 if (timeout_us > poll_interval_us)
66 timeout_us -= poll_interval_us;
69 } while ((reg_val & mask) != exp_val);
71 if ((reg_val & mask) == exp_val)
72 return 0; /* success */
73 debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
74 reg, reg_val, mask, exp_val);
/*
 * Wait for the pending AUX transaction to complete by polling the
 * TRANSACTREQ field of DPAUX_DP_AUXCTL (100us poll interval, overall
 * budget DP_AUX_TIMEOUT_MS). Non-zero poll result means timeout.
 */
78 static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
80 /* According to DP spec, each aux transaction needs to finish
82 if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
83 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
84 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
85 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
86 debug("dp: DPAUX transaction timeout\n");
/*
 * Write one AUX chunk (at most DP_AUX_MAX_BYTES) to the sink.
 *
 * @cmd:      one of the DPAUX write commands (I2CWR/MOTWR/AUXWR)
 * @addr:     AUX/DPCD or I2C address
 * @data:     payload to send
 * @size:     in: requested byte count; out: bytes actually acked
 * @aux_stat: out: last value of DPAUX_DP_AUXSTAT
 *
 * Retries on transient errors (up to DP_AUX_TIMEOUT_MAX_TRIES) and on
 * DEFER replies (up to DP_AUX_DEFER_MAX_TRIES).
 * NOTE(review): several lines are missing from this listing (switch
 * header, error-return paths, loop tail) — confirm against the full file.
 */
92 static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
93 u32 addr, u8 *data, u32 *size,
98 u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
99 u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
102 if (*size > DP_AUX_MAX_BYTES)
103 return -1; /* only write one chunk of data */
105 /* Make sure the command is write command */
107 case DPAUX_DP_AUXCTL_CMD_I2CWR:
108 case DPAUX_DP_AUXCTL_CMD_MOTWR:
109 case DPAUX_DP_AUXCTL_CMD_AUXWR:
112 debug("dp: aux write cmd 0x%x is invalid\n", cmd);
116 tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
/* stage the payload into the 32-bit AUXDATA_WRITE window registers */
117 for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
118 memcpy(&temp_data, data, 4);
119 tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
123 reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
124 reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
126 reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
/* hardware encodes the transfer length as (bytes - 1) */
127 reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
129 while ((timeout_retries > 0) && (defer_retries > 0)) {
/* back off before every attempt except the very first */
130 if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
131 (defer_retries != DP_AUX_DEFER_MAX_TRIES))
134 reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
135 tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
137 if (tegra_dpaux_wait_transaction(dp))
138 debug("dp: aux write transaction timeout\n");
140 *aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
/* hardware-reported error: retry a bounded number of times */
142 if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
143 (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
144 (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
145 (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
146 if (timeout_retries-- > 0) {
147 debug("dp: aux write retry (0x%x) -- %d\n",
148 *aux_stat, timeout_retries);
149 /* clear the error bits */
150 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
154 debug("dp: aux write got error (0x%x)\n",
/* sink replied DEFER: retry on a separate budget */
160 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
161 (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
162 if (defer_retries-- > 0) {
163 debug("dp: aux write defer (0x%x) -- %d\n",
164 *aux_stat, defer_retries);
165 /* clear the error bits */
166 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
170 debug("dp: aux write defer exceeds max retries (0x%x)\n",
/* ACK: report how many bytes the sink actually accepted */
176 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
177 DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
178 *size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
181 debug("dp: aux write failed (0x%x)\n", *aux_stat);
185 /* Should never come to here */
/*
 * Read one AUX chunk (at most DP_AUX_MAX_BYTES) from the sink.
 *
 * Mirror image of tegra_dc_dpaux_write_chunk(): validates the command is a
 * read variant, requires HPD to be asserted, then issues the transaction
 * with the same timeout/defer retry scheme. On ACK, copies the replied
 * bytes out of the AUXDATA_READ window into @data and updates *@size.
 * NOTE(review): lines are missing from this listing (switch header,
 * error-return paths) — confirm details against the full file.
 */
189 static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
190 u32 addr, u8 *data, u32 *size,
194 u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
195 u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
197 if (*size > DP_AUX_MAX_BYTES) {
198 debug("only read one chunk\n");
199 return -EIO; /* only read one chunk */
202 /* Check to make sure the command is read command */
204 case DPAUX_DP_AUXCTL_CMD_I2CRD:
205 case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
206 case DPAUX_DP_AUXCTL_CMD_MOTRD:
207 case DPAUX_DP_AUXCTL_CMD_AUXRD:
210 debug("dp: aux read cmd 0x%x is invalid\n", cmd);
/* a read requires a plugged sink (hot-plug detect asserted) */
214 *aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
215 if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
216 debug("dp: HPD is not detected\n");
220 tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
222 reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
223 reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
225 reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
/* hardware encodes the transfer length as (bytes - 1) */
226 reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
227 while ((timeout_retries > 0) && (defer_retries > 0)) {
/* back off before every attempt except the very first */
228 if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
229 (defer_retries != DP_AUX_DEFER_MAX_TRIES))
230 udelay(DP_DPCP_RETRY_SLEEP_NS * 2);
232 reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
233 tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
235 if (tegra_dpaux_wait_transaction(dp))
236 debug("dp: aux read transaction timeout\n");
238 *aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
/* hardware-reported error: retry a bounded number of times */
240 if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
241 (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
242 (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
243 (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
244 if (timeout_retries-- > 0) {
245 debug("dp: aux read retry (0x%x) -- %d\n",
246 *aux_stat, timeout_retries);
247 /* clear the error bits */
248 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
250 continue; /* retry */
252 debug("dp: aux read got error (0x%x)\n",
/* sink replied DEFER: retry on a separate budget */
258 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
259 (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
260 if (defer_retries-- > 0) {
261 debug("dp: aux read defer (0x%x) -- %d\n",
262 *aux_stat, defer_retries);
263 /* clear the error bits */
264 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
268 debug("dp: aux read defer exceeds max retries (0x%x)\n",
/* ACK: pull the reply out of the 32-bit AUXDATA_READ registers */
274 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
275 DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
279 for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
280 temp_data[i] = tegra_dpaux_readl(dp,
281 DPAUX_DP_AUXDATA_READ_W(i));
283 *size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
284 memcpy(data, temp_data, *size);
288 debug("dp: aux read failed (0x%x\n", *aux_stat);
292 /* Should never come to here */
293 debug("%s: can't\n", __func__);
/*
 * Read an arbitrary-length buffer over AUX by looping
 * tegra_dc_dpaux_read_chunk() in DP_AUX_MAX_BYTES pieces until *size
 * bytes are finished.
 * NOTE(review): the data-pointer advance between chunks is not visible in
 * this listing — verify the full source.
 */
298 static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
299 u8 *data, u32 *size, u32 *aux_stat)
306 cur_size = *size - finished;
307 if (cur_size > DP_AUX_MAX_BYTES)
308 cur_size = DP_AUX_MAX_BYTES;
310 ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
311 data, &cur_size, aux_stat);
315 /* cur_size should be the real size returned */
318 finished += cur_size;
320 } while (*size > finished);
/*
 * Read a single DPCD register at address @cmd into the caller's buffer
 * via a one-chunk AUX read.
 */
326 static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
333 ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
334 cmd, data_ptr, &size, &status);
336 debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
/*
 * Write a single byte to DPCD register @cmd via a one-chunk AUX write.
 */
343 static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
350 ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
351 cmd, &data, &size, &status);
353 debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
/*
 * I2C-over-AUX read: for each chunk, first send the register address with
 * a MOT (middle-of-transaction) write, then read @cur_size bytes with an
 * I2C read, repeating until @size bytes are finished.
 */
360 static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
361 u8 addr, u8 *data, u32 size, u32 *aux_stat)
367 u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
370 ret = tegra_dc_dpaux_write_chunk(
371 dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
372 &addr, &len, aux_stat);
374 debug("%s: error sending address to read.\n",
379 ret = tegra_dc_dpaux_read_chunk(
380 dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
381 data, &cur_size, aux_stat);
383 debug("%s: error reading data.\n", __func__);
387 /* cur_size should be the real size returned */
390 finished += cur_size;
391 } while (size > finished);
/*
 * Bring up the DPAUX pad: clear/mask interrupts, program the hybrid pad
 * control (drive strength, common-mode voltage, input receiver), and
 * power up the pad.
 */
396 static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
398 /* clear interrupt */
399 tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
400 /* do not enable interrupt for now. Enable them when Isr in place */
401 tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);
403 tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
404 DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
405 DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
406 0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
407 DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);
409 tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
410 DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
/*
 * Dump every field of a link configuration to the debug log.
 * Debug aid only; no side effects on hardware or on @link_cfg.
 */
414 static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
415 const struct tegra_dp_link_config *link_cfg)
417 debug("DP config: cfg_name cfg_value\n");
418 debug(" Lane Count %d\n",
419 link_cfg->max_lane_count);
420 debug(" SupportEnhancedFraming %s\n",
421 link_cfg->support_enhanced_framing ? "Y" : "N");
422 debug(" Bandwidth %d\n",
423 link_cfg->max_link_bw);
425 link_cfg->bits_per_pixel);
426 debug(" EnhancedFraming %s\n",
427 link_cfg->enhanced_framing ? "Y" : "N");
428 debug(" Scramble_enabled %s\n",
429 link_cfg->scramble_ena ? "Y" : "N");
430 debug(" LinkBW %d\n",
432 debug(" lane_count %d\n",
433 link_cfg->lane_count);
434 debug(" activespolarity %d\n",
435 link_cfg->activepolarity);
436 debug(" active_count %d\n",
437 link_cfg->active_count);
438 debug(" tu_size %d\n",
440 debug(" active_frac %d\n",
441 link_cfg->active_frac);
442 debug(" watermark %d\n",
443 link_cfg->watermark);
444 debug(" hblank_sym %d\n",
445 link_cfg->hblank_sym);
446 debug(" vblank_sym %d\n",
447 link_cfg->vblank_sym);
/*
 * Step down to the next-lower link configuration after a training
 * failure: reduce lane count and/or link rate depending on the current
 * rate. Returns 0 while a usable (lane_count > 0) config remains,
 * -ENOLINK once out of options.
 * NOTE(review): the case fall-through/break structure is not fully
 * visible in this listing — verify the exact downgrade order (5.4 ->
 * 2.7 -> 1.62, lanes halved) against the full source.
 */
451 static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
452 struct tegra_dp_link_config *cfg)
454 switch (cfg->link_bw) {
455 case SOR_LINK_SPEED_G1_62:
456 if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
457 cfg->link_bw = SOR_LINK_SPEED_G2_7;
458 cfg->lane_count /= 2;
460 case SOR_LINK_SPEED_G2_7:
461 cfg->link_bw = SOR_LINK_SPEED_G1_62;
463 case SOR_LINK_SPEED_G5_4:
464 if (cfg->lane_count == 1) {
465 cfg->link_bw = SOR_LINK_SPEED_G2_7;
466 cfg->lane_count = cfg->max_lane_count;
468 cfg->lane_count /= 2;
472 debug("dp: Error link rate %d\n", cfg->link_bw);
476 return (cfg->lane_count > 0) ? 0 : -ENOLINK;
/*
 * Calcuate if given cfg can meet the mode request.
 * Return 0 if mode is possible, -1 otherwise
 *
 * Searches transfer-unit (TU) sizes 64 down to 32 for the TU/active-symbol
 * split with the lowest accumulated timing error, then derives the
 * watermark and the hblank/vblank symbol budgets for the SOR.
 * Fixed-point math uses f = 100000 as the precision factor throughout.
 * NOTE(review): this listing drops many intermediate lines of the search
 * loop; the annotations below describe only what is visible.
 */
483 static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
484 const struct display_timing *timing,
485 struct tegra_dp_link_config *link_cfg)
/* link_bw is in 270 MHz units, so rate in Hz = 27 * bw * 10^7 */
487 const u32 link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
488 const u64 f = 100000; /* precision factor */
489 u32 num_linkclk_line; /* Number of link clocks per line */
490 u64 ratio_f; /* Ratio of incoming to outgoing data rate */
492 u64 activesym_f; /* Activesym per TU */
498 u64 accumulated_error_f = 0;
499 u32 lowest_neg_activecount = 0;
500 u32 lowest_neg_activepolarity = 0;
501 u32 lowest_neg_tusize = 64;
502 u32 num_symbols_per_line;
503 u64 lowest_neg_activefrac = 0;
504 u64 lowest_neg_error_f = 64 * f;
/* reject degenerate configs before dividing by them */
509 if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
510 !link_cfg->bits_per_pixel)
/* mode needs more bandwidth than the link provides: not possible */
513 if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
514 (u64)link_rate * 8 * link_cfg->lane_count)
517 num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
518 timing->pixelclock.typ));
520 ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
522 do_div(ratio_f, link_rate * link_cfg->lane_count);
/* search TU sizes from 64 down to 32 for the lowest error */
524 for (i = 64; i >= 32; --i) {
525 activesym_f = ratio_f * i;
526 activecount_f = lldiv(activesym_f, (u32)f) * f;
527 frac_f = activesym_f - activecount_f;
528 activecount = (u32)(lldiv(activecount_f, (u32)f));
530 if (frac_f < (lldiv(f, 2))) /* fraction < 0.5 */
538 /* warning: frac_f should be 64-bit */
539 frac_f = lldiv(f * f, frac_f); /* 1 / fraction */
540 if (frac_f > (15 * f))
541 activefrac = activepolarity ? 1 : 15;
543 activefrac = activepolarity ?
544 (u32)lldiv(frac_f, (u32)f) + 1 :
545 (u32)lldiv(frac_f, (u32)f);
551 if (activepolarity == 1)
552 approx_value_f = activefrac ? lldiv(
553 (activecount_f + (activefrac * f - f) * f),
557 approx_value_f = activefrac ?
558 activecount_f + lldiv(f, activefrac) :
/* error per line = per-TU approximation error * link clocks / TU */
561 if (activesym_f < approx_value_f) {
562 accumulated_error_f = num_linkclk_line *
563 lldiv(approx_value_f - activesym_f, i);
566 accumulated_error_f = num_linkclk_line *
567 lldiv(activesym_f - approx_value_f, i);
/* keep the best (lowest-error) candidate seen so far */
571 if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
572 (accumulated_error_f == 0)) {
573 lowest_neg_error_f = accumulated_error_f;
574 lowest_neg_tusize = i;
575 lowest_neg_activecount = activecount;
576 lowest_neg_activepolarity = activepolarity;
577 lowest_neg_activefrac = activefrac;
/* exact fit: no point searching smaller TUs */
579 if (accumulated_error_f == 0)
584 if (lowest_neg_activefrac == 0) {
585 link_cfg->activepolarity = 0;
586 link_cfg->active_count = lowest_neg_activepolarity ?
587 lowest_neg_activecount : lowest_neg_activecount - 1;
588 link_cfg->tu_size = lowest_neg_tusize;
589 link_cfg->active_frac = 1;
591 link_cfg->activepolarity = lowest_neg_activepolarity;
592 link_cfg->active_count = (u32)lowest_neg_activecount;
593 link_cfg->tu_size = lowest_neg_tusize;
594 link_cfg->active_frac = (u32)lowest_neg_activefrac;
597 watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
598 link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
599 f)) + link_cfg->bits_per_pixel / 4 - 1;
600 num_symbols_per_line = (timing->hactive.typ *
601 link_cfg->bits_per_pixel) /
602 (8 * link_cfg->lane_count);
/* clamp the watermark into a range the SOR can use */
604 if (link_cfg->watermark > 30) {
605 debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
606 link_cfg->watermark = 30;
608 } else if (link_cfg->watermark > num_symbols_per_line) {
609 debug("dp: sor setting: force watermark to the number of symbols in the line\n");
610 link_cfg->watermark = num_symbols_per_line;
615 * Refer to dev_disp.ref for more information.
616 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
617 * SetRasterBlankStart.X - 7) * link_clk / pclk)
618 * - 3 * enhanced_framing - Y
619 * where Y = (# lanes == 4) 3 : (# lanes == 2) ? 6 : 12
621 link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
622 timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
623 link_rate, timing->pixelclock.typ) -
624 3 * link_cfg->enhanced_framing -
625 (12 / link_cfg->lane_count);
/* a negative budget means no symbols fit; floor at zero */
627 if (link_cfg->hblank_sym < 0)
628 link_cfg->hblank_sym = 0;
632 * Refer to dev_disp.ref for more information.
633 * # symbols/vblank = ((SetRasterBlankStart.X -
634 * SetRasterBlankEen.X - 25) * link_clk / pclk)
636 * where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39
638 link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
639 * link_rate, timing->pixelclock.typ) - (36 /
640 link_cfg->lane_count) - 4;
642 if (link_cfg->vblank_sym < 0)
643 link_cfg->vblank_sym = 0;
645 link_cfg->is_valid = 1;
647 tegra_dc_dp_dump_link_cfg(dp, link_cfg);
/*
 * Populate @link_cfg with the sink's maximum capabilities read from DPCD
 * (lane count, TPS3 support, enhanced framing, downspread, AUX read
 * interval, max link rate, eDP ASSR/framing caps), seed drive parameters
 * with high values for training, and run the mode calculation.
 */
653 static int tegra_dc_dp_init_max_link_cfg(
654 const struct display_timing *timing,
655 struct tegra_dp_priv *dp,
656 struct tegra_dp_link_config *link_cfg)
658 const int drive_current = 0x40404040;
659 const int preemphasis = 0x0f0f0f0f;
660 const int postcursor = 0;
664 ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
667 link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;
668 link_cfg->tps3_supported = (dpcd_data &
669 DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;
671 link_cfg->support_enhanced_framing =
672 (dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
675 ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
678 link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
681 ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
682 &link_cfg->aux_rd_interval);
685 ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
686 &link_cfg->max_link_bw);
691 * Set to a high value for link training and attach.
692 * Will be re-programmed when dp is enabled.
694 link_cfg->drive_current = drive_current;
695 link_cfg->preemphasis = preemphasis;
696 link_cfg->postcursor = postcursor;
698 ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
702 link_cfg->alt_scramber_reset_cap =
703 (dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
705 link_cfg->only_enhanced_framing =
706 (dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
/* start training at the maximum advertised capabilities */
709 link_cfg->lane_count = link_cfg->max_lane_count;
710 link_cfg->link_bw = link_cfg->max_link_bw;
711 link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
/* one 60 Hz frame period in ms, rounded up */
712 link_cfg->frame_in_ms = (1000 / 60) + 1;
714 tegra_dc_dp_calc_config(dp, timing, link_cfg);
/*
 * Enable/disable Alternate Scrambler Seed Reset (eDP ASSR) on both the
 * sink (DPCD eDP configuration set) and the SOR.
 */
718 static int tegra_dc_dp_set_assr(struct tegra_dp_priv *priv,
719 struct udevice *sor, int ena)
724 DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
725 DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;
727 ret = tegra_dc_dp_dpcd_write(priv, DP_EDP_CONFIGURATION_SET,
732 /* Also reset the scrambler to 0xfffe */
733 tegra_dc_sor_set_internal_panel(sor, ena);
/*
 * Program the link rate on both ends: the SOR first, then the sink's
 * DPCD LINK_BW_SET register.
 */
737 static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
741 tegra_dc_sor_set_link_bandwidth(sor, link_bw);
744 return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
/*
 * Program the lane count on the sink (with the enhanced-framing flag if
 * enabled in @link_cfg) and then on the SOR.
 */
747 static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
748 const struct tegra_dp_link_config *link_cfg,
754 /* check if panel support enhanched_framing */
755 dpcd_data = link_cfg->lane_count;
756 if (link_cfg->enhanced_framing)
757 dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
758 ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
762 tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);
764 /* Also power down lanes that will not be used */
/*
 * Check whether every active lane reports CR done, channel-EQ done and
 * symbol lock in the DPCD lane status registers. Two lanes share each
 * status byte, hence the lane/2 register select and the per-lane mask.
 */
768 static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
769 const struct tegra_dp_link_config *cfg)
776 for (lane = 0; lane < cfg->lane_count; ++lane) {
777 ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
778 DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
/* odd lanes use the LANEX+1 (upper-nibble) bits of the status byte */
783 NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
784 NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
785 NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
787 DP_LANE_CHANNEL_EQ_DONE |
788 DP_LANE_SYMBOL_LOCKED;
789 if ((data & mask) != mask)
/*
 * Channel-equalization status check: each lane pair must report EQ done
 * and symbol lock, and the inter-lane alignment bit must be set.
 * Returns 0 when equalization is complete, -EIO otherwise.
 */
795 static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
796 const struct tegra_dp_link_config *cfg)
799 u32 n_lanes = cfg->lane_count;
804 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
805 ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
810 ce_done = (data & (0x1 <<
811 NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
813 NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
815 } else if (!(data & (0x1 <<
816 NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
818 NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
820 NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
822 NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
/* all lanes equalized; finally require inter-lane alignment */
827 ret = tegra_dc_dp_dpcd_read(dp,
828 DP_LANE_ALIGN_STATUS_UPDATED,
832 if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
836 return ce_done ? 0 : -EIO;
/*
 * Clock-recovery status check: every lane pair must report CR done in
 * DP_LANE0_1_STATUS(+cnt). The single-lane case is handled by the
 * early return inside the loop.
 */
839 static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
840 const struct tegra_dp_link_config *cfg)
843 u32 n_lanes = cfg->lane_count;
847 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
848 ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
854 return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
856 else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
857 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
/*
 * Read the sink's requested training adjustments into pe[] (pre-emphasis),
 * vs[] (voltage swing / drive current) and, when TPS3 is supported,
 * pc[] (post-cursor2). Each ADJUST_REQUEST byte carries two lanes.
 */
864 static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
865 u32 pc[4], u8 pc_supported,
866 const struct tegra_dp_link_config *cfg)
870 u32 n_lanes = cfg->lane_count;
873 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
874 ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
878 pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
879 NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
880 vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
881 NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
883 (data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
884 NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
886 (data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
887 NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
890 ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
894 for (cnt = 0; cnt < n_lanes; cnt++) {
895 pc[cnt] = (data_ptr >>
896 NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
897 NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
/*
 * Delay between training-status polls. With no DPCD-advertised interval,
 * use the spec minimums (200us for clock recovery, 500us for channel EQ);
 * otherwise wait aux_rd_interval * 4 ms as advertised by the sink.
 */
904 static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
905 bool is_clk_recovery,
906 const struct tegra_dp_link_config *cfg)
908 if (!cfg->aux_rd_interval)
909 udelay(is_clk_recovery ? 200 : 500);
911 mdelay(cfg->aux_rd_interval * 4);
/*
 * Select training pattern @tp on both the SOR and the sink. Scrambling
 * is disabled while a training pattern is active and re-enabled when
 * training is turned off (tp == training_pattern_disabled).
 */
914 static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
915 const struct tegra_dp_link_config *cfg)
917 u8 data = (tp == training_pattern_disabled)
918 ? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
919 : (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);
921 tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
922 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
/*
 * Apply @link_cfg to the hardware: wake the panel to power state D0 if
 * needed (with the DP-spec-mandated 3 retries), enable ASSR when the
 * sink supports it, then program link bandwidth and lane count.
 */
925 static int tegra_dp_link_config(struct tegra_dp_priv *dp,
926 const struct tegra_dp_link_config *link_cfg)
932 if (link_cfg->lane_count == 0) {
933 debug("dp: error: lane count is 0. Can not set link config.\n");
937 /* Set power state if it is not in normal level */
938 ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
942 if (dpcd_data == DP_SET_POWER_D3) {
943 dpcd_data = DP_SET_POWER_D0;
945 /* DP spec requires 3 retries */
946 for (retry = 3; retry > 0; --retry) {
947 ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
952 debug("dp: Failed to set DP panel power\n");
958 /* Enable ASSR if possible */
959 if (link_cfg->alt_scramber_reset_cap) {
960 ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
965 ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
967 debug("dp: Failed to set link bandwidth\n");
970 ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
972 debug("dp: Failed to set lane count\n");
975 tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
/*
 * Try to downgrade to a lower link configuration and reprogram the link.
 * @cfg is saved into tmp_cfg first so it can be restored (and the old
 * link reprogrammed) on failure.
 * NOTE(review): the success/failure branch structure between these calls
 * is not fully visible in this listing — verify against the full file.
 */
981 static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
982 const struct display_timing *timing,
983 struct tegra_dp_link_config *cfg)
985 struct tegra_dp_link_config tmp_cfg;
991 ret = _tegra_dp_lower_link_config(dp, cfg);
993 ret = tegra_dc_dp_calc_config(dp, timing, cfg);
995 ret = tegra_dp_link_config(dp, cfg);
/* failure path: restore the previous working configuration */
1003 tegra_dp_link_config(dp, &tmp_cfg);
/*
 * Push the current training drive settings to hardware and sink:
 *  1) per lane, program pre-emphasis/voltage-swing/post-cursor into the
 *     SOR lane registers (note the lane->register cross mapping below);
 *  2) per lane, write DP_TRAINING_LANEx_SET with MAX_REACHED flags;
 *  3) per lane pair, write POST_CURSOR2 set2 registers when TPS3/PC2
 *     is supported.
 */
1007 static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1008 u32 pc[4], const struct tegra_dp_link_config *cfg)
1010 struct udevice *sor = dp->sor;
1011 u32 n_lanes = cfg->lane_count;
1012 u8 pc_supported = cfg->tps3_supported;
1016 for (cnt = 0; cnt < n_lanes; cnt++) {
1018 u32 pe_reg, vs_reg, pc_reg;
/*
 * DP lane N does not map 1:1 onto SOR PR_LANE field N — the pad
 * order is crossed (e.g. DP lane 0 uses PR_LANE2). Keep this table
 * in sync with the SOR pad routing.
 */
1023 mask = PR_LANE2_DP_LANE0_MASK;
1024 shift = PR_LANE2_DP_LANE0_SHIFT;
1027 mask = PR_LANE1_DP_LANE1_MASK;
1028 shift = PR_LANE1_DP_LANE1_SHIFT;
1031 mask = PR_LANE0_DP_LANE2_MASK;
1032 shift = PR_LANE0_DP_LANE2_SHIFT;
1035 mask = PR_LANE3_DP_LANE3_MASK;
1036 shift = PR_LANE3_DP_LANE3_SHIFT;
1039 debug("dp: incorrect lane cnt\n");
/* translate (pc, vs, pe) levels to register values via lookup tables */
1043 pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1044 vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1045 pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1047 tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
1048 vs_reg << shift, pc_reg << shift,
1052 tegra_dp_disable_tx_pu(dp->sor);
1055 for (cnt = 0; cnt < n_lanes; cnt++) {
1056 u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
1057 u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);
1059 val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
1061 NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
1062 NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
1063 (pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
1065 NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
1066 NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
1067 tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
/* post-cursor2: one set2 byte covers a pair of lanes */
1071 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
1072 u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
1073 u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);
1074 val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
1076 NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
1077 NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
1079 NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
1081 NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
1082 NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
1083 tegra_dc_dp_dpcd_write(dp,
1084 NV_DPCD_TRAINING_LANE0_1_SET2 +
/*
 * Channel-equalization loop (up to 4 attempts): read the sink's adjust
 * requests, apply them, wait the EQ interval, verify CR still holds,
 * then check EQ status. CR dropping during EQ aborts the sequence.
 */
1092 static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
1093 u32 vs[4], u32 pc[4], u8 pc_supported,
1095 const struct tegra_dp_link_config *cfg)
1099 for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
1103 ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
1107 tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1110 tegra_dp_wait_aux_training(dp, false, cfg);
1112 if (!tegra_dp_clock_recovery_status(dp, cfg)) {
1113 debug("dp: CR failed in channel EQ sequence!\n");
1117 if (!tegra_dp_channel_eq_status(dp, cfg))
/*
 * Channel-equalization phase wrapper: select TPS2 (or TPS3 when the sink
 * supports it), run the EQ loop, then turn the training pattern off.
 */
1124 static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1126 const struct tegra_dp_link_config *cfg)
1128 u32 n_lanes = cfg->lane_count;
1129 u8 pc_supported = cfg->tps3_supported;
1131 u32 tp_src = training_pattern_2;
1134 tp_src = training_pattern_3;
1136 tegra_dp_tpg(dp, tp_src, n_lanes, cfg);
1138 ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);
1140 tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
/*
 * Clock-recovery loop: apply drive settings, wait the CR interval, and
 * poll status. Per the DP spec, give up after 5 attempts at the same
 * voltage swing (detected by vs[] not changing between adjustments).
 */
1145 static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1146 u32 vs[4], u32 pc[4], u8 pc_supported,
1148 const struct tegra_dp_link_config *cfg)
1154 tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1155 tegra_dp_wait_aux_training(dp, true, cfg);
1157 if (tegra_dp_clock_recovery_status(dp, cfg))
/* snapshot vs[] to detect whether the sink asked for a change */
1160 memcpy(vs_temp, vs, sizeof(vs_temp));
1161 tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);
1163 if (memcmp(vs_temp, vs, sizeof(vs_temp)))
1167 } while (retry_cnt < 5);
/*
 * Clock-recovery phase wrapper: enable TPS1, run the CR loop, then turn
 * the training pattern off.
 */
1172 static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1173 u32 vs[4], u32 pc[4],
1174 const struct tegra_dp_link_config *cfg)
1176 u32 n_lanes = cfg->lane_count;
1177 u8 pc_supported = cfg->tps3_supported;
1180 tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);
1182 err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
1185 tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
/*
 * Full (spec-compliant) link training: precharge the lanes, start from
 * minimum drive levels, run clock recovery then channel equalization,
 * trying lower link configurations on failure of either phase.
 */
1190 static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
1191 const struct display_timing *timing,
1192 struct tegra_dp_link_config *cfg)
1194 struct udevice *sor = dp->sor;
1196 u32 pe[4], vs[4], pc[4];
1198 tegra_sor_precharge_lanes(sor, cfg);
/* start training from the lowest drive settings */
1201 memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
1202 memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
1203 memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));
1205 err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
/* CR failed: retry whole training at a lower link config if any */
1207 if (!tegra_dp_lower_link_config(dp, timing, cfg))
1210 debug("dp: clk recovery failed\n");
1214 err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
1216 if (!tegra_dp_lower_link_config(dp, timing, cfg))
1219 debug("dp: channel equalization failed\n");
1223 tegra_dc_dp_dump_link_cfg(dp, cfg);
/*
 * All link training functions are ported from kernel dc driver.
 * See more details at drivers/video/tegra/dc/dp.c
 *
 * Fast link training: skip the adaptive CR/EQ loops and drive TPS1 then
 * TPS2/3 with fixed settings, checking lane status once per pattern.
 * Falls back to full training (in the caller) when any check fails.
 * 'mask' selects the status nibbles for the active lanes; the 0x1111
 * pattern tests the per-lane CR_DONE bits, 0x7777 tests CR+EQ+lock.
 */
1235 static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
1236 const struct tegra_dp_link_config *link_cfg,
1237 struct udevice *sor)
1246 u32 mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);
1248 tegra_dc_sor_set_lane_parm(sor, link_cfg);
1249 tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
/* phase 1: TPS1 (clock recovery pattern) with fixed drive levels */
1253 tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
1254 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1255 DP_TRAINING_PATTERN_1);
1257 for (j = 0; j < link_cfg->lane_count; ++j)
1258 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1261 size = sizeof(data16);
1262 tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
1263 DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
1264 status = mask & 0x1111;
1265 if ((data16 & status) != status) {
1266 debug("dp: Link training error for TP1 (%#x, status %#x)\n",
/* phase 2: TPS2 (or TPS3 at HBR2, link_bw == 20) */
1272 tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
1273 tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);
1275 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1276 link_cfg->link_bw == 20 ? 0x23 : 0x22);
1277 for (j = 0; j < link_cfg->lane_count; ++j)
1278 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1281 size = sizeof(data32);
1282 tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
1283 (u8 *)&data32, &size, &status);
1284 if ((data32 & mask) != (0x7777 & mask)) {
1285 debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
/* training done: disable the pattern and double-check lane status */
1289 tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
1291 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);
1293 if (tegra_dc_dp_link_trained(dp, link_cfg)) {
1294 tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1295 debug("Fast link training failed, link bw %d, lane # %d\n",
1296 link_bw, lane_count);
1300 debug("Fast link training succeeded, link bw %d, lane %d\n",
1301 link_cfg->link_bw, link_cfg->lane_count);
/*
 * Train the link: try fast training first (when DO_FAST_LINK_TRAINING),
 * fall back to full training if it fails, then verify the SOR's actual
 * link bandwidth and lane count match the requested configuration.
 */
1306 static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
1307 struct tegra_dp_link_config *link_cfg,
1308 const struct display_timing *timing,
1309 struct udevice *sor)
1315 if (DO_FAST_LINK_TRAINING) {
1316 ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
1318 debug("dp: fast link training failed\n");
1321 * set to a known-good drive setting if fast link
1322 * succeeded. Ignore any error.
1324 ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
1326 debug("Failed to set voltage swing\n");
1332 /* Try full link training then */
1333 ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
1335 debug("dp: full link training failed\n");
1340 /* Everything is good; double check the link config */
1341 tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1343 if ((link_cfg->link_bw == link_bw) &&
1344 (link_cfg->lane_count == lane_count))
/*
 * Find a working link configuration for @timing: validate inputs, then
 * attempt the maximum (max_link_bw x max_lane_count) configuration via
 * calc + program + train on a scratch copy; on success copy it back to
 * @link_cfg. Returns 0 when a valid config was established.
 */
1350 static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
1351 struct tegra_dp_link_config *link_cfg,
1352 struct udevice *sor,
1353 const struct display_timing *timing)
1355 struct tegra_dp_link_config temp_cfg;
1357 if (!timing->pixelclock.typ || !timing->hactive.typ ||
1358 !timing->vactive.typ) {
1359 debug("dp: error mode configuration");
1362 if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
1363 debug("dp: error link configuration");
1367 link_cfg->is_valid = 0;
1369 memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));
1371 temp_cfg.link_bw = temp_cfg.max_link_bw;
1372 temp_cfg.lane_count = temp_cfg.max_lane_count;
1375 * set to max link config
1377 if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
1378 (!tegra_dp_link_config(dp, &temp_cfg)) &&
1379 (!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
1380 /* the max link cfg is doable */
1381 memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));
1383 return link_cfg->is_valid ? 0 : -EFAULT;
/*
 * Poll the AUXSTAT HPD bit for up to 200 ms waiting for the panel to
 * signal hot-plug detect after VDD is applied.
 */
1386 static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
1388 const int vdd_to_hpd_delay_ms = 200;
1392 start = get_timer(0);
1394 val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
1395 if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
1398 } while (get_timer(start) < vdd_to_hpd_delay_ms);
/*
 * After waiting @delay_ms, read DP_SINK_STATUS and report whether sink
 * receive port 0 is out of sync with the main-link stream.
 */
1403 static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
1409 debug("%s: delay=%d\n", __func__, delay_ms);
1411 ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
1415 out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
1417 debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
1419 debug("SINK is in synchronization\n");
/*
 * Verify the sink has locked onto the stream after attach. The TCON may
 * skip initial frames, so we wait a multiple of the frame period before
 * each sync check. After max_retry failed rounds, detach, re-explore the
 * link configuration, and re-attach, growing the per-frame delay each
 * time in case the sink skips more frames.
 */
1424 static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
1425 struct tegra_dp_link_config *link_cfg,
1426 const struct display_timing *timing)
1428 const int max_retry = 5;
1433 * DP TCON may skip some main stream frames, thus we need to wait
1434 * some delay before reading the DPCD SINK STATUS register, starting
1439 retries = max_retry;
1443 if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
1447 debug("%s: retries left %d\n", __func__, retries);
1449 printf("DP: Out of sync after %d retries\n", max_retry);
1452 ret = tegra_dc_sor_detach(dp->dc_dev, dp->sor);
1455 if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
1457 debug("dp: %s: error to configure link\n", __func__);
1461 tegra_dc_sor_set_power_state(dp->sor, 1);
1462 tegra_dc_sor_attach(dp->dc_dev, dp->sor, link_cfg, timing);
1464 /* Increase delay_frame for next try in case the sink is
1465 skipping more frames */
/*
 * tegra_dp_enable() - bring up the DisplayPort link and light the panel
 *
 * @dev:	this DP (dpaux) device
 * @panel_bpp:	bits per pixel the panel runs at
 * @timing:	display timing to drive
 *
 * Sequence (as visible here): enable DPAUX, wait for HPD, build the
 * maximum link config for @timing, find the SOR video bridge, enable DP
 * on the SOR, power the panel, write D0 to the sink's DPCD SET_POWER
 * register (with retries), re-confirm HPD, read the DPCD revision,
 * train the link, attach the SOR, run the sink sync check, power down
 * unused lanes and set the backlight.
 *
 * Return: 0 on success, negative error code on failure
 */
1470 int tegra_dp_enable(struct udevice *dev, int panel_bpp,
1471 const struct display_timing *timing)
1473 struct tegra_dp_priv *priv = dev_get_priv(dev);
/* Link config lives on the stack; helpers take it by pointer */
1474 struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
1475 struct udevice *sor;
/* Start from a zeroed config; scrambling enabled by default */
1480 memset(link_cfg, '\0', sizeof(*link_cfg));
1481 link_cfg->is_valid = 0;
1482 link_cfg->scramble_ena = 1;
1484 tegra_dc_dpaux_enable(priv);
/* Wait (bounded) for the sink to signal hot-plug */
1486 if (tegra_dp_hpd_plug(priv) < 0) {
1487 debug("dp: hpd plug failed\n");
1491 link_cfg->bits_per_pixel = panel_bpp;
1492 if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
1493 debug("dp: failed to init link configuration\n");
/* The SOR is modelled as the first UCLASS_VIDEO_BRIDGE device */
1497 ret = uclass_first_device_err(UCLASS_VIDEO_BRIDGE, &sor);
1499 debug("dp: failed to find SOR device: ret=%d\n", ret);
1503 ret = tegra_dc_sor_enable_dp(sor, link_cfg);
1507 tegra_dc_sor_set_panel_power(sor, 1);
1509 /* Write power on to DPCD */
1510 data = DP_SET_POWER_D0;
/* Retry the DPCD write: sinks may NAK while still powering up */
1513 ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
1514 } while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);
1516 if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
1517 debug("dp: failed to power on panel (0x%x)\n", ret);
1518 return -ENETUNREACH;
1522 /* Confirm DP plugging status */
1523 if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
1524 DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
1525 debug("dp: could not detect HPD\n");
1529 /* Check DP version */
1530 if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
1531 debug("dp: failed to read the revision number from sink\n");
/* Train the link at (up to) the maximum supported configuration */
1535 if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
1536 debug("dp: error configuring link\n");
1540 tegra_dc_sor_set_power_state(sor, 1);
/* -EEXIST presumably means "already attached" and is tolerated */
1541 ret = tegra_dc_sor_attach(priv->dc_dev, sor, link_cfg, timing);
1542 if (ret && ret != -EEXIST)
1546 * This takes a long time, but can apparently resolve a failure to
1547 * bring up the display correctly.
1550 ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
1555 /* Power down the unused lanes to save power - a few hundred mW */
1556 tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);
/* 80% backlight; failure is logged but (apparently) non-fatal here */
1558 ret = video_bridge_set_backlight(sor, 80);
1560 debug("dp: failed to set backlight\n");
1564 priv->enabled = true;
/*
 * tegra_dp_of_to_plat() - read the DPAUX register base from the device
 * tree into platform data
 */
1569 static int tegra_dp_of_to_plat(struct udevice *dev)
1571 struct tegra_dp_plat *plat = dev_get_plat(dev);
1573 plat->base = dev_read_addr(dev);
/*
 * tegra_dp_read_edid() - read the sink's EDID over I2C-over-AUX
 *
 * Enables the DPAUX block, then reads up to @buf_size bytes from the
 * standard EDID I2C slave address (0x50), starting at offset 0.
 *
 * Return: the result of tegra_dc_i2c_aux_read() (0 on success)
 */
1578 static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
1580 struct tegra_dp_priv *priv = dev_get_priv(dev);
/* EDID lives at fixed I2C address 0x50 on the DDC channel */
1581 const int tegra_edid_i2c_address = 0x50;
1584 tegra_dc_dpaux_enable(priv);
1586 return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
1587 buf_size, &aux_stat);
/* UCLASS_DISPLAY operations: EDID retrieval and display enable */
1590 static const struct dm_display_ops dp_tegra_ops = {
1591 .read_edid = tegra_dp_read_edid,
1592 .enable = tegra_dp_enable,
/*
 * dp_tegra_probe() - capture the register base and the source display
 * controller
 *
 * Copies the DT-provided base address into priv->regs and records the
 * display controller device (set up by the display uclass plat data)
 * that is sending us video.
 */
1595 static int dp_tegra_probe(struct udevice *dev)
1597 struct tegra_dp_plat *plat = dev_get_plat(dev);
1598 struct tegra_dp_priv *priv = dev_get_priv(dev);
1599 struct display_plat *disp_uc_plat = dev_get_uclass_plat(dev);
1601 priv->regs = (struct dpaux_ctlr *)plat->base;
1602 priv->enabled = false;
1604 /* Remember the display controller that is sending us video */
1605 priv->dc_dev = disp_uc_plat->src_dev;
/* Device tree match table: Tegra124 and Tegra210 DPAUX controllers */
1610 static const struct udevice_id tegra_dp_ids[] = {
1611 { .compatible = "nvidia,tegra124-dpaux" },
1612 { .compatible = "nvidia,tegra210-dpaux" },
1616 U_BOOT_DRIVER(dp_tegra) = {
1617 .name = "dpaux_tegra",
1618 .id = UCLASS_DISPLAY,
1619 .of_match = tegra_dp_ids,
1620 .of_to_plat = tegra_dp_of_to_plat,
1621 .probe = dp_tegra_probe,
1622 .ops = &dp_tegra_ops,
1623 .priv_auto = sizeof(struct tegra_dp_priv),
1624 .plat_auto = sizeof(struct tegra_dp_plat),