// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "intel_atomic.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
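	/*
	 * Illustrative summary of the checks below (derived from the code,
	 * not from a spec quote): FDI links B and C share lanes, so pipe B
	 * may use more than 2 lanes only while pipe C drives no PCH output,
	 * and pipe C is always limited to 2 lanes. For example, B at 4 lanes
	 * with C enabled is rejected, while B at 2 and C at 2 can coexist.
	 */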
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
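		/*
		 * Worked example (illustrative, assuming the BIOS programmed
		 * the divider for the usual 2.7 GHz FDI link): a divider
		 * field of 25 yields (25 + 2) * 10000 = 270000 (in kHz,
		 * i.e. 270 MHz), matching the fixed value used for SNB/IVB
		 * below.
		 */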
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->fdi_pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
}
int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->fdi_pll_freq;
}
int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);
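	/*
	 * Rough sanity check of the numbers (illustrative only; the exact
	 * rounding and margin live in ilk_get_lanes_required(), defined
	 * elsewhere): each lane carries link_bw * 8 bits of pixel data per
	 * second, so e.g. a 148500 kHz dotclock at 24 bpp needs about
	 * 148500 * 24 / (270000 * 8) ~= 1.65, i.e. 2 FDI lanes.
	 */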
	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;
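		/*
		 * Note (added for clarity): pipe_bpp drops in steps of
		 * 2 bits per channel (2 * 3 = 6 bits per pixel), e.g.
		 * 24 -> 18, and the retry stops once 18 bpp (6 * 3) is
		 * reached.
		 */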
		goto retry;
	}

	if (needs_recompute)
		return I915_DISPLAY_CONFIG_RETRY;

	return ret;
}
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
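/*
 * Note (added for clarity): these are FDI TX voltage-swing/pre-emphasis
 * combinations, stepped through in order by the SNB and IVB training
 * loops below until the receiver reports bit and symbol lock.
 */
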
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
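		/*
		 * Note (added for clarity, based on my reading of the
		 * register layout): the DDI_BUF_CTL value above encodes the
		 * port width as (fdi_lanes - 1) shifted into the port-width
		 * field, and DDI_BUF_TRANS_SELECT(i / 2) picks the next
		 * buffer translation entry, each entry being tried twice as
		 * the loop comment above describes.
		 */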

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
		temp &= ~DDI_BUF_CTL_ENABLE;
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	}
}